/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>

#include <linux/io.h>

#ifdef CONFIG_SPARC64
#include <linux/of_device.h>
#endif

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"Nov 14, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef DMA_44BIT_MASK
#define DMA_44BIT_MASK	0x00000fffffffffffULL
#endif

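/* Fallback 64-bit MMIO accessors for platforms that do not provide native
 * readq/writeq.  Each 64-bit access is split into two 32-bit accesses, so
 * these are not atomic with respect to the hardware.
 */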
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

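/* Register access helpers.  nr64()/nw64() operate on the main register
 * space; the _mac, _ipp, _pcs and _xpcs variants add the per-port MAC
 * base or the IPP/PCS/XPCS block offsets cached in the niu softc.
 */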
#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niudbg(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_DEBUG PFX f, ## a); \
} while (0)

#define niuinfo(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_INFO PFX f, ## a); \
} while (0)

#define niuwarn(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_WARNING PFX f, ## a); \
} while (0)

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

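/* Polling helpers: set the given bits in a register, then spin (with
 * udelay() between reads) until the hardware clears them, failing with
 * -ENODEV once the retry limit is exhausted.
 */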
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

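/* Interrupt management: each logical device number (LDN) is mapped to a
 * logical device group (LDG).  Rearming an LDG writes its timer value
 * (plus the ARM bit) to LDG_IMGMT; individual LDNs are masked or unmasked
 * through the LD_IM0/LD_IM1 interrupt mask registers.
 */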
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

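/* The parent's port_phy word appears to pack one PORT_TYPE_MASK-wide
 * field per port, spaced two bits apart, hence the (port * 2) shifts in
 * the encode/decode helpers below.
 */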
static u32 phy_encode(u32 type, int port)
{
	return (type << (port * 2));
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

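/* Wait for the MIF to report turn-around on the management frame, then
 * return the 16-bit data field.  Used by both the clause-22 MII and
 * clause-45 MDIO transaction helpers below.
 */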
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

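/* The ESR2 TI PLL TX/RX lane configuration values are 32 bits wide but
 * the device exposes them as paired 16-bit low/high MDIO registers, so
 * each update takes two writes.
 */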
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_1g_serdes: "
			"mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_1g_serdes: "
			"mdio write to ESR2_TI_PLL_STS_L failed", np->port);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_10g_serdes: "
			"mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_10g_serdes: "
			"mdio write to ESR2_TI_PLL_STS_L failed", np->port);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info(PFX "NIU Port %u signal bits [%08x] are not "
			"[%08x] for 10G...trying 1G\n",
			np->port, (int) (sig & mask), (int) val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			dev_err(np->device, PFX "Port %u 10G/1G SERDES "
				"Link Failed\n", np->port);
			return -ENODEV;
		}
	}
	return 0;
}

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

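/* Walk the RX/TX reset control halves through an assert/deassert sequence
 * with 200us settling delays, then verify via esr_read_reset() that no
 * reset bits remain set.
 */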
static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		dev_err(np->device, PFX "Port %u ESR_RESET "
			"did not clear [%08x]\n",
			np->port, reset);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = -EINVAL;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		common = adv & lpa;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		estat = err;
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

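/* Soft-reset the BCM8704/8706 PHY by setting BMCR_RESET through the PHYXS
 * device and polling until the PHY clears the bit on its own.
 */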
static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u PHY will not reset "
			"(bmcr=%04x)\n", np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
		MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int	err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD  */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}


static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
		np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info(PFX "Port %u cable not connected "
				"or bad cable.\n", np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info(PFX "Port %u optical module is bad "
				"or missing.\n", np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return 0;
}

static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u MII would not reset, "
			"bmcr[%04x]\n", np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	/* XXX configurable XXX */
	/* XXX for now don't advertise half-duplex or asym pause... XXX */
	adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
	if (bmsr & BMSR_10FULL)
		adv |= ADVERTISE_10FULL;
	if (bmsr & BMSR_100FULL)
		adv |= ADVERTISE_100FULL;
	err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
#if 0
	pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}

static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

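/* Common carrier handling: on a link-up transition, log the negotiated
 * speed/duplex, reprogram the XIF and LED state under np->lock, and turn
 * the carrier on; on a link-down transition, update the LED and turn the
 * carrier off.
 */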
static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
		       dev->name,
		       (lp->active_speed == SPEED_10000 ?
			"10Gb/sec" :
			(lp->active_speed == SPEED_1000 ?
			 "1Gb/sec" :
			 (lp->active_speed == SPEED_100 ?
			  "100Mbit/sec" : "10Mbit/sec"))),
		       (lp->active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		niuwarn(LINK, "%s: Link is down\n", dev->name);
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}

static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;
	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		err = 0;
	return err;
}

static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

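/* Infer whether the hotpluggable 10G PHY is present by checking that all
 * of this port's SERDES ready/detect signal bits are asserted.
 */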
static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}

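/* Hotplug-aware link status: detect PHY insertion/removal by comparing
 * the current presence state against NIU_FLAGS_HOTPLUG_PHY_PRESENT,
 * re-running xcvr_init on insertion (clearing the flag again if that
 * fails, as a debounce) before querying the BCM8706 link state.
 */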
link_status_10g_hotplug(struct niu * np,int * link_up_p)2018 static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
2019 {
2020 	unsigned long flags;
2021 	int err = 0;
2022 	int phy_present;
2023 	int phy_present_prev;
2024 
2025 	spin_lock_irqsave(&np->lock, flags);
2026 
2027 	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2028 		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
2029 			1 : 0;
2030 		phy_present = niu_10g_phy_present(np);
2031 		if (phy_present != phy_present_prev) {
2032 			/* state change */
2033 			if (phy_present) {
2034 				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2035 				if (np->phy_ops->xcvr_init)
2036 					err = np->phy_ops->xcvr_init(np);
2037 				if (err) {
2038 					/* debounce */
2039 					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2040 				}
2041 			} else {
2042 				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2043 				*link_up_p = 0;
2044 				niuwarn(LINK, "%s: Hotplug PHY Removed\n",
2045 					np->dev->name);
2046 			}
2047 		}
2048 		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
2049 			err = link_status_10g_bcm8706(np, link_up_p);
2050 	}
2051 
2052 	spin_unlock_irqrestore(&np->lock, flags);
2053 
2054 	return err;
2055 }
2056 
2057 static int link_status_1g(struct niu *np, int *link_up_p)
2058 {
2059 	struct niu_link_config *lp = &np->link_config;
2060 	u16 current_speed, bmsr;
2061 	unsigned long flags;
2062 	u8 current_duplex;
2063 	int err, link_up;
2064 
2065 	link_up = 0;
2066 	current_speed = SPEED_INVALID;
2067 	current_duplex = DUPLEX_INVALID;
2068 
2069 	spin_lock_irqsave(&np->lock, flags);
2070 
2071 	err = -EINVAL;
2072 	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
2073 		goto out;
2074 
2075 	err = mii_read(np, np->phy_addr, MII_BMSR);
2076 	if (err < 0)
2077 		goto out;
2078 
2079 	bmsr = err;
2080 	if (bmsr & BMSR_LSTATUS) {
2081 		u16 adv, lpa, common, estat;
2082 
2083 		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
2084 		if (err < 0)
2085 			goto out;
2086 		adv = err;
2087 
2088 		err = mii_read(np, np->phy_addr, MII_LPA);
2089 		if (err < 0)
2090 			goto out;
2091 		lpa = err;
2092 
2093 		common = adv & lpa;
2094 
2095 		err = mii_read(np, np->phy_addr, MII_ESTATUS);
2096 		if (err < 0)
2097 			goto out;
2098 		estat = err;
2099 
2100 		link_up = 1;
2101 		if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
2102 			current_speed = SPEED_1000;
2103 			if (estat & ESTATUS_1000_TFULL)
2104 				current_duplex = DUPLEX_FULL;
2105 			else
2106 				current_duplex = DUPLEX_HALF;
2107 		} else {
2108 			if (common & ADVERTISE_100BASE4) {
2109 				current_speed = SPEED_100;
2110 				current_duplex = DUPLEX_HALF;
2111 			} else if (common & ADVERTISE_100FULL) {
2112 				current_speed = SPEED_100;
2113 				current_duplex = DUPLEX_FULL;
2114 			} else if (common & ADVERTISE_100HALF) {
2115 				current_speed = SPEED_100;
2116 				current_duplex = DUPLEX_HALF;
2117 			} else if (common & ADVERTISE_10FULL) {
2118 				current_speed = SPEED_10;
2119 				current_duplex = DUPLEX_FULL;
2120 			} else if (common & ADVERTISE_10HALF) {
2121 				current_speed = SPEED_10;
2122 				current_duplex = DUPLEX_HALF;
2123 			} else
2124 				link_up = 0;
2125 		}
2126 	}
2127 	lp->active_speed = current_speed;
2128 	lp->active_duplex = current_duplex;
2129 	err = 0;
2130 
2131 out:
2132 	spin_unlock_irqrestore(&np->lock, flags);
2133 
2134 	*link_up_p = link_up;
2135 	return err;
2136 }
2137 
2138 static int niu_link_status(struct niu *np, int *link_up_p)
2139 {
2140 	const struct niu_phy_ops *ops = np->phy_ops;
2141 	int err;
2142 
2143 	err = 0;
2144 	if (ops->link_status)
2145 		err = ops->link_status(np, link_up_p);
2146 
2147 	return err;
2148 }
2149 
2150 static void niu_timer(unsigned long __opaque)
2151 {
2152 	struct niu *np = (struct niu *) __opaque;
2153 	unsigned long off;
2154 	int err, link_up;
2155 
2156 	err = niu_link_status(np, &link_up);
2157 	if (!err)
2158 		niu_link_status_common(np, link_up);
2159 
2160 	if (netif_carrier_ok(np->dev))
2161 		off = 5 * HZ;
2162 	else
2163 		off = 1 * HZ;
2164 	np->timer.expires = jiffies + off;
2165 
2166 	add_timer(&np->timer);
2167 }
2168 
2169 static const struct niu_phy_ops phy_ops_10g_serdes = {
2170 	.serdes_init		= serdes_init_10g_serdes,
2171 	.link_status		= link_status_10g_serdes,
2172 };
2173 
2174 static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2175 	.serdes_init		= serdes_init_niu_10g_serdes,
2176 	.link_status		= link_status_10g_serdes,
2177 };
2178 
2179 static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2180 	.serdes_init		= serdes_init_niu_1g_serdes,
2181 	.link_status		= link_status_1g_serdes,
2182 };
2183 
2184 static const struct niu_phy_ops phy_ops_1g_rgmii = {
2185 	.xcvr_init		= xcvr_init_1g_rgmii,
2186 	.link_status		= link_status_1g_rgmii,
2187 };
2188 
2189 static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
2190 	.serdes_init		= serdes_init_niu_10g_fiber,
2191 	.xcvr_init		= xcvr_init_10g,
2192 	.link_status		= link_status_10g,
2193 };
2194 
2195 static const struct niu_phy_ops phy_ops_10g_fiber = {
2196 	.serdes_init		= serdes_init_10g,
2197 	.xcvr_init		= xcvr_init_10g,
2198 	.link_status		= link_status_10g,
2199 };
2200 
2201 static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
2202 	.serdes_init		= serdes_init_10g,
2203 	.xcvr_init		= xcvr_init_10g_bcm8706,
2204 	.link_status		= link_status_10g_hotplug,
2205 };
2206 
2207 static const struct niu_phy_ops phy_ops_10g_copper = {
2208 	.serdes_init		= serdes_init_10g,
2209 	.link_status		= link_status_10g, /* XXX */
2210 };
2211 
2212 static const struct niu_phy_ops phy_ops_1g_fiber = {
2213 	.serdes_init		= serdes_init_1g,
2214 	.xcvr_init		= xcvr_init_1g,
2215 	.link_status		= link_status_1g,
2216 };
2217 
2218 static const struct niu_phy_ops phy_ops_1g_copper = {
2219 	.xcvr_init		= xcvr_init_1g,
2220 	.link_status		= link_status_1g,
2221 };
2222 
2223 struct niu_phy_template {
2224 	const struct niu_phy_ops	*ops;
2225 	u32				phy_addr_base;
2226 };
2227 
2228 static const struct niu_phy_template phy_template_niu_10g_fiber = {
2229 	.ops		= &phy_ops_10g_fiber_niu,
2230 	.phy_addr_base	= 16,
2231 };
2232 
2233 static const struct niu_phy_template phy_template_niu_10g_serdes = {
2234 	.ops		= &phy_ops_10g_serdes_niu,
2235 	.phy_addr_base	= 0,
2236 };
2237 
2238 static const struct niu_phy_template phy_template_niu_1g_serdes = {
2239 	.ops		= &phy_ops_1g_serdes_niu,
2240 	.phy_addr_base	= 0,
2241 };
2242 
2243 static const struct niu_phy_template phy_template_10g_fiber = {
2244 	.ops		= &phy_ops_10g_fiber,
2245 	.phy_addr_base	= 8,
2246 };
2247 
2248 static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
2249 	.ops		= &phy_ops_10g_fiber_hotplug,
2250 	.phy_addr_base	= 8,
2251 };
2252 
2253 static const struct niu_phy_template phy_template_10g_copper = {
2254 	.ops		= &phy_ops_10g_copper,
2255 	.phy_addr_base	= 10,
2256 };
2257 
2258 static const struct niu_phy_template phy_template_1g_fiber = {
2259 	.ops		= &phy_ops_1g_fiber,
2260 	.phy_addr_base	= 0,
2261 };
2262 
2263 static const struct niu_phy_template phy_template_1g_copper = {
2264 	.ops		= &phy_ops_1g_copper,
2265 	.phy_addr_base	= 0,
2266 };
2267 
2268 static const struct niu_phy_template phy_template_1g_rgmii = {
2269 	.ops		= &phy_ops_1g_rgmii,
2270 	.phy_addr_base	= 0,
2271 };
2272 
2273 static const struct niu_phy_template phy_template_10g_serdes = {
2274 	.ops		= &phy_ops_10g_serdes,
2275 	.phy_addr_base	= 0,
2276 };
2277 
2278 static int niu_atca_port_num[4] = {
2279 	0, 0, 11, 10
2280 };
2281 
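/* Bring up the 10G SERDES on port 0 or 1.  If the expected signal
 * bits never show up, fall back to 1G SERDES mode and switch the MAC
 * transceiver to PCS.
 */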
2282 static int serdes_init_10g_serdes(struct niu *np)
2283 {
2284 	struct niu_link_config *lp = &np->link_config;
2285 	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
2286 	u64 ctrl_val, test_cfg_val, sig, mask, val;
2287 	int err;
2288 	u64 reset_val;
2289 
2290 	switch (np->port) {
2291 	case 0:
2292 		reset_val =  ENET_SERDES_RESET_0;
2293 		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
2294 		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
2295 		pll_cfg = ENET_SERDES_0_PLL_CFG;
2296 		break;
2297 	case 1:
2298 		reset_val =  ENET_SERDES_RESET_1;
2299 		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
2300 		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
2301 		pll_cfg = ENET_SERDES_1_PLL_CFG;
2302 		break;
2303 
2304 	default:
2305 		return -EINVAL;
2306 	}
2307 	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
2308 		    ENET_SERDES_CTRL_SDET_1 |
2309 		    ENET_SERDES_CTRL_SDET_2 |
2310 		    ENET_SERDES_CTRL_SDET_3 |
2311 		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
2312 		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
2313 		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
2314 		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
2315 		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
2316 		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
2317 		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
2318 		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
2319 	test_cfg_val = 0;
2320 
2321 	if (lp->loopback_mode == LOOPBACK_PHY) {
2322 		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
2323 				  ENET_SERDES_TEST_MD_0_SHIFT) |
2324 				 (ENET_TEST_MD_PAD_LOOPBACK <<
2325 				  ENET_SERDES_TEST_MD_1_SHIFT) |
2326 				 (ENET_TEST_MD_PAD_LOOPBACK <<
2327 				  ENET_SERDES_TEST_MD_2_SHIFT) |
2328 				 (ENET_TEST_MD_PAD_LOOPBACK <<
2329 				  ENET_SERDES_TEST_MD_3_SHIFT));
2330 	}
2331 
2332 	esr_reset(np);
2333 	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
2334 	nw64(ctrl_reg, ctrl_val);
2335 	nw64(test_cfg_reg, test_cfg_val);
2336 
2337 	/* Initialize all 4 lanes of the SERDES.  */
2338 	for (i = 0; i < 4; i++) {
2339 		u32 rxtx_ctrl, glue0;
2340 
2341 		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2342 		if (err)
2343 			return err;
2344 		err = esr_read_glue0(np, i, &glue0);
2345 		if (err)
2346 			return err;
2347 
2348 		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
2349 		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
2350 			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
2351 
2352 		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
2353 			   ESR_GLUE_CTRL0_THCNT |
2354 			   ESR_GLUE_CTRL0_BLTIME);
2355 		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
2356 			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
2357 			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
2358 			  (BLTIME_300_CYCLES <<
2359 			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
2360 
2361 		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2362 		if (err)
2363 			return err;
2364 		err = esr_write_glue0(np, i, glue0);
2365 		if (err)
2366 			return err;
2367 	}
2368 
2369 
2371 	switch (np->port) {
2372 	case 0:
2373 		mask = ESR_INT_SIGNALS_P0_BITS;
2374 		val = (ESR_INT_SRDY0_P0 |
2375 		       ESR_INT_DET0_P0 |
2376 		       ESR_INT_XSRDY_P0 |
2377 		       ESR_INT_XDP_P0_CH3 |
2378 		       ESR_INT_XDP_P0_CH2 |
2379 		       ESR_INT_XDP_P0_CH1 |
2380 		       ESR_INT_XDP_P0_CH0);
2381 		break;
2382 
2383 	case 1:
2384 		mask = ESR_INT_SIGNALS_P1_BITS;
2385 		val = (ESR_INT_SRDY0_P1 |
2386 		       ESR_INT_DET0_P1 |
2387 		       ESR_INT_XSRDY_P1 |
2388 		       ESR_INT_XDP_P1_CH3 |
2389 		       ESR_INT_XDP_P1_CH2 |
2390 		       ESR_INT_XDP_P1_CH1 |
2391 		       ESR_INT_XDP_P1_CH0);
2392 		break;
2393 
2394 	default:
2395 		return -EINVAL;
2396 	}
2397 
2398 	if ((sig & mask) != val) {
2399 		/* 10G signals not OK, fall back to 1G SERDES mode.  */
2400 		err = serdes_init_1g_serdes(np);
2401 		if (!err) {
2402 			np->flags &= ~NIU_FLAGS_10G;
2403 			np->mac_xcvr = MAC_XCVR_PCS;
2404 		} else {
2405 			dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed\n",
2406 				np->port);
2407 			return -ENODEV;
2408 		}
2409 	}
2410 
2411 	return 0;
2412 }
2413 
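/* Choose the PHY ops template and MDIO address from the platform
 * type and the probed 10G/FIBER/SERDES flag combination.
 */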
2414 static int niu_determine_phy_disposition(struct niu *np)
2415 {
2416 	struct niu_parent *parent = np->parent;
2417 	u8 plat_type = parent->plat_type;
2418 	const struct niu_phy_template *tp;
2419 	u32 phy_addr_off = 0;
2420 
2421 	if (plat_type == PLAT_TYPE_NIU) {
2422 		switch (np->flags &
2423 			(NIU_FLAGS_10G |
2424 			 NIU_FLAGS_FIBER |
2425 			 NIU_FLAGS_XCVR_SERDES)) {
2426 		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2427 			/* 10G Serdes */
2428 			tp = &phy_template_niu_10g_serdes;
2429 			break;
2430 		case NIU_FLAGS_XCVR_SERDES:
2431 			/* 1G Serdes */
2432 			tp = &phy_template_niu_1g_serdes;
2433 			break;
2434 		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2435 			/* 10G Fiber */
2436 		default:
2437 			tp = &phy_template_niu_10g_fiber;
2438 			phy_addr_off += np->port;
2439 			break;
2440 		}
2441 	} else {
2442 		switch (np->flags &
2443 			(NIU_FLAGS_10G |
2444 			 NIU_FLAGS_FIBER |
2445 			 NIU_FLAGS_XCVR_SERDES)) {
2446 		case 0:
2447 			/* 1G copper */
2448 			tp = &phy_template_1g_copper;
2449 			if (plat_type == PLAT_TYPE_VF_P0)
2450 				phy_addr_off = 10;
2451 			else if (plat_type == PLAT_TYPE_VF_P1)
2452 				phy_addr_off = 26;
2453 
2454 			phy_addr_off += (np->port ^ 0x3);
2455 			break;
2456 
2457 		case NIU_FLAGS_10G:
2458 			/* 10G copper */
2459 			tp = &phy_template_10g_copper;
2460 			break;
2461 
2462 		case NIU_FLAGS_FIBER:
2463 			/* 1G fiber */
2464 			tp = &phy_template_1g_fiber;
2465 			break;
2466 
2467 		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2468 			/* 10G fiber */
2469 			tp = &phy_template_10g_fiber;
2470 			if (plat_type == PLAT_TYPE_VF_P0 ||
2471 			    plat_type == PLAT_TYPE_VF_P1)
2472 				phy_addr_off = 8;
2473 			phy_addr_off += np->port;
2474 			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2475 				tp = &phy_template_10g_fiber_hotplug;
2476 				if (np->port == 0)
2477 					phy_addr_off = 8;
2478 				if (np->port == 1)
2479 					phy_addr_off = 12;
2480 			}
2481 			break;
2482 
2483 		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2484 		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2485 		case NIU_FLAGS_XCVR_SERDES:
2486 			switch(np->port) {
2487 			case 0:
2488 			case 1:
2489 				tp = &phy_template_10g_serdes;
2490 				break;
2491 			case 2:
2492 			case 3:
2493 				tp = &phy_template_1g_rgmii;
2494 				break;
2495 			default:
2496 				return -EINVAL;
2497 				break;
2498 			}
2499 			phy_addr_off = niu_atca_port_num[np->port];
2500 			break;
2501 
2502 		default:
2503 			return -EINVAL;
2504 		}
2505 	}
2506 
2507 	np->phy_ops = tp->ops;
2508 	np->phy_addr = tp->phy_addr_base + phy_addr_off;
2509 
2510 	return 0;
2511 }
2512 
2513 static int niu_init_link(struct niu *np)
2514 {
2515 	struct niu_parent *parent = np->parent;
2516 	int err, ignore;
2517 
2518 	if (parent->plat_type == PLAT_TYPE_NIU) {
2519 		err = niu_xcvr_init(np);
2520 		if (err)
2521 			return err;
2522 		msleep(200);
2523 	}
2524 	err = niu_serdes_init(np);
2525 	if (err)
2526 		return err;
2527 	msleep(200);
2528 	err = niu_xcvr_init(np);
2529 	if (!err)
2530 		niu_link_status(np, &ignore);
2531 	return 0;
2532 }
2533 
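/* Program the primary station MAC address; the six bytes are packed
 * into three 16-bit registers, with the most significant pair in
 * ADDR2.
 */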
2534 static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2535 {
2536 	u16 reg0 = addr[4] << 8 | addr[5];
2537 	u16 reg1 = addr[2] << 8 | addr[3];
2538 	u16 reg2 = addr[0] << 8 | addr[1];
2539 
2540 	if (np->flags & NIU_FLAGS_XMAC) {
2541 		nw64_mac(XMAC_ADDR0, reg0);
2542 		nw64_mac(XMAC_ADDR1, reg1);
2543 		nw64_mac(XMAC_ADDR2, reg2);
2544 	} else {
2545 		nw64_mac(BMAC_ADDR0, reg0);
2546 		nw64_mac(BMAC_ADDR1, reg1);
2547 		nw64_mac(BMAC_ADDR2, reg2);
2548 	}
2549 }
2550 
2551 static int niu_num_alt_addr(struct niu *np)
2552 {
2553 	if (np->flags & NIU_FLAGS_XMAC)
2554 		return XMAC_NUM_ALT_ADDR;
2555 	else
2556 		return BMAC_NUM_ALT_ADDR;
2557 }
2558 
2559 static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2560 {
2561 	u16 reg0 = addr[4] << 8 | addr[5];
2562 	u16 reg1 = addr[2] << 8 | addr[3];
2563 	u16 reg2 = addr[0] << 8 | addr[1];
2564 
2565 	if (index >= niu_num_alt_addr(np))
2566 		return -EINVAL;
2567 
2568 	if (np->flags & NIU_FLAGS_XMAC) {
2569 		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2570 		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2571 		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2572 	} else {
2573 		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2574 		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2575 		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2576 	}
2577 
2578 	return 0;
2579 }
2580 
2581 static int niu_enable_alt_mac(struct niu *np, int index, int on)
2582 {
2583 	unsigned long reg;
2584 	u64 val, mask;
2585 
2586 	if (index >= niu_num_alt_addr(np))
2587 		return -EINVAL;
2588 
2589 	if (np->flags & NIU_FLAGS_XMAC) {
2590 		reg = XMAC_ADDR_CMPEN;
2591 		mask = 1 << index;
2592 	} else {
2593 		reg = BMAC_ADDR_CMPEN;
2594 		mask = 1 << (index + 1);
2595 	}
2596 
2597 	val = nr64_mac(reg);
2598 	if (on)
2599 		val |= mask;
2600 	else
2601 		val &= ~mask;
2602 	nw64_mac(reg, val);
2603 
2604 	return 0;
2605 }
2606 
2607 static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2608 				   int num, int mac_pref)
2609 {
2610 	u64 val = nr64_mac(reg);
2611 	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2612 	val |= num;
2613 	if (mac_pref)
2614 		val |= HOST_INFO_MPR;
2615 	nw64_mac(reg, val);
2616 }
2617 
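/* Point a MAC HOST_INFO slot at an RDC (Receive DMA Channel) table.
 * XMAC and BMAC use separate register banks, hence the two indices;
 * mac_pref sets the MAC-preference bit for the entry.
 */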
2618 static int __set_rdc_table_num(struct niu *np,
2619 			       int xmac_index, int bmac_index,
2620 			       int rdc_table_num, int mac_pref)
2621 {
2622 	unsigned long reg;
2623 
2624 	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2625 		return -EINVAL;
2626 	if (np->flags & NIU_FLAGS_XMAC)
2627 		reg = XMAC_HOST_INFO(xmac_index);
2628 	else
2629 		reg = BMAC_HOST_INFO(bmac_index);
2630 	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2631 	return 0;
2632 }
2633 
2634 static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
2635 					 int mac_pref)
2636 {
2637 	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2638 }
2639 
2640 static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
2641 					   int mac_pref)
2642 {
2643 	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2644 }
2645 
2646 static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2647 				     int table_num, int mac_pref)
2648 {
2649 	if (idx >= niu_num_alt_addr(np))
2650 		return -EINVAL;
2651 	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2652 }
2653 
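/* Maintain the two even-parity bits of a VLAN table entry: PARITY0
 * covers the port 0/1 fields (low byte), PARITY1 the port 2/3 fields
 * (high byte).
 */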
2654 static u64 vlan_entry_set_parity(u64 reg_val)
2655 {
2656 	u64 port01_mask;
2657 	u64 port23_mask;
2658 
2659 	port01_mask = 0x00ff;
2660 	port23_mask = 0xff00;
2661 
2662 	if (hweight64(reg_val & port01_mask) & 1)
2663 		reg_val |= ENET_VLAN_TBL_PARITY0;
2664 	else
2665 		reg_val &= ~ENET_VLAN_TBL_PARITY0;
2666 
2667 	if (hweight64(reg_val & port23_mask) & 1)
2668 		reg_val |= ENET_VLAN_TBL_PARITY1;
2669 	else
2670 		reg_val &= ~ENET_VLAN_TBL_PARITY1;
2671 
2672 	return reg_val;
2673 }
2674 
2675 static void vlan_tbl_write(struct niu *np, unsigned long index,
2676 			   int port, int vpr, int rdc_table)
2677 {
2678 	u64 reg_val = nr64(ENET_VLAN_TBL(index));
2679 
2680 	reg_val &= ~((ENET_VLAN_TBL_VPR |
2681 		      ENET_VLAN_TBL_VLANRDCTBLN) <<
2682 		     ENET_VLAN_TBL_SHIFT(port));
2683 	if (vpr)
2684 		reg_val |= (ENET_VLAN_TBL_VPR <<
2685 			    ENET_VLAN_TBL_SHIFT(port));
2686 	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2687 
2688 	reg_val = vlan_entry_set_parity(reg_val);
2689 
2690 	nw64(ENET_VLAN_TBL(index), reg_val);
2691 }
2692 
2693 static void vlan_tbl_clear(struct niu *np)
2694 {
2695 	int i;
2696 
2697 	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2698 		nw64(ENET_VLAN_TBL(i), 0);
2699 }
2700 
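/* Poll TCAM_CTL until the given status bit is set, giving up after
 * roughly 1ms (1000 polls of 1us each).
 */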
2701 static int tcam_wait_bit(struct niu *np, u64 bit)
2702 {
2703 	int limit = 1000;
2704 
2705 	while (--limit > 0) {
2706 		if (nr64(TCAM_CTL) & bit)
2707 			break;
2708 		udelay(1);
2709 	}
2710 	if (limit <= 0)
2711 		return -ENODEV;
2712 
2713 	return 0;
2714 }
2715 
2716 static int tcam_flush(struct niu *np, int index)
2717 {
2718 	nw64(TCAM_KEY_0, 0x00);
2719 	nw64(TCAM_KEY_MASK_0, 0xff);
2720 	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2721 
2722 	return tcam_wait_bit(np, TCAM_CTL_STAT);
2723 }
2724 
2725 #if 0
2726 static int tcam_read(struct niu *np, int index,
2727 		     u64 *key, u64 *mask)
2728 {
2729 	int err;
2730 
2731 	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
2732 	err = tcam_wait_bit(np, TCAM_CTL_STAT);
2733 	if (!err) {
2734 		key[0] = nr64(TCAM_KEY_0);
2735 		key[1] = nr64(TCAM_KEY_1);
2736 		key[2] = nr64(TCAM_KEY_2);
2737 		key[3] = nr64(TCAM_KEY_3);
2738 		mask[0] = nr64(TCAM_KEY_MASK_0);
2739 		mask[1] = nr64(TCAM_KEY_MASK_1);
2740 		mask[2] = nr64(TCAM_KEY_MASK_2);
2741 		mask[3] = nr64(TCAM_KEY_MASK_3);
2742 	}
2743 	return err;
2744 }
2745 #endif
2746 
2747 static int tcam_write(struct niu *np, int index,
2748 		      u64 *key, u64 *mask)
2749 {
2750 	nw64(TCAM_KEY_0, key[0]);
2751 	nw64(TCAM_KEY_1, key[1]);
2752 	nw64(TCAM_KEY_2, key[2]);
2753 	nw64(TCAM_KEY_3, key[3]);
2754 	nw64(TCAM_KEY_MASK_0, mask[0]);
2755 	nw64(TCAM_KEY_MASK_1, mask[1]);
2756 	nw64(TCAM_KEY_MASK_2, mask[2]);
2757 	nw64(TCAM_KEY_MASK_3, mask[3]);
2758 	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2759 
2760 	return tcam_wait_bit(np, TCAM_CTL_STAT);
2761 }
2762 
2763 #if 0
2764 static int tcam_assoc_read(struct niu *np, int index, u64 *data)
2765 {
2766 	int err;
2767 
2768 	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
2769 	err = tcam_wait_bit(np, TCAM_CTL_STAT);
2770 	if (!err)
2771 		*data = nr64(TCAM_KEY_1);
2772 
2773 	return err;
2774 }
2775 #endif
2776 
2777 static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
2778 {
2779 	nw64(TCAM_KEY_1, assoc_data);
2780 	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
2781 
2782 	return tcam_wait_bit(np, TCAM_CTL_STAT);
2783 }
2784 
2785 static void tcam_enable(struct niu *np, int on)
2786 {
2787 	u64 val = nr64(FFLP_CFG_1);
2788 
2789 	if (on)
2790 		val &= ~FFLP_CFG_1_TCAM_DIS;
2791 	else
2792 		val |= FFLP_CFG_1_TCAM_DIS;
2793 	nw64(FFLP_CFG_1, val);
2794 }
2795 
2796 static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
2797 {
2798 	u64 val = nr64(FFLP_CFG_1);
2799 
2800 	val &= ~(FFLP_CFG_1_FFLPINITDONE |
2801 		 FFLP_CFG_1_CAMLAT |
2802 		 FFLP_CFG_1_CAMRATIO);
2803 	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
2804 	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
2805 	nw64(FFLP_CFG_1, val);
2806 
2807 	val = nr64(FFLP_CFG_1);
2808 	val |= FFLP_CFG_1_FFLPINITDONE;
2809 	nw64(FFLP_CFG_1, val);
2810 }
2811 
2812 static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2813 				      int on)
2814 {
2815 	unsigned long reg;
2816 	u64 val;
2817 
2818 	if (class < CLASS_CODE_ETHERTYPE1 ||
2819 	    class > CLASS_CODE_ETHERTYPE2)
2820 		return -EINVAL;
2821 
2822 	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2823 	val = nr64(reg);
2824 	if (on)
2825 		val |= L2_CLS_VLD;
2826 	else
2827 		val &= ~L2_CLS_VLD;
2828 	nw64(reg, val);
2829 
2830 	return 0;
2831 }
2832 
2833 #if 0
2834 static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
2835 				   u64 ether_type)
2836 {
2837 	unsigned long reg;
2838 	u64 val;
2839 
2840 	if (class < CLASS_CODE_ETHERTYPE1 ||
2841 	    class > CLASS_CODE_ETHERTYPE2 ||
2842 	    (ether_type & ~(u64)0xffff) != 0)
2843 		return -EINVAL;
2844 
2845 	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2846 	val = nr64(reg);
2847 	val &= ~L2_CLS_ETYPE;
2848 	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
2849 	nw64(reg, val);
2850 
2851 	return 0;
2852 }
2853 #endif
2854 
2855 static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2856 				     int on)
2857 {
2858 	unsigned long reg;
2859 	u64 val;
2860 
2861 	if (class < CLASS_CODE_USER_PROG1 ||
2862 	    class > CLASS_CODE_USER_PROG4)
2863 		return -EINVAL;
2864 
2865 	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2866 	val = nr64(reg);
2867 	if (on)
2868 		val |= L3_CLS_VALID;
2869 	else
2870 		val &= ~L3_CLS_VALID;
2871 	nw64(reg, val);
2872 
2873 	return 0;
2874 }
2875 
2876 #if 0
2877 static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
2878 				  int ipv6, u64 protocol_id,
2879 				  u64 tos_mask, u64 tos_val)
2880 {
2881 	unsigned long reg;
2882 	u64 val;
2883 
2884 	if (class < CLASS_CODE_USER_PROG1 ||
2885 	    class > CLASS_CODE_USER_PROG4 ||
2886 	    (protocol_id & ~(u64)0xff) != 0 ||
2887 	    (tos_mask & ~(u64)0xff) != 0 ||
2888 	    (tos_val & ~(u64)0xff) != 0)
2889 		return -EINVAL;
2890 
2891 	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2892 	val = nr64(reg);
2893 	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
2894 		 L3_CLS_TOSMASK | L3_CLS_TOS);
2895 	if (ipv6)
2896 		val |= L3_CLS_IPVER;
2897 	val |= (protocol_id << L3_CLS_PID_SHIFT);
2898 	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
2899 	val |= (tos_val << L3_CLS_TOS_SHIFT);
2900 	nw64(reg, val);
2901 
2902 	return 0;
2903 }
2904 #endif
2905 
2906 static int tcam_early_init(struct niu *np)
2907 {
2908 	unsigned long i;
2909 	int err;
2910 
2911 	tcam_enable(np, 0);
2912 	tcam_set_lat_and_ratio(np,
2913 			       DEFAULT_TCAM_LATENCY,
2914 			       DEFAULT_TCAM_ACCESS_RATIO);
2915 	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
2916 		err = tcam_user_eth_class_enable(np, i, 0);
2917 		if (err)
2918 			return err;
2919 	}
2920 	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
2921 		err = tcam_user_ip_class_enable(np, i, 0);
2922 		if (err)
2923 			return err;
2924 	}
2925 
2926 	return 0;
2927 }
2928 
2929 static int tcam_flush_all(struct niu *np)
2930 {
2931 	unsigned long i;
2932 
2933 	for (i = 0; i < np->parent->tcam_num_entries; i++) {
2934 		int err = tcam_flush(np, i);
2935 		if (err)
2936 			return err;
2937 	}
2938 	return 0;
2939 }
2940 
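/* Build a HASH_TBL_ADDR value; as written, auto-increment addressing
 * is requested only for single-entry accesses.
 */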
2941 static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
2942 {
2943 	return ((u64)index | (num_entries == 1 ?
2944 			      HASH_TBL_ADDR_AUTOINC : 0));
2945 }
2946 
2947 #if 0
2948 static int hash_read(struct niu *np, unsigned long partition,
2949 		     unsigned long index, unsigned long num_entries,
2950 		     u64 *data)
2951 {
2952 	u64 val = hash_addr_regval(index, num_entries);
2953 	unsigned long i;
2954 
2955 	if (partition >= FCRAM_NUM_PARTITIONS ||
2956 	    index + num_entries > FCRAM_SIZE)
2957 		return -EINVAL;
2958 
2959 	nw64(HASH_TBL_ADDR(partition), val);
2960 	for (i = 0; i < num_entries; i++)
2961 		data[i] = nr64(HASH_TBL_DATA(partition));
2962 
2963 	return 0;
2964 }
2965 #endif
2966 
2967 static int hash_write(struct niu *np, unsigned long partition,
2968 		      unsigned long index, unsigned long num_entries,
2969 		      u64 *data)
2970 {
2971 	u64 val = hash_addr_regval(index, num_entries);
2972 	unsigned long i;
2973 
2974 	if (partition >= FCRAM_NUM_PARTITIONS ||
2975 	    index + (num_entries * 8) > FCRAM_SIZE)
2976 		return -EINVAL;
2977 
2978 	nw64(HASH_TBL_ADDR(partition), val);
2979 	for (i = 0; i < num_entries; i++)
2980 		nw64(HASH_TBL_DATA(partition), data[i]);
2981 
2982 	return 0;
2983 }
2984 
2985 static void fflp_reset(struct niu *np)
2986 {
2987 	u64 val;
2988 
2989 	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
2990 	udelay(10);
2991 	nw64(FFLP_CFG_1, 0);
2992 
2993 	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
2994 	nw64(FFLP_CFG_1, val);
2995 }
2996 
2997 static void fflp_set_timings(struct niu *np)
2998 {
2999 	u64 val = nr64(FFLP_CFG_1);
3000 
3001 	val &= ~FFLP_CFG_1_FFLPINITDONE;
3002 	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
3003 	nw64(FFLP_CFG_1, val);
3004 
3005 	val = nr64(FFLP_CFG_1);
3006 	val |= FFLP_CFG_1_FFLPINITDONE;
3007 	nw64(FFLP_CFG_1, val);
3008 
3009 	val = nr64(FCRAM_REF_TMR);
3010 	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
3011 	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
3012 	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
3013 	nw64(FCRAM_REF_TMR, val);
3014 }
3015 
3016 static int fflp_set_partition(struct niu *np, u64 partition,
3017 			      u64 mask, u64 base, int enable)
3018 {
3019 	unsigned long reg;
3020 	u64 val;
3021 
3022 	if (partition >= FCRAM_NUM_PARTITIONS ||
3023 	    (mask & ~(u64)0x1f) != 0 ||
3024 	    (base & ~(u64)0x1f) != 0)
3025 		return -EINVAL;
3026 
3027 	reg = FLW_PRT_SEL(partition);
3028 
3029 	val = nr64(reg);
3030 	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
3031 	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
3032 	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
3033 	if (enable)
3034 		val |= FLW_PRT_SEL_EXT;
3035 	nw64(reg, val);
3036 
3037 	return 0;
3038 }
3039 
3040 static int fflp_disable_all_partitions(struct niu *np)
3041 {
3042 	unsigned long i;
3043 
3044 	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
3045 		int err = fflp_set_partition(np, i, 0, 0, 0);
3046 		if (err)
3047 			return err;
3048 	}
3049 	return 0;
3050 }
3051 
3052 static void fflp_llcsnap_enable(struct niu *np, int on)
3053 {
3054 	u64 val = nr64(FFLP_CFG_1);
3055 
3056 	if (on)
3057 		val |= FFLP_CFG_1_LLCSNAP;
3058 	else
3059 		val &= ~FFLP_CFG_1_LLCSNAP;
3060 	nw64(FFLP_CFG_1, val);
3061 }
3062 
3063 static void fflp_errors_enable(struct niu *np, int on)
3064 {
3065 	u64 val = nr64(FFLP_CFG_1);
3066 
3067 	if (on)
3068 		val &= ~FFLP_CFG_1_ERRORDIS;
3069 	else
3070 		val |= FFLP_CFG_1_ERRORDIS;
3071 	nw64(FFLP_CFG_1, val);
3072 }
3073 
3074 static int fflp_hash_clear(struct niu *np)
3075 {
3076 	struct fcram_hash_ipv4 ent;
3077 	unsigned long i;
3078 
3079 	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
3080 	memset(&ent, 0, sizeof(ent));
3081 	ent.header = HASH_HEADER_EXT;
3082 
3083 	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
3084 		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3085 		if (err)
3086 			return err;
3087 	}
3088 	return 0;
3089 }
3090 
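/* One-time FFLP/TCAM classifier bring-up, shared across all ports of
 * a parent and done under the parent lock; PARENT_FLGS_CLS_HWINIT
 * keeps it from running more than once.
 */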
3091 static int fflp_early_init(struct niu *np)
3092 {
3093 	struct niu_parent *parent;
3094 	unsigned long flags;
3095 	int err;
3096 
3097 	niu_lock_parent(np, flags);
3098 
3099 	parent = np->parent;
3100 	err = 0;
3101 	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
3102 		niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
3103 		       np->port);
3104 		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3105 			fflp_reset(np);
3106 			fflp_set_timings(np);
3107 			err = fflp_disable_all_partitions(np);
3108 			if (err) {
3109 				niudbg(PROBE, "fflp_disable_all_partitions "
3110 				       "failed, err=%d\n", err);
3111 				goto out;
3112 			}
3113 		}
3114 
3115 		err = tcam_early_init(np);
3116 		if (err) {
3117 			niudbg(PROBE, "tcam_early_init failed, err=%d\n",
3118 			       err);
3119 			goto out;
3120 		}
3121 		fflp_llcsnap_enable(np, 1);
3122 		fflp_errors_enable(np, 0);
3123 		nw64(H1POLY, 0);
3124 		nw64(H2POLY, 0);
3125 
3126 		err = tcam_flush_all(np);
3127 		if (err) {
3128 			niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
3129 			       err);
3130 			goto out;
3131 		}
3132 		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3133 			err = fflp_hash_clear(np);
3134 			if (err) {
3135 				niudbg(PROBE, "fflp_hash_clear failed, "
3136 				       "err=%d\n", err);
3137 				goto out;
3138 			}
3139 		}
3140 
3141 		vlan_tbl_clear(np);
3142 
3143 		niudbg(PROBE, "fflp_early_init: Success\n");
3144 		parent->flags |= PARENT_FLGS_CLS_HWINIT;
3145 	}
3146 out:
3147 	niu_unlock_parent(np, flags);
3148 	return err;
3149 }
3150 
3151 static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
3152 {
3153 	if (class_code < CLASS_CODE_USER_PROG1 ||
3154 	    class_code > CLASS_CODE_SCTP_IPV6)
3155 		return -EINVAL;
3156 
3157 	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3158 	return 0;
3159 }
3160 
3161 static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
3162 {
3163 	if (class_code < CLASS_CODE_USER_PROG1 ||
3164 	    class_code > CLASS_CODE_SCTP_IPV6)
3165 		return -EINVAL;
3166 
3167 	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3168 	return 0;
3169 }
3170 
3171 static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
3172 			      u32 offset, u32 size)
3173 {
3174 	int i = skb_shinfo(skb)->nr_frags;
3175 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3176 
3177 	frag->page = page;
3178 	frag->page_offset = offset;
3179 	frag->size = size;
3180 
3181 	skb->len += size;
3182 	skb->data_len += size;
3183 	skb->truesize += size;
3184 
3185 	skb_shinfo(skb)->nr_frags = i + 1;
3186 }
3187 
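/* Fold a buffer's DMA address down to an rxhash[] bucket: hash on the
 * page frame number, modulo the table size.
 */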
3188 static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3189 {
3190 	a >>= PAGE_SHIFT;
3191 	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3192 
3193 	return (a & (MAX_RBR_RING_SIZE - 1));
3194 }
3195 
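/* Find the page backing a DMA address.  Pages hang off rp->rxhash[]
 * in chains threaded through page->mapping; *link is set to the
 * pointer slot referencing the page so the caller can unlink it.
 */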
3196 static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
3197 				    struct page ***link)
3198 {
3199 	unsigned int h = niu_hash_rxaddr(rp, addr);
3200 	struct page *p, **pp;
3201 
3202 	addr &= PAGE_MASK;
3203 	pp = &rp->rxhash[h];
3204 	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
3205 		if (p->index == addr) {
3206 			*link = pp;
3207 			break;
3208 		}
3209 	}
3210 
3211 	return p;
3212 }
3213 
3214 static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
3215 {
3216 	unsigned int h = niu_hash_rxaddr(rp, base);
3217 
3218 	page->index = base;
3219 	page->mapping = (struct address_space *) rp->rxhash[h];
3220 	rp->rxhash[h] = page;
3221 }
3222 
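/* Allocate and DMA-map one page and carve it into rbr_blocks_per_page
 * receive blocks, taking extra page references so that each block
 * effectively owns one reference.
 */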
3223 static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3224 			    gfp_t mask, int start_index)
3225 {
3226 	struct page *page;
3227 	u64 addr;
3228 	int i;
3229 
3230 	page = alloc_page(mask);
3231 	if (!page)
3232 		return -ENOMEM;
3233 
3234 	addr = np->ops->map_page(np->device, page, 0,
3235 				 PAGE_SIZE, DMA_FROM_DEVICE);
3236 
3237 	niu_hash_page(rp, page, addr);
3238 	if (rp->rbr_blocks_per_page > 1)
3239 		atomic_add(rp->rbr_blocks_per_page - 1,
3240 			   &compound_head(page)->_count);
3241 
3242 	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
3243 		__le32 *rbr = &rp->rbr[start_index + i];
3244 
3245 		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
3246 		addr += rp->rbr_block_size;
3247 	}
3248 
3249 	return 0;
3250 }
3251 
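/* Account one buffer returned to the RBR; add a fresh page whenever a
 * full page worth of blocks is pending, and kick the hardware once
 * the kick threshold is reached.
 */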
3252 static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3253 {
3254 	int index = rp->rbr_index;
3255 
3256 	rp->rbr_pending++;
3257 	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
3258 		int err = niu_rbr_add_page(np, rp, mask, index);
3259 
3260 		if (unlikely(err)) {
3261 			rp->rbr_pending--;
3262 			return;
3263 		}
3264 
3265 		rp->rbr_index += rp->rbr_blocks_per_page;
3266 		BUG_ON(rp->rbr_index > rp->rbr_table_size);
3267 		if (rp->rbr_index == rp->rbr_table_size)
3268 			rp->rbr_index = 0;
3269 
3270 		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
3271 			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
3272 			rp->rbr_pending = 0;
3273 		}
3274 	}
3275 }
3276 
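/* Consume all RCR entries of one packet without building an skb (used
 * when allocation fails), freeing each backing page as its last block
 * completes.  Returns the number of RCR entries consumed.
 */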
3277 static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
3278 {
3279 	unsigned int index = rp->rcr_index;
3280 	int num_rcr = 0;
3281 
3282 	rp->rx_dropped++;
3283 	while (1) {
3284 		struct page *page, **link;
3285 		u64 addr, val;
3286 		u32 rcr_size;
3287 
3288 		num_rcr++;
3289 
3290 		val = le64_to_cpup(&rp->rcr[index]);
3291 		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3292 			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3293 		page = niu_find_rxpage(rp, addr, &link);
3294 
3295 		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3296 					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3297 		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
3298 			*link = (struct page *) page->mapping;
3299 			np->ops->unmap_page(np->device, page->index,
3300 					    PAGE_SIZE, DMA_FROM_DEVICE);
3301 			page->index = 0;
3302 			page->mapping = NULL;
3303 			__free_page(page);
3304 			rp->rbr_refill_pending++;
3305 		}
3306 
3307 		index = NEXT_RCR(rp, index);
3308 		if (!(val & RCR_ENTRY_MULTI))
3309 			break;
3311 	}
3312 	rp->rcr_index = index;
3313 
3314 	return num_rcr;
3315 }
3316 
3317 static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
3318 {
3319 	unsigned int index = rp->rcr_index;
3320 	struct sk_buff *skb;
3321 	int len, num_rcr;
3322 
3323 	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3324 	if (unlikely(!skb))
3325 		return niu_rx_pkt_ignore(np, rp);
3326 
3327 	num_rcr = 0;
3328 	while (1) {
3329 		struct page *page, **link;
3330 		u32 rcr_size, append_size;
3331 		u64 addr, val, off;
3332 
3333 		num_rcr++;
3334 
3335 		val = le64_to_cpup(&rp->rcr[index]);
3336 
3337 		len = (val & RCR_ENTRY_L2_LEN) >>
3338 			RCR_ENTRY_L2_LEN_SHIFT;
3339 		len -= ETH_FCS_LEN;
3340 
3341 		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3342 			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3343 		page = niu_find_rxpage(rp, addr, &link);
3344 
3345 		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3346 					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3347 
3348 		off = addr & ~PAGE_MASK;
3349 		append_size = rcr_size;
3350 		if (num_rcr == 1) {
3351 			int ptype;
3352 
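			/* The hardware prepends a 2-byte pad to the frame so
			 * the IP header is 4-byte aligned; skip it in the
			 * first RCR entry of each packet.
			 */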
3353 			off += 2;
3354 			append_size -= 2;
3355 
3356 			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3357 			if ((ptype == RCR_PKT_TYPE_TCP ||
3358 			     ptype == RCR_PKT_TYPE_UDP) &&
3359 			    !(val & (RCR_ENTRY_NOPORT |
3360 				     RCR_ENTRY_ERROR)))
3361 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3362 			else
3363 				skb->ip_summed = CHECKSUM_NONE;
3364 		}
3365 		if (!(val & RCR_ENTRY_MULTI))
3366 			append_size = len - skb->len;
3367 
3368 		niu_rx_skb_append(skb, page, off, append_size);
3369 		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
3370 			*link = (struct page *) page->mapping;
3371 			np->ops->unmap_page(np->device, page->index,
3372 					    PAGE_SIZE, DMA_FROM_DEVICE);
3373 			page->index = 0;
3374 			page->mapping = NULL;
3375 			rp->rbr_refill_pending++;
3376 		} else
3377 			get_page(page);
3378 
3379 		index = NEXT_RCR(rp, index);
3380 		if (!(val & RCR_ENTRY_MULTI))
3381 			break;
3383 	}
3384 	rp->rcr_index = index;
3385 
3386 	skb_reserve(skb, NET_IP_ALIGN);
3387 	__pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
3388 
3389 	rp->rx_packets++;
3390 	rp->rx_bytes += skb->len;
3391 
3392 	skb->protocol = eth_type_trans(skb, np->dev);
3393 	netif_receive_skb(skb);
3394 
3395 	return num_rcr;
3396 }
3397 
3398 static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3399 {
3400 	int blocks_per_page = rp->rbr_blocks_per_page;
3401 	int err, index = rp->rbr_index;
3402 
3403 	err = 0;
3404 	while (index < (rp->rbr_table_size - blocks_per_page)) {
3405 		err = niu_rbr_add_page(np, rp, mask, index);
3406 		if (err)
3407 			break;
3408 
3409 		index += blocks_per_page;
3410 	}
3411 
3412 	rp->rbr_index = index;
3413 	return err;
3414 }
3415 
3416 static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3417 {
3418 	int i;
3419 
3420 	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
3421 		struct page *page;
3422 
3423 		page = rp->rxhash[i];
3424 		while (page) {
3425 			struct page *next = (struct page *) page->mapping;
3426 			u64 base = page->index;
3427 
3428 			np->ops->unmap_page(np->device, base, PAGE_SIZE,
3429 					    DMA_FROM_DEVICE);
3430 			page->index = 0;
3431 			page->mapping = NULL;
3432 
3433 			__free_page(page);
3434 
3435 			page = next;
3436 		}
3437 	}
3438 
3439 	for (i = 0; i < rp->rbr_table_size; i++)
3440 		rp->rbr[i] = cpu_to_le32(0);
3441 	rp->rbr_index = 0;
3442 }
3443 
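/* Reclaim one completed TX packet: update the ring counters from the
 * packet header, unmap the head and all fragment descriptors, and
 * return the index of the first descriptor past this packet.
 */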
3444 static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3445 {
3446 	struct tx_buff_info *tb = &rp->tx_buffs[idx];
3447 	struct sk_buff *skb = tb->skb;
3448 	struct tx_pkt_hdr *tp;
3449 	u64 tx_flags;
3450 	int i, len;
3451 
3452 	tp = (struct tx_pkt_hdr *) skb->data;
3453 	tx_flags = le64_to_cpup(&tp->flags);
3454 
3455 	rp->tx_packets++;
3456 	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
3457 			 ((tx_flags & TXHDR_PAD) / 2));
3458 
3459 	len = skb_headlen(skb);
3460 	np->ops->unmap_single(np->device, tb->mapping,
3461 			      len, DMA_TO_DEVICE);
3462 
3463 	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
3464 		rp->mark_pending--;
3465 
3466 	tb->skb = NULL;
3467 	do {
3468 		idx = NEXT_TX(rp, idx);
3469 		len -= MAX_TX_DESC_LEN;
3470 	} while (len > 0);
3471 
3472 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3473 		tb = &rp->tx_buffs[idx];
3474 		BUG_ON(tb->skb != NULL);
3475 		np->ops->unmap_page(np->device, tb->mapping,
3476 				    skb_shinfo(skb)->frags[i].size,
3477 				    DMA_TO_DEVICE);
3478 		idx = NEXT_TX(rp, idx);
3479 	}
3480 
3481 	dev_kfree_skb(skb);
3482 
3483 	return idx;
3484 }
3485 
3486 #define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
3487 
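/* Service TX completions on one ring.  The hardware packet count
 * wraps, so the number of new completions is computed modulo the
 * counter width; the queue is woken once at least a quarter of the
 * ring is free again.
 */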
3488 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3489 {
3490 	struct netdev_queue *txq;
3491 	u16 pkt_cnt, tmp;
3492 	int cons, index;
3493 	u64 cs;
3494 
3495 	index = (rp - np->tx_rings);
3496 	txq = netdev_get_tx_queue(np->dev, index);
3497 
3498 	cs = rp->tx_cs;
3499 	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
3500 		goto out;
3501 
3502 	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
3503 	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
3504 		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
3505 
3506 	rp->last_pkt_cnt = tmp;
3507 
3508 	cons = rp->cons;
3509 
3510 	niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
3511 	       np->dev->name, pkt_cnt, cons);
3512 
3513 	while (pkt_cnt--)
3514 		cons = release_tx_packet(np, rp, cons);
3515 
3516 	rp->cons = cons;
3517 	smp_mb();
3518 
3519 out:
3520 	if (unlikely(netif_tx_queue_stopped(txq) &&
3521 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
3522 		__netif_tx_lock(txq, smp_processor_id());
3523 		if (netif_tx_queue_stopped(txq) &&
3524 		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
3525 			netif_tx_wake_queue(txq);
3526 		__netif_tx_unlock(txq);
3527 	}
3528 }
3529 
3530 static inline void niu_sync_rx_discard_stats(struct niu *np,
3531 					     struct rx_ring_info *rp,
3532 					     const int limit)
3533 {
3534 	/* This elaborate scheme is needed for reading the RX discard
3535 	 * counters, as they are only 16-bit and can overflow quickly,
3536 	 * and because the overflow indication bit is not usable as
3537 	 * the counter value does not wrap, but remains at max value
3538 	 * 0xFFFF.
3539 	 *
3540 	 * In theory and in practice counters can be lost in between
3541 	 * reading nr64() and clearing the counter nw64().  For this
3542 	 * reason, the number of counter clearings nw64() is
3543 	 * limited/reduced though the limit parameter.
3544 	 */
3545 	int rx_channel = rp->rx_channel;
3546 	u32 misc, wred;
3547 
3548 	/* RXMISC (Receive Miscellaneous Discard Count), covers the
3549 	 * following discard events: IPP (Input Port Process),
3550 	 * FFLP/TCAM lookup drops, a full RCR (Receive Completion Ring),
3551 	 * and an empty RBR (Receive Block Ring) prefetch buffer.
3552 	 */
3553 	misc = nr64(RXMISC(rx_channel));
3554 	if (unlikely((misc & RXMISC_COUNT) > limit)) {
3555 		nw64(RXMISC(rx_channel), 0);
3556 		rp->rx_errors += misc & RXMISC_COUNT;
3557 
3558 		if (unlikely(misc & RXMISC_OFLOW))
3559 			dev_err(np->device, "rx-%d: Counter overflow "
3560 				"RXMISC discard\n", rx_channel);
3561 
3562 		niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
3563 		       np->dev->name, rx_channel, misc, misc-limit);
3564 	}
3565 
3566 	/* WRED (Weighted Random Early Discard) by hardware */
3567 	wred = nr64(RED_DIS_CNT(rx_channel));
3568 	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
3569 		nw64(RED_DIS_CNT(rx_channel), 0);
3570 		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
3571 
3572 		if (unlikely(wred & RED_DIS_CNT_OFLOW))
3573 			dev_err(np->device, "rx-%d: Counter overflow "
3574 				"WRED discard\n", rx_channel);
3575 
3576 		niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
3577 		       np->dev->name, rx_channel, wred, wred-limit);
3578 	}
3579 }
3580 
3581 static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
3582 {
3583 	int qlen, rcr_done = 0, work_done = 0;
3584 	struct rxdma_mailbox *mbox = rp->mbox;
3585 	u64 stat;
3586 
3587 #if 1
3588 	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3589 	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
3590 #else
3591 	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3592 	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
3593 #endif
3594 	mbox->rx_dma_ctl_stat = 0;
3595 	mbox->rcrstat_a = 0;
3596 
3597 	niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
3598 	       np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
3599 
3600 	rcr_done = work_done = 0;
3601 	qlen = min(qlen, budget);
3602 	while (work_done < qlen) {
3603 		rcr_done += niu_process_rx_pkt(np, rp);
3604 		work_done++;
3605 	}
3606 
3607 	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
3608 		unsigned int i;
3609 
3610 		for (i = 0; i < rp->rbr_refill_pending; i++)
3611 			niu_rbr_refill(np, rp, GFP_ATOMIC);
3612 		rp->rbr_refill_pending = 0;
3613 	}
3614 
3615 	stat = (RX_DMA_CTL_STAT_MEX |
3616 		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
3617 		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
3618 
3619 	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
3620 
3621 	/* Only sync the discard stats when qlen indicates potential for drops */
3622 	if (qlen > 10)
3623 		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
3624 
3625 	return work_done;
3626 }
3627 
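/* NAPI core.  v0 carries one pending-work bit per channel: TX
 * channels in the upper 32 bits, RX channels in the lower 32.  The
 * per-channel interrupt masks are re-armed as each channel is
 * serviced.
 */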
3628 static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3629 {
3630 	u64 v0 = lp->v0;
3631 	u32 tx_vec = (v0 >> 32);
3632 	u32 rx_vec = (v0 & 0xffffffff);
3633 	int i, work_done = 0;
3634 
3635 	niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
3636 	       np->dev->name, (unsigned long long) v0);
3637 
3638 	for (i = 0; i < np->num_tx_rings; i++) {
3639 		struct tx_ring_info *rp = &np->tx_rings[i];
3640 		if (tx_vec & (1 << rp->tx_channel))
3641 			niu_tx_work(np, rp);
3642 		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
3643 	}
3644 
3645 	for (i = 0; i < np->num_rx_rings; i++) {
3646 		struct rx_ring_info *rp = &np->rx_rings[i];
3647 
3648 		if (rx_vec & (1 << rp->rx_channel)) {
3649 			int this_work_done;
3650 
3651 			this_work_done = niu_rx_work(np, rp,
3652 						     budget);
3653 
3654 			budget -= this_work_done;
3655 			work_done += this_work_done;
3656 		}
3657 		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
3658 	}
3659 
3660 	return work_done;
3661 }
3662 
3663 static int niu_poll(struct napi_struct *napi, int budget)
3664 {
3665 	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3666 	struct niu *np = lp->np;
3667 	int work_done;
3668 
3669 	work_done = niu_poll_core(np, lp, budget);
3670 
3671 	if (work_done < budget) {
3672 		netif_rx_complete(napi);
3673 		niu_ldg_rearm(np, lp, 1);
3674 	}
3675 	return work_done;
3676 }
3677 
3678 static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3679 				  u64 stat)
3680 {
3681 	dev_err(np->device, PFX "%s: RX channel %u errors ( ",
3682 		np->dev->name, rp->rx_channel);
3683 
3684 	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3685 		printk("RBR_TMOUT ");
3686 	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3687 		printk("RSP_CNT ");
3688 	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3689 		printk("BYTE_EN_BUS ");
3690 	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3691 		printk("RSP_DAT ");
3692 	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3693 		printk("RCR_ACK ");
3694 	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3695 		printk("RCR_SHA_PAR ");
3696 	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3697 		printk("RBR_PRE_PAR ");
3698 	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3699 		printk("CONFIG ");
3700 	if (stat & RX_DMA_CTL_STAT_RCRINCON)
3701 		printk("RCRINCON ");
3702 	if (stat & RX_DMA_CTL_STAT_RCRFULL)
3703 		printk("RCRFULL ");
3704 	if (stat & RX_DMA_CTL_STAT_RBRFULL)
3705 		printk("RBRFULL ");
3706 	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3707 		printk("RBRLOGPAGE ");
3708 	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3709 		printk("CFIGLOGPAGE ");
3710 	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3711 		printk("DC_FIDO ");
3712 
3713 	printk(")\n");
3714 }
3715 
3716 static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3717 {
3718 	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3719 	int err = 0;
3720 
3721 
3722 	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
3723 		    RX_DMA_CTL_STAT_PORT_FATAL))
3724 		err = -EINVAL;
3725 
3726 	if (err) {
3727 		dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
3728 			np->dev->name, rp->rx_channel,
3729 			(unsigned long long) stat);
3730 
3731 		niu_log_rxchan_errors(np, rp, stat);
3732 	}
3733 
3734 	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3735 	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
3736 
3737 	return err;
3738 }
3739 
3740 static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3741 				  u64 cs)
3742 {
3743 	dev_err(np->device, PFX "%s: TX channel %u errors ( ",
3744 		np->dev->name, rp->tx_channel);
3745 
3746 	if (cs & TX_CS_MBOX_ERR)
3747 		printk("MBOX ");
3748 	if (cs & TX_CS_PKT_SIZE_ERR)
3749 		printk("PKT_SIZE ");
3750 	if (cs & TX_CS_TX_RING_OFLOW)
3751 		printk("TX_RING_OFLOW ");
3752 	if (cs & TX_CS_PREF_BUF_PAR_ERR)
3753 		printk("PREF_BUF_PAR ");
3754 	if (cs & TX_CS_NACK_PREF)
3755 		printk("NACK_PREF ");
3756 	if (cs & TX_CS_NACK_PKT_RD)
3757 		printk("NACK_PKT_RD ");
3758 	if (cs & TX_CS_CONF_PART_ERR)
3759 		printk("CONF_PART ");
3760 	if (cs & TX_CS_PKT_PRT_ERR)
3761 		printk("PKT_PTR ");
3762 
3763 	printk(")\n");
3764 }
3765 
3766 static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3767 {
3768 	u64 cs, logh, logl;
3769 
3770 	cs = nr64(TX_CS(rp->tx_channel));
3771 	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3772 	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3773 
3774 	dev_err(np->device, PFX "%s: TX channel %u error, "
3775 		"cs[%llx] logh[%llx] logl[%llx]\n",
3776 		np->dev->name, rp->tx_channel,
3777 		(unsigned long long) cs,
3778 		(unsigned long long) logh,
3779 		(unsigned long long) logl);
3780 
3781 	niu_log_txchan_errors(np, rp, cs);
3782 
3783 	return -ENODEV;
3784 }
3785 
3786 static int niu_mif_interrupt(struct niu *np)
3787 {
3788 	u64 mif_status = nr64(MIF_STATUS);
3789 	int phy_mdint = 0;
3790 
3791 	if (np->flags & NIU_FLAGS_XMAC) {
3792 		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3793 
3794 		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3795 			phy_mdint = 1;
3796 	}
3797 
3798 	dev_err(np->device, PFX "%s: MIF interrupt, "
3799 		"stat[%llx] phy_mdint(%d)\n",
3800 		np->dev->name, (unsigned long long) mif_status, phy_mdint);
3801 
3802 	return -ENODEV;
3803 }
3804 
3805 static void niu_xmac_interrupt(struct niu *np)
3806 {
3807 	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3808 	u64 val;
3809 
3810 	val = nr64_mac(XTXMAC_STATUS);
3811 	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3812 		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3813 	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3814 		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3815 	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3816 		mp->tx_fifo_errors++;
3817 	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3818 		mp->tx_overflow_errors++;
3819 	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3820 		mp->tx_max_pkt_size_errors++;
3821 	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3822 		mp->tx_underflow_errors++;
3823 
3824 	val = nr64_mac(XRXMAC_STATUS);
3825 	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3826 		mp->rx_local_faults++;
3827 	if (val & XRXMAC_STATUS_RFLT_DET)
3828 		mp->rx_remote_faults++;
3829 	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3830 		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3831 	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3832 		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3833 	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3834 		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3835 	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3836 		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3837 	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3838 		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3841 	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
3842 		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
3843 	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
3844 		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
3845 	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
3846 		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
3847 	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
3848 		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
3849 	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
3850 		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
3851 	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
3852 		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
3853 	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
3854 		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
3855 	if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP)
3856 		mp->rx_octets += RXMAC_BT_CNT_COUNT;
3857 	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
3858 		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
3859 	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
3860 		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
3861 	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
3862 		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
3863 	if (val & XRXMAC_STATUS_RXUFLOW)
3864 		mp->rx_underflows++;
3865 	if (val & XRXMAC_STATUS_RXOFLOW)
3866 		mp->rx_overflows++;
3867 
3868 	val = nr64_mac(XMAC_FC_STAT);
3869 	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
3870 		mp->pause_off_state++;
3871 	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
3872 		mp->pause_on_state++;
3873 	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
3874 		mp->pause_received++;
3875 }
3876 
3877 static void niu_bmac_interrupt(struct niu *np)
3878 {
3879 	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
3880 	u64 val;
3881 
3882 	val = nr64_mac(BTXMAC_STATUS);
3883 	if (val & BTXMAC_STATUS_UNDERRUN)
3884 		mp->tx_underflow_errors++;
3885 	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
3886 		mp->tx_max_pkt_size_errors++;
3887 	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
3888 		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
3889 	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
3890 		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
3891 
3892 	val = nr64_mac(BRXMAC_STATUS);
3893 	if (val & BRXMAC_STATUS_OVERFLOW)
3894 		mp->rx_overflows++;
3895 	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
3896 		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
3897 	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
3898 		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
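	/* Note: the two checks below bump the CRC and length error
	 * counters by the ALIGN_ERR and CODE_VIOL count masks.  That
	 * looks like a copy-and-paste of the wrong *_CNT_COUNT macros,
	 * though the added values are presumably identical if the BMAC
	 * error counters all share the same width.
	 */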
3899 	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
3900 		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3901 	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
3902 		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
3903 
3904 	val = nr64_mac(BMAC_CTRL_STATUS);
3905 	if (val & BMAC_CTRL_STATUS_NOPAUSE)
3906 		mp->pause_off_state++;
3907 	if (val & BMAC_CTRL_STATUS_PAUSE)
3908 		mp->pause_on_state++;
3909 	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
3910 		mp->pause_received++;
3911 }
3912 
3913 static int niu_mac_interrupt(struct niu *np)
3914 {
3915 	if (np->flags & NIU_FLAGS_XMAC)
3916 		niu_xmac_interrupt(np);
3917 	else
3918 		niu_bmac_interrupt(np);
3919 
3920 	return 0;
3921 }
3922 
3923 static void niu_log_device_error(struct niu *np, u64 stat)
3924 {
3925 	dev_err(np->device, PFX "%s: Core device errors ( ",
3926 		np->dev->name);
3927 
3928 	if (stat & SYS_ERR_MASK_META2)
3929 		printk("META2 ");
3930 	if (stat & SYS_ERR_MASK_META1)
3931 		printk("META1 ");
3932 	if (stat & SYS_ERR_MASK_PEU)
3933 		printk("PEU ");
3934 	if (stat & SYS_ERR_MASK_TXC)
3935 		printk("TXC ");
3936 	if (stat & SYS_ERR_MASK_RDMC)
3937 		printk("RDMC ");
3938 	if (stat & SYS_ERR_MASK_TDMC)
3939 		printk("TDMC ");
3940 	if (stat & SYS_ERR_MASK_ZCP)
3941 		printk("ZCP ");
3942 	if (stat & SYS_ERR_MASK_FFLP)
3943 		printk("FFLP ");
3944 	if (stat & SYS_ERR_MASK_IPP)
3945 		printk("IPP ");
3946 	if (stat & SYS_ERR_MASK_MAC)
3947 		printk("MAC ");
3948 	if (stat & SYS_ERR_MASK_SMX)
3949 		printk("SMX ");
3950 
3951 	printk(")\n");
3952 }
3953 
3954 static int niu_device_error(struct niu *np)
3955 {
3956 	u64 stat = nr64(SYS_ERR_STAT);
3957 
3958 	dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
3959 		np->dev->name, (unsigned long long) stat);
3960 
3961 	niu_log_device_error(np, stat);
3962 
3963 	return -ENODEV;
3964 }
3965 
3966 static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
3967 			      u64 v0, u64 v1, u64 v2)
3968 {
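	/* As the handling below implies: the low 32 bits of v1 carry
	 * per-RX-channel error status, bits 32-62 carry per-TX-channel
	 * error status, bit 63 of v0|v1 flags the MIF, and v2 flags
	 * MAC (0x01ef) and core device (0x0210) conditions.
	 */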
3969 
3970 	int i, err = 0;
3971 
3972 	lp->v0 = v0;
3973 	lp->v1 = v1;
3974 	lp->v2 = v2;
3975 
3976 	if (v1 & 0x00000000ffffffffULL) {
3977 		u32 rx_vec = (v1 & 0xffffffff);
3978 
3979 		for (i = 0; i < np->num_rx_rings; i++) {
3980 			struct rx_ring_info *rp = &np->rx_rings[i];
3981 
3982 			if (rx_vec & (1 << rp->rx_channel)) {
3983 				int r = niu_rx_error(np, rp);
3984 				if (r) {
3985 					err = r;
3986 				} else {
3987 					if (!v0)
3988 						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3989 						     RX_DMA_CTL_STAT_MEX);
3990 				}
3991 			}
3992 		}
3993 	}
3994 	if (v1 & 0x7fffffff00000000ULL) {
3995 		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
3996 
3997 		for (i = 0; i < np->num_tx_rings; i++) {
3998 			struct tx_ring_info *rp = &np->tx_rings[i];
3999 
4000 			if (tx_vec & (1 << rp->tx_channel)) {
4001 				int r = niu_tx_error(np, rp);
4002 				if (r)
4003 					err = r;
4004 			}
4005 		}
4006 	}
4007 	if ((v0 | v1) & 0x8000000000000000ULL) {
4008 		int r = niu_mif_interrupt(np);
4009 		if (r)
4010 			err = r;
4011 	}
4012 	if (v2) {
4013 		if (v2 & 0x01ef) {
4014 			int r = niu_mac_interrupt(np);
4015 			if (r)
4016 				err = r;
4017 		}
4018 		if (v2 & 0x0210) {
4019 			int r = niu_device_error(np);
4020 			if (r)
4021 				err = r;
4022 		}
4023 	}
4024 
4025 	if (err)
4026 		niu_enable_interrupts(np, 0);
4027 
4028 	return err;
4029 }
4030 
4031 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
4032 			    int ldn)
4033 {
4034 	struct rxdma_mailbox *mbox = rp->mbox;
4035 	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
4036 
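	/* Only ack the RCR threshold/timeout conditions here; the
	 * actual packet work is deferred to the NAPI poll path that
	 * this interrupt ultimately schedules.
	 */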
4037 	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
4038 		      RX_DMA_CTL_STAT_RCRTO);
4039 	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
4040 
4041 	niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
4042 	       np->dev->name, (unsigned long long) stat);
4043 }
4044 
4045 static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
4046 			    int ldn)
4047 {
4048 	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
4049 
4050 	niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
4051 	       np->dev->name, (unsigned long long) rp->tx_cs);
4052 }
4053 
4054 static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4055 {
4056 	struct niu_parent *parent = np->parent;
4057 	u32 rx_vec, tx_vec;
4058 	int i;
4059 
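	/* v0 packs TX completion bits in its upper 32 bits and RX bits
	 * in its lower 32.  Each matching logical device is masked via
	 * LD_IM0 so it stays quiet until the LDG is re-armed, which
	 * happens once NAPI polling has finished.
	 */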
4060 	tx_vec = (v0 >> 32);
4061 	rx_vec = (v0 & 0xffffffff);
4062 
4063 	for (i = 0; i < np->num_rx_rings; i++) {
4064 		struct rx_ring_info *rp = &np->rx_rings[i];
4065 		int ldn = LDN_RXDMA(rp->rx_channel);
4066 
4067 		if (parent->ldg_map[ldn] != ldg)
4068 			continue;
4069 
4070 		nw64(LD_IM0(ldn), LD_IM0_MASK);
4071 		if (rx_vec & (1 << rp->rx_channel))
4072 			niu_rxchan_intr(np, rp, ldn);
4073 	}
4074 
4075 	for (i = 0; i < np->num_tx_rings; i++) {
4076 		struct tx_ring_info *rp = &np->tx_rings[i];
4077 		int ldn = LDN_TXDMA(rp->tx_channel);
4078 
4079 		if (parent->ldg_map[ldn] != ldg)
4080 			continue;
4081 
4082 		nw64(LD_IM0(ldn), LD_IM0_MASK);
4083 		if (tx_vec & (1 << rp->tx_channel))
4084 			niu_txchan_intr(np, rp, ldn);
4085 	}
4086 }
4087 
4088 static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4089 			      u64 v0, u64 v1, u64 v2)
4090 {
4091 	if (likely(netif_rx_schedule_prep(&lp->napi))) {
4092 		lp->v0 = v0;
4093 		lp->v1 = v1;
4094 		lp->v2 = v2;
4095 		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
4096 		__netif_rx_schedule(&lp->napi);
4097 	}
4098 }
4099 
4100 static irqreturn_t niu_interrupt(int irq, void *dev_id)
4101 {
4102 	struct niu_ldg *lp = dev_id;
4103 	struct niu *np = lp->np;
4104 	int ldg = lp->ldg_num;
4105 	unsigned long flags;
4106 	u64 v0, v1, v2;
4107 
4108 	if (netif_msg_intr(np))
4109 		printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
4110 		       lp, ldg);
4111 
4112 	spin_lock_irqsave(&np->lock, flags);
4113 
4114 	v0 = nr64(LDSV0(ldg));
4115 	v1 = nr64(LDSV1(ldg));
4116 	v2 = nr64(LDSV2(ldg));
4117 
4118 	if (netif_msg_intr(np))
4119 		printk("v0[%llx] v1[%llx] v2[%llx]\n",
4120 		       (unsigned long long) v0,
4121 		       (unsigned long long) v1,
4122 		       (unsigned long long) v2);
4123 
4124 	if (unlikely(!v0 && !v1 && !v2)) {
4125 		spin_unlock_irqrestore(&np->lock, flags);
4126 		return IRQ_NONE;
4127 	}
4128 
4129 	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
4130 		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
4131 		if (err)
4132 			goto out;
4133 	}
4134 	if (likely(v0 & ~((u64)1 << LDN_MIF)))
4135 		niu_schedule_napi(np, lp, v0, v1, v2);
4136 	else
4137 		niu_ldg_rearm(np, lp, 1);
4138 out:
4139 	spin_unlock_irqrestore(&np->lock, flags);
4140 
4141 	return IRQ_HANDLED;
4142 }
4143 
4144 static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
4145 {
4146 	if (rp->mbox) {
4147 		np->ops->free_coherent(np->device,
4148 				       sizeof(struct rxdma_mailbox),
4149 				       rp->mbox, rp->mbox_dma);
4150 		rp->mbox = NULL;
4151 	}
4152 	if (rp->rcr) {
4153 		np->ops->free_coherent(np->device,
4154 				       MAX_RCR_RING_SIZE * sizeof(__le64),
4155 				       rp->rcr, rp->rcr_dma);
4156 		rp->rcr = NULL;
4157 		rp->rcr_table_size = 0;
4158 		rp->rcr_index = 0;
4159 	}
4160 	if (rp->rbr) {
4161 		niu_rbr_free(np, rp);
4162 
4163 		np->ops->free_coherent(np->device,
4164 				       MAX_RBR_RING_SIZE * sizeof(__le32),
4165 				       rp->rbr, rp->rbr_dma);
4166 		rp->rbr = NULL;
4167 		rp->rbr_table_size = 0;
4168 		rp->rbr_index = 0;
4169 	}
4170 	kfree(rp->rxhash);
4171 	rp->rxhash = NULL;
4172 }
4173 
4174 static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
4175 {
4176 	if (rp->mbox) {
4177 		np->ops->free_coherent(np->device,
4178 				       sizeof(struct txdma_mailbox),
4179 				       rp->mbox, rp->mbox_dma);
4180 		rp->mbox = NULL;
4181 	}
4182 	if (rp->descr) {
4183 		int i;
4184 
4185 		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
4186 			if (rp->tx_buffs[i].skb)
4187 				(void) release_tx_packet(np, rp, i);
4188 		}
4189 
4190 		np->ops->free_coherent(np->device,
4191 				       MAX_TX_RING_SIZE * sizeof(__le64),
4192 				       rp->descr, rp->descr_dma);
4193 		rp->descr = NULL;
4194 		rp->pending = 0;
4195 		rp->prod = 0;
4196 		rp->cons = 0;
4197 		rp->wrap_bit = 0;
4198 	}
4199 }
4200 
4201 static void niu_free_channels(struct niu *np)
4202 {
4203 	int i;
4204 
4205 	if (np->rx_rings) {
4206 		for (i = 0; i < np->num_rx_rings; i++) {
4207 			struct rx_ring_info *rp = &np->rx_rings[i];
4208 
4209 			niu_free_rx_ring_info(np, rp);
4210 		}
4211 		kfree(np->rx_rings);
4212 		np->rx_rings = NULL;
4213 		np->num_rx_rings = 0;
4214 	}
4215 
4216 	if (np->tx_rings) {
4217 		for (i = 0; i < np->num_tx_rings; i++) {
4218 			struct tx_ring_info *rp = &np->tx_rings[i];
4219 
4220 			niu_free_tx_ring_info(np, rp);
4221 		}
4222 		kfree(np->tx_rings);
4223 		np->tx_rings = NULL;
4224 		np->num_tx_rings = 0;
4225 	}
4226 }
4227 
4228 static int niu_alloc_rx_ring_info(struct niu *np,
4229 				  struct rx_ring_info *rp)
4230 {
4231 	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
4232 
4233 	rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
4234 			     GFP_KERNEL);
4235 	if (!rp->rxhash)
4236 		return -ENOMEM;
4237 
4238 	rp->mbox = np->ops->alloc_coherent(np->device,
4239 					   sizeof(struct rxdma_mailbox),
4240 					   &rp->mbox_dma, GFP_KERNEL);
4241 	if (!rp->mbox)
4242 		return -ENOMEM;
4243 	if ((unsigned long)rp->mbox & (64UL - 1)) {
4244 		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4245 			"RXDMA mailbox %p\n", np->dev->name, rp->mbox);
4246 		return -EINVAL;
4247 	}
4248 
4249 	rp->rcr = np->ops->alloc_coherent(np->device,
4250 					  MAX_RCR_RING_SIZE * sizeof(__le64),
4251 					  &rp->rcr_dma, GFP_KERNEL);
4252 	if (!rp->rcr)
4253 		return -ENOMEM;
4254 	if ((unsigned long)rp->rcr & (64UL - 1)) {
4255 		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4256 			"RXDMA RCR table %p\n", np->dev->name, rp->rcr);
4257 		return -EINVAL;
4258 	}
4259 	rp->rcr_table_size = MAX_RCR_RING_SIZE;
4260 	rp->rcr_index = 0;
4261 
4262 	rp->rbr = np->ops->alloc_coherent(np->device,
4263 					  MAX_RBR_RING_SIZE * sizeof(__le32),
4264 					  &rp->rbr_dma, GFP_KERNEL);
4265 	if (!rp->rbr)
4266 		return -ENOMEM;
4267 	if ((unsigned long)rp->rbr & (64UL - 1)) {
4268 		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4269 			"RXDMA RBR table %p\n", np->dev->name, rp->rbr);
4270 		return -EINVAL;
4271 	}
4272 	rp->rbr_table_size = MAX_RBR_RING_SIZE;
4273 	rp->rbr_index = 0;
4274 	rp->rbr_pending = 0;
4275 
4276 	return 0;
4277 }
4278 
4279 static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
4280 {
4281 	int mtu = np->dev->mtu;
4282 
4283 	/* These values are recommended by the HW designers for fair
4284 	 * utilization of DRR amongst the rings.
4285 	 */
4286 	rp->max_burst = mtu + 32;
4287 	if (rp->max_burst > 4096)
4288 		rp->max_burst = 4096;
4289 }
4290 
4291 static int niu_alloc_tx_ring_info(struct niu *np,
4292 				  struct tx_ring_info *rp)
4293 {
4294 	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
4295 
4296 	rp->mbox = np->ops->alloc_coherent(np->device,
4297 					   sizeof(struct txdma_mailbox),
4298 					   &rp->mbox_dma, GFP_KERNEL);
4299 	if (!rp->mbox)
4300 		return -ENOMEM;
4301 	if ((unsigned long)rp->mbox & (64UL - 1)) {
4302 		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4303 			"TXDMA mailbox %p\n", np->dev->name, rp->mbox);
4304 		return -EINVAL;
4305 	}
4306 
4307 	rp->descr = np->ops->alloc_coherent(np->device,
4308 					    MAX_TX_RING_SIZE * sizeof(__le64),
4309 					    &rp->descr_dma, GFP_KERNEL);
4310 	if (!rp->descr)
4311 		return -ENOMEM;
4312 	if ((unsigned long)rp->descr & (64UL - 1)) {
4313 		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4314 			"TXDMA descr table %p\n", np->dev->name, rp->descr);
4315 		return -EINVAL;
4316 	}
4317 
4318 	rp->pending = MAX_TX_RING_SIZE;
4319 	rp->prod = 0;
4320 	rp->cons = 0;
4321 	rp->wrap_bit = 0;
4322 
4323 	/* XXX make these configurable... XXX */
4324 	rp->mark_freq = rp->pending / 4;
4325 
4326 	niu_set_max_burst(np, rp);
4327 
4328 	return 0;
4329 }
4330 
4331 static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4332 {
4333 	u16 bss;
4334 
4335 	bss = min(PAGE_SHIFT, 15);
4336 
4337 	rp->rbr_block_size = 1 << bss;
4338 	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
4339 
4340 	rp->rbr_sizes[0] = 256;
4341 	rp->rbr_sizes[1] = 1024;
4342 	if (np->dev->mtu > ETH_DATA_LEN) {
4343 		switch (PAGE_SIZE) {
4344 		case 4 * 1024:
4345 			rp->rbr_sizes[2] = 4096;
4346 			break;
4347 
4348 		default:
4349 			rp->rbr_sizes[2] = 8192;
4350 			break;
4351 		}
4352 	} else {
4353 		rp->rbr_sizes[2] = 2048;
4354 	}
4355 	rp->rbr_sizes[3] = rp->rbr_block_size;
4356 }
4357 
4358 static int niu_alloc_channels(struct niu *np)
4359 {
4360 	struct niu_parent *parent = np->parent;
4361 	int first_rx_channel, first_tx_channel;
4362 	int i, port, err;
4363 
4364 	port = np->port;
4365 	first_rx_channel = first_tx_channel = 0;
4366 	for (i = 0; i < port; i++) {
4367 		first_rx_channel += parent->rxchan_per_port[i];
4368 		first_tx_channel += parent->txchan_per_port[i];
4369 	}
4370 
4371 	np->num_rx_rings = parent->rxchan_per_port[port];
4372 	np->num_tx_rings = parent->txchan_per_port[port];
4373 
4374 	np->dev->real_num_tx_queues = np->num_tx_rings;
4375 
4376 	np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
4377 			       GFP_KERNEL);
4378 	err = -ENOMEM;
4379 	if (!np->rx_rings)
4380 		goto out_err;
4381 
4382 	for (i = 0; i < np->num_rx_rings; i++) {
4383 		struct rx_ring_info *rp = &np->rx_rings[i];
4384 
4385 		rp->np = np;
4386 		rp->rx_channel = first_rx_channel + i;
4387 
4388 		err = niu_alloc_rx_ring_info(np, rp);
4389 		if (err)
4390 			goto out_err;
4391 
4392 		niu_size_rbr(np, rp);
4393 
4394 		/* XXX better defaults, configurable, etc... XXX */
4395 		rp->nonsyn_window = 64;
4396 		rp->nonsyn_threshold = rp->rcr_table_size - 64;
4397 		rp->syn_window = 64;
4398 		rp->syn_threshold = rp->rcr_table_size - 64;
4399 		rp->rcr_pkt_threshold = 16;
4400 		rp->rcr_timeout = 8;
4401 		rp->rbr_kick_thresh = RBR_REFILL_MIN;
4402 		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4403 			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4404 
4405 		err = niu_rbr_fill(np, rp, GFP_KERNEL);
4406 		if (err)
4407 			return err;
4408 	}
4409 
4410 	np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
4411 			       GFP_KERNEL);
4412 	err = -ENOMEM;
4413 	if (!np->tx_rings)
4414 		goto out_err;
4415 
4416 	for (i = 0; i < np->num_tx_rings; i++) {
4417 		struct tx_ring_info *rp = &np->tx_rings[i];
4418 
4419 		rp->np = np;
4420 		rp->tx_channel = first_tx_channel + i;
4421 
4422 		err = niu_alloc_tx_ring_info(np, rp);
4423 		if (err)
4424 			goto out_err;
4425 	}
4426 
4427 	return 0;
4428 
4429 out_err:
4430 	niu_free_channels(np);
4431 	return err;
4432 }
4433 
4434 static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4435 {
4436 	int limit = 1000;
4437 
4438 	while (--limit > 0) {
4439 		u64 val = nr64(TX_CS(channel));
4440 		if (val & TX_CS_SNG_STATE)
4441 			return 0;
4442 	}
4443 	return -ENODEV;
4444 }
4445 
4446 static int niu_tx_channel_stop(struct niu *np, int channel)
4447 {
4448 	u64 val = nr64(TX_CS(channel));
4449 
4450 	val |= TX_CS_STOP_N_GO;
4451 	nw64(TX_CS(channel), val);
4452 
4453 	return niu_tx_cs_sng_poll(np, channel);
4454 }
4455 
4456 static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4457 {
4458 	int limit = 1000;
4459 
4460 	while (--limit > 0) {
4461 		u64 val = nr64(TX_CS(channel));
4462 		if (!(val & TX_CS_RST))
4463 			return 0;
4464 	}
4465 	return -ENODEV;
4466 }
4467 
4468 static int niu_tx_channel_reset(struct niu *np, int channel)
4469 {
4470 	u64 val = nr64(TX_CS(channel));
4471 	int err;
4472 
4473 	val |= TX_CS_RST;
4474 	nw64(TX_CS(channel), val);
4475 
4476 	err = niu_tx_cs_reset_poll(np, channel);
4477 	if (!err)
4478 		nw64(TX_RING_KICK(channel), 0);
4479 
4480 	return err;
4481 }
4482 
4483 static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4484 {
4485 	u64 val;
4486 
4487 	nw64(TX_LOG_MASK1(channel), 0);
4488 	nw64(TX_LOG_VAL1(channel), 0);
4489 	nw64(TX_LOG_MASK2(channel), 0);
4490 	nw64(TX_LOG_VAL2(channel), 0);
4491 	nw64(TX_LOG_PAGE_RELO1(channel), 0);
4492 	nw64(TX_LOG_PAGE_RELO2(channel), 0);
4493 	nw64(TX_LOG_PAGE_HDL(channel), 0);
4494 
4495 	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4496 	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
4497 	nw64(TX_LOG_PAGE_VLD(channel), val);
4498 
4499 	/* XXX TXDMA 32bit mode? XXX */
4500 
4501 	return 0;
4502 }
4503 
4504 static void niu_txc_enable_port(struct niu *np, int on)
4505 {
4506 	unsigned long flags;
4507 	u64 val, mask;
4508 
4509 	niu_lock_parent(np, flags);
4510 	val = nr64(TXC_CONTROL);
4511 	mask = (u64)1 << np->port;
4512 	if (on) {
4513 		val |= TXC_CONTROL_ENABLE | mask;
4514 	} else {
4515 		val &= ~mask;
4516 		if ((val & ~TXC_CONTROL_ENABLE) == 0)
4517 			val &= ~TXC_CONTROL_ENABLE;
4518 	}
4519 	nw64(TXC_CONTROL, val);
4520 	niu_unlock_parent(np, flags);
4521 }
4522 
4523 static void niu_txc_set_imask(struct niu *np, u64 imask)
4524 {
4525 	unsigned long flags;
4526 	u64 val;
4527 
4528 	niu_lock_parent(np, flags);
4529 	val = nr64(TXC_INT_MASK);
4530 	val &= ~TXC_INT_MASK_VAL(np->port);
4531 	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
	/* Write the updated mask back; without this store the
	 * read-modify sequence above has no effect on the hardware.
	 */
	nw64(TXC_INT_MASK, val);
4532 	niu_unlock_parent(np, flags);
4533 }
4534 
4535 static void niu_txc_port_dma_enable(struct niu *np, int on)
4536 {
4537 	u64 val = 0;
4538 
4539 	if (on) {
4540 		int i;
4541 
4542 		for (i = 0; i < np->num_tx_rings; i++)
4543 			val |= (1 << np->tx_rings[i].tx_channel);
4544 	}
4545 	nw64(TXC_PORT_DMA(np->port), val);
4546 }
4547 
4548 static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4549 {
4550 	int err, channel = rp->tx_channel;
4551 	u64 val, ring_len;
4552 
4553 	err = niu_tx_channel_stop(np, channel);
4554 	if (err)
4555 		return err;
4556 
4557 	err = niu_tx_channel_reset(np, channel);
4558 	if (err)
4559 		return err;
4560 
4561 	err = niu_tx_channel_lpage_init(np, channel);
4562 	if (err)
4563 		return err;
4564 
4565 	nw64(TXC_DMA_MAX(channel), rp->max_burst);
4566 	nw64(TX_ENT_MSK(channel), 0);
4567 
4568 	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4569 			      TX_RNG_CFIG_STADDR)) {
4570 		dev_err(np->device, PFX "%s: TX ring channel %d "
4571 			"DMA addr (%llx) is not aligned.\n",
4572 			np->dev->name, channel,
4573 			(unsigned long long) rp->descr_dma);
4574 		return -EINVAL;
4575 	}
4576 
4577 	/* The length field in TX_RNG_CFIG is measured in 64-byte
4578 	 * blocks.  rp->pending is the number of TX descriptors in
4579 	 * our ring, each 8 bytes long, so dividing the descriptor
4580 	 * count by 8 yields the ring length in 64-byte blocks.
4581 	 */
4582 	ring_len = (rp->pending / 8);
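	/* Illustrative arithmetic only: a 256-descriptor ring occupies
	 * 256 * 8 = 2048 bytes, i.e. 2048 / 64 = 32 blocks, which is
	 * exactly 256 / 8.
	 */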
4583 
4584 	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4585 	       rp->descr_dma);
4586 	nw64(TX_RNG_CFIG(channel), val);
4587 
4588 	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4589 	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4590 		dev_err(np->device, PFX "%s: TX ring channel %d "
4591 			"MBOX addr (%llx) has illegal bits.\n",
4592 			np->dev->name, channel,
4593 			(unsigned long long) rp->mbox_dma);
4594 		return -EINVAL;
4595 	}
4596 	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4597 	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4598 
4599 	nw64(TX_CS(channel), 0);
4600 
4601 	rp->last_pkt_cnt = 0;
4602 
4603 	return 0;
4604 }
4605 
4606 static void niu_init_rdc_groups(struct niu *np)
4607 {
4608 	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4609 	int i, first_table_num = tp->first_table_num;
4610 
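	/* Each RDC table is a small indirection array: every slot names
	 * an RX DMA channel, and the classifier's output indexes into
	 * it to spread flows across the port's channels.  DEF_RDC sets
	 * the port's default channel.
	 */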
4611 	for (i = 0; i < tp->num_tables; i++) {
4612 		struct rdc_table *tbl = &tp->tables[i];
4613 		int this_table = first_table_num + i;
4614 		int slot;
4615 
4616 		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4617 			nw64(RDC_TBL(this_table, slot),
4618 			     tbl->rxdma_channel[slot]);
4619 	}
4620 
4621 	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4622 }
4623 
4624 static void niu_init_drr_weight(struct niu *np)
4625 {
4626 	int type = phy_decode(np->parent->port_phy, np->port);
4627 	u64 val;
4628 
4629 	switch (type) {
4630 	case PORT_TYPE_10G:
4631 		val = PT_DRR_WEIGHT_DEFAULT_10G;
4632 		break;
4633 
4634 	case PORT_TYPE_1G:
4635 	default:
4636 		val = PT_DRR_WEIGHT_DEFAULT_1G;
4637 		break;
4638 	}
4639 	nw64(PT_DRR_WT(np->port), val);
4640 }
4641 
4642 static int niu_init_hostinfo(struct niu *np)
4643 {
4644 	struct niu_parent *parent = np->parent;
4645 	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4646 	int i, err, num_alt = niu_num_alt_addr(np);
4647 	int first_rdc_table = tp->first_table_num;
4648 
4649 	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4650 	if (err)
4651 		return err;
4652 
4653 	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4654 	if (err)
4655 		return err;
4656 
4657 	for (i = 0; i < num_alt; i++) {
4658 		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4659 		if (err)
4660 			return err;
4661 	}
4662 
4663 	return 0;
4664 }
4665 
4666 static int niu_rx_channel_reset(struct niu *np, int channel)
4667 {
4668 	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4669 				      RXDMA_CFIG1_RST, 1000, 10,
4670 				      "RXDMA_CFIG1");
4671 }
4672 
4673 static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4674 {
4675 	u64 val;
4676 
4677 	nw64(RX_LOG_MASK1(channel), 0);
4678 	nw64(RX_LOG_VAL1(channel), 0);
4679 	nw64(RX_LOG_MASK2(channel), 0);
4680 	nw64(RX_LOG_VAL2(channel), 0);
4681 	nw64(RX_LOG_PAGE_RELO1(channel), 0);
4682 	nw64(RX_LOG_PAGE_RELO2(channel), 0);
4683 	nw64(RX_LOG_PAGE_HDL(channel), 0);
4684 
4685 	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4686 	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
4687 	nw64(RX_LOG_PAGE_VLD(channel), val);
4688 
4689 	return 0;
4690 }
4691 
4692 static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4693 {
4694 	u64 val;
4695 
4696 	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4697 	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4698 	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4699 	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4700 	nw64(RDC_RED_PARA(rp->rx_channel), val);
4701 }
4702 
4703 static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4704 {
4705 	u64 val = 0;
4706 
4707 	switch (rp->rbr_block_size) {
4708 	case 4 * 1024:
4709 		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4710 		break;
4711 	case 8 * 1024:
4712 		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4713 		break;
4714 	case 16 * 1024:
4715 		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4716 		break;
4717 	case 32 * 1024:
4718 		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4719 		break;
4720 	default:
4721 		return -EINVAL;
4722 	}
4723 	val |= RBR_CFIG_B_VLD2;
4724 	switch (rp->rbr_sizes[2]) {
4725 	case 2 * 1024:
4726 		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4727 		break;
4728 	case 4 * 1024:
4729 		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4730 		break;
4731 	case 8 * 1024:
4732 		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4733 		break;
4734 	case 16 * 1024:
4735 		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4736 		break;
4737 
4738 	default:
4739 		return -EINVAL;
4740 	}
4741 	val |= RBR_CFIG_B_VLD1;
4742 	switch (rp->rbr_sizes[1]) {
4743 	case 1 * 1024:
4744 		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4745 		break;
4746 	case 2 * 1024:
4747 		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4748 		break;
4749 	case 4 * 1024:
4750 		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4751 		break;
4752 	case 8 * 1024:
4753 		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4754 		break;
4755 
4756 	default:
4757 		return -EINVAL;
4758 	}
4759 	val |= RBR_CFIG_B_VLD0;
4760 	switch (rp->rbr_sizes[0]) {
4761 	case 256:
4762 		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4763 		break;
4764 	case 512:
4765 		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4766 		break;
4767 	case 1 * 1024:
4768 		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4769 		break;
4770 	case 2 * 1024:
4771 		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4772 		break;
4773 
4774 	default:
4775 		return -EINVAL;
4776 	}
4777 
4778 	*ret = val;
4779 	return 0;
4780 }
4781 
4782 static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4783 {
4784 	u64 val = nr64(RXDMA_CFIG1(channel));
4785 	int limit;
4786 
4787 	if (on)
4788 		val |= RXDMA_CFIG1_EN;
4789 	else
4790 		val &= ~RXDMA_CFIG1_EN;
4791 	nw64(RXDMA_CFIG1(channel), val);
4792 
4793 	limit = 1000;
4794 	while (--limit > 0) {
4795 		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4796 			break;
4797 		udelay(10);
4798 	}
4799 	if (limit <= 0)
4800 		return -ENODEV;
4801 	return 0;
4802 }
4803 
4804 static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4805 {
4806 	int err, channel = rp->rx_channel;
4807 	u64 val;
4808 
4809 	err = niu_rx_channel_reset(np, channel);
4810 	if (err)
4811 		return err;
4812 
4813 	err = niu_rx_channel_lpage_init(np, channel);
4814 	if (err)
4815 		return err;
4816 
4817 	niu_rx_channel_wred_init(np, rp);
4818 
4819 	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
4820 	nw64(RX_DMA_CTL_STAT(channel),
4821 	     (RX_DMA_CTL_STAT_MEX |
4822 	      RX_DMA_CTL_STAT_RCRTHRES |
4823 	      RX_DMA_CTL_STAT_RCRTO |
4824 	      RX_DMA_CTL_STAT_RBR_EMPTY));
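	/* The 64-byte aligned mailbox DMA address is split across the
	 * two config registers: the high 32 bits go into RXDMA_CFIG1,
	 * the low bits (with the six alignment bits masked off) into
	 * RXDMA_CFIG2.
	 */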
4825 	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4826 	nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
4827 	nw64(RBR_CFIG_A(channel),
4828 	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4829 	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
4830 	err = niu_compute_rbr_cfig_b(rp, &val);
4831 	if (err)
4832 		return err;
4833 	nw64(RBR_CFIG_B(channel), val);
4834 	nw64(RCRCFIG_A(channel),
4835 	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
4836 	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
4837 	nw64(RCRCFIG_B(channel),
4838 	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
4839 	     RCRCFIG_B_ENTOUT |
4840 	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
4841 
4842 	err = niu_enable_rx_channel(np, channel, 1);
4843 	if (err)
4844 		return err;
4845 
4846 	nw64(RBR_KICK(channel), rp->rbr_index);
4847 
4848 	val = nr64(RX_DMA_CTL_STAT(channel));
4849 	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
4850 	nw64(RX_DMA_CTL_STAT(channel), val);
4851 
4852 	return 0;
4853 }
4854 
4855 static int niu_init_rx_channels(struct niu *np)
4856 {
4857 	unsigned long flags;
4858 	u64 seed = jiffies_64;
4859 	int err, i;
4860 
4861 	niu_lock_parent(np, flags);
4862 	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
4863 	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
4864 	niu_unlock_parent(np, flags);
4865 
4866 	/* XXX RXDMA 32bit mode? XXX */
4867 
4868 	niu_init_rdc_groups(np);
4869 	niu_init_drr_weight(np);
4870 
4871 	err = niu_init_hostinfo(np);
4872 	if (err)
4873 		return err;
4874 
4875 	for (i = 0; i < np->num_rx_rings; i++) {
4876 		struct rx_ring_info *rp = &np->rx_rings[i];
4877 
4878 		err = niu_init_one_rx_channel(np, rp);
4879 		if (err)
4880 			return err;
4881 	}
4882 
4883 	return 0;
4884 }
4885 
4886 static int niu_set_ip_frag_rule(struct niu *np)
4887 {
4888 	struct niu_parent *parent = np->parent;
4889 	struct niu_classifier *cp = &np->clas;
4890 	struct niu_tcam_entry *tp;
4891 	int index, err;
4892 
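	/* Fragmented IP packets carry no L4 port information, so a
	 * catch-all TCAM entry keyed on the "noport" bit is installed;
	 * its association data steers such packets through offset 0 of
	 * the RDC table instead of leaving them unclassified.
	 */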
4893 	/* XXX fix this allocation scheme XXX */
4894 	index = cp->tcam_index;
4895 	tp = &parent->tcam[index];
4896 
4897 	/* Note that the noport bit is the same in both ipv4 and
4898 	 * ipv6 format TCAM entries.
4899 	 */
4900 	memset(tp, 0, sizeof(*tp));
4901 	tp->key[1] = TCAM_V4KEY1_NOPORT;
4902 	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
4903 	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
4904 			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
4905 	err = tcam_write(np, index, tp->key, tp->key_mask);
4906 	if (err)
4907 		return err;
4908 	err = tcam_assoc_write(np, index, tp->assoc_data);
4909 	if (err)
4910 		return err;
4911 
4912 	return 0;
4913 }
4914 
4915 static int niu_init_classifier_hw(struct niu *np)
4916 {
4917 	struct niu_parent *parent = np->parent;
4918 	struct niu_classifier *cp = &np->clas;
4919 	int i, err;
4920 
4921 	nw64(H1POLY, cp->h1_init);
4922 	nw64(H2POLY, cp->h2_init);
4923 
4924 	err = niu_init_hostinfo(np);
4925 	if (err)
4926 		return err;
4927 
4928 	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
4929 		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
4930 
4931 		vlan_tbl_write(np, i, np->port,
4932 			       vp->vlan_pref, vp->rdc_num);
4933 	}
4934 
4935 	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
4936 		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
4937 
4938 		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
4939 						ap->rdc_num, ap->mac_pref);
4940 		if (err)
4941 			return err;
4942 	}
4943 
4944 	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
4945 		int index = i - CLASS_CODE_USER_PROG1;
4946 
4947 		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
4948 		if (err)
4949 			return err;
4950 		err = niu_set_flow_key(np, i, parent->flow_key[index]);
4951 		if (err)
4952 			return err;
4953 	}
4954 
4955 	err = niu_set_ip_frag_rule(np);
4956 	if (err)
4957 		return err;
4958 
4959 	tcam_enable(np, 1);
4960 
4961 	return 0;
4962 }
4963 
4964 static int niu_zcp_write(struct niu *np, int index, u64 *data)
4965 {
4966 	nw64(ZCP_RAM_DATA0, data[0]);
4967 	nw64(ZCP_RAM_DATA1, data[1]);
4968 	nw64(ZCP_RAM_DATA2, data[2]);
4969 	nw64(ZCP_RAM_DATA3, data[3]);
4970 	nw64(ZCP_RAM_DATA4, data[4]);
4971 	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
4972 	nw64(ZCP_RAM_ACC,
4973 	     (ZCP_RAM_ACC_WRITE |
4974 	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
4975 	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
4976 
4977 	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4978 				   1000, 100);
4979 }
4980 
4981 static int niu_zcp_read(struct niu *np, int index, u64 *data)
4982 {
4983 	int err;
4984 
4985 	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4986 				  1000, 100);
4987 	if (err) {
4988 		dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
4989 			"ZCP_RAM_ACC[%llx]\n", np->dev->name,
4990 			(unsigned long long) nr64(ZCP_RAM_ACC));
4991 		return err;
4992 	}
4993 
4994 	nw64(ZCP_RAM_ACC,
4995 	     (ZCP_RAM_ACC_READ |
4996 	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
4997 	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
4998 
4999 	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5000 				  1000, 100);
5001 	if (err) {
5002 		dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
5003 			"ZCP_RAM_ACC[%llx]\n", np->dev->name,
5004 			(unsigned long long) nr64(ZCP_RAM_ACC));
5005 		return err;
5006 	}
5007 
5008 	data[0] = nr64(ZCP_RAM_DATA0);
5009 	data[1] = nr64(ZCP_RAM_DATA1);
5010 	data[2] = nr64(ZCP_RAM_DATA2);
5011 	data[3] = nr64(ZCP_RAM_DATA3);
5012 	data[4] = nr64(ZCP_RAM_DATA4);
5013 
5014 	return 0;
5015 }
5016 
5017 static void niu_zcp_cfifo_reset(struct niu *np)
5018 {
5019 	u64 val = nr64(RESET_CFIFO);
5020 
5021 	val |= RESET_CFIFO_RST(np->port);
5022 	nw64(RESET_CFIFO, val);
5023 	udelay(10);
5024 
5025 	val &= ~RESET_CFIFO_RST(np->port);
5026 	nw64(RESET_CFIFO, val);
5027 }
5028 
5029 static int niu_init_zcp(struct niu *np)
5030 {
5031 	u64 data[5], rbuf[5];
5032 	int i, max, err;
5033 
5034 	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5035 		if (np->port == 0 || np->port == 1)
5036 			max = ATLAS_P0_P1_CFIFO_ENTRIES;
5037 		else
5038 			max = ATLAS_P2_P3_CFIFO_ENTRIES;
5039 	} else
5040 		max = NIU_CFIFO_ENTRIES;
5041 
5042 	data[0] = 0;
5043 	data[1] = 0;
5044 	data[2] = 0;
5045 	data[3] = 0;
5046 	data[4] = 0;
5047 
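	/* Write zeros to every CFIFO entry and read each one back,
	 * presumably to put the RAM and its ECC state into a known
	 * condition before the interrupt masks are programmed below.
	 */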
5048 	for (i = 0; i < max; i++) {
5049 		err = niu_zcp_write(np, i, data);
5050 		if (err)
5051 			return err;
5052 		err = niu_zcp_read(np, i, rbuf);
5053 		if (err)
5054 			return err;
5055 	}
5056 
5057 	niu_zcp_cfifo_reset(np);
5058 	nw64(CFIFO_ECC(np->port), 0);
5059 	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
5060 	(void) nr64(ZCP_INT_STAT);
5061 	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
5062 
5063 	return 0;
5064 }
5065 
5066 static void niu_ipp_write(struct niu *np, int index, u64 *data)
5067 {
5068 	u64 val = nr64_ipp(IPP_CFIG);
5069 
5070 	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
5071 	nw64_ipp(IPP_DFIFO_WR_PTR, index);
5072 	nw64_ipp(IPP_DFIFO_WR0, data[0]);
5073 	nw64_ipp(IPP_DFIFO_WR1, data[1]);
5074 	nw64_ipp(IPP_DFIFO_WR2, data[2]);
5075 	nw64_ipp(IPP_DFIFO_WR3, data[3]);
5076 	nw64_ipp(IPP_DFIFO_WR4, data[4]);
5077 	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
5078 }
5079 
5080 static void niu_ipp_read(struct niu *np, int index, u64 *data)
5081 {
5082 	nw64_ipp(IPP_DFIFO_RD_PTR, index);
5083 	data[0] = nr64_ipp(IPP_DFIFO_RD0);
5084 	data[1] = nr64_ipp(IPP_DFIFO_RD1);
5085 	data[2] = nr64_ipp(IPP_DFIFO_RD2);
5086 	data[3] = nr64_ipp(IPP_DFIFO_RD3);
5087 	data[4] = nr64_ipp(IPP_DFIFO_RD4);
5088 }
5089 
5090 static int niu_ipp_reset(struct niu *np)
5091 {
5092 	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
5093 					  1000, 100, "IPP_CFIG");
5094 }
5095 
5096 static int niu_init_ipp(struct niu *np)
5097 {
5098 	u64 data[5], rbuf[5], val;
5099 	int i, max, err;
5100 
5101 	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5102 		if (np->port == 0 || np->port == 1)
5103 			max = ATLAS_P0_P1_DFIFO_ENTRIES;
5104 		else
5105 			max = ATLAS_P2_P3_DFIFO_ENTRIES;
5106 	} else
5107 		max = NIU_DFIFO_ENTRIES;
5108 
5109 	data[0] = 0;
5110 	data[1] = 0;
5111 	data[2] = 0;
5112 	data[3] = 0;
5113 	data[4] = 0;
5114 
5115 	for (i = 0; i < max; i++) {
5116 		niu_ipp_write(np, i, data);
5117 		niu_ipp_read(np, i, rbuf);
5118 	}
5119 
5120 	(void) nr64_ipp(IPP_INT_STAT);
5121 	(void) nr64_ipp(IPP_INT_STAT);
5122 
5123 	err = niu_ipp_reset(np);
5124 	if (err)
5125 		return err;
5126 
5127 	(void) nr64_ipp(IPP_PKT_DIS);
5128 	(void) nr64_ipp(IPP_BAD_CS_CNT);
5129 	(void) nr64_ipp(IPP_ECC);
5130 
5131 	(void) nr64_ipp(IPP_INT_STAT);
5132 
5133 	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
5134 
5135 	val = nr64_ipp(IPP_CFIG);
5136 	val &= ~IPP_CFIG_IP_MAX_PKT;
5137 	val |= (IPP_CFIG_IPP_ENABLE |
5138 		IPP_CFIG_DFIFO_ECC_EN |
5139 		IPP_CFIG_DROP_BAD_CRC |
5140 		IPP_CFIG_CKSUM_EN |
5141 		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
5142 	nw64_ipp(IPP_CFIG, val);
5143 
5144 	return 0;
5145 }
5146 
5147 static void niu_handle_led(struct niu *np, int status)
5148 {
5149 	u64 val;
5150 	val = nr64_mac(XMAC_CONFIG);
5151 
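	/* Only 10G fiber ports drive the link LED through XMAC_CONFIG;
	 * for any other configuration the register value is written
	 * back unchanged.
	 */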
5152 	if ((np->flags & NIU_FLAGS_10G) != 0 &&
5153 	    (np->flags & NIU_FLAGS_FIBER) != 0) {
5154 		if (status) {
5155 			val |= XMAC_CONFIG_LED_POLARITY;
5156 			val &= ~XMAC_CONFIG_FORCE_LED_ON;
5157 		} else {
5158 			val |= XMAC_CONFIG_FORCE_LED_ON;
5159 			val &= ~XMAC_CONFIG_LED_POLARITY;
5160 		}
5161 	}
5162 
5163 	nw64_mac(XMAC_CONFIG, val);
5164 }
5165 
5166 static void niu_init_xif_xmac(struct niu *np)
5167 {
5168 	struct niu_link_config *lp = &np->link_config;
5169 	u64 val;
5170 
5171 	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
5172 		val = nr64(MIF_CONFIG);
5173 		val |= MIF_CONFIG_ATCA_GE;
5174 		nw64(MIF_CONFIG, val);
5175 	}
5176 
5177 	val = nr64_mac(XMAC_CONFIG);
5178 	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5179 
5180 	val |= XMAC_CONFIG_TX_OUTPUT_EN;
5181 
5182 	if (lp->loopback_mode == LOOPBACK_MAC) {
5183 		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5184 		val |= XMAC_CONFIG_LOOPBACK;
5185 	} else {
5186 		val &= ~XMAC_CONFIG_LOOPBACK;
5187 	}
5188 
5189 	if (np->flags & NIU_FLAGS_10G) {
5190 		val &= ~XMAC_CONFIG_LFS_DISABLE;
5191 	} else {
5192 		val |= XMAC_CONFIG_LFS_DISABLE;
5193 		if (!(np->flags & NIU_FLAGS_FIBER) &&
5194 		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
5195 			val |= XMAC_CONFIG_1G_PCS_BYPASS;
5196 		else
5197 			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
5198 	}
5199 
5200 	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5201 
5202 	if (lp->active_speed == SPEED_100)
5203 		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
5204 	else
5205 		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
5206 
5207 	nw64_mac(XMAC_CONFIG, val);
5208 
5209 	val = nr64_mac(XMAC_CONFIG);
5210 	val &= ~XMAC_CONFIG_MODE_MASK;
5211 	if (np->flags & NIU_FLAGS_10G) {
5212 		val |= XMAC_CONFIG_MODE_XGMII;
5213 	} else {
5214 		if (lp->active_speed == SPEED_100)
5215 			val |= XMAC_CONFIG_MODE_MII;
5216 		else
5217 			val |= XMAC_CONFIG_MODE_GMII;
5218 	}
5219 
5220 	nw64_mac(XMAC_CONFIG, val);
5221 }
5222 
5223 static void niu_init_xif_bmac(struct niu *np)
5224 {
5225 	struct niu_link_config *lp = &np->link_config;
5226 	u64 val;
5227 
5228 	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
5229 
5230 	if (lp->loopback_mode == LOOPBACK_MAC)
5231 		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
5232 	else
5233 		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
5234 
5235 	if (lp->active_speed == SPEED_1000)
5236 		val |= BMAC_XIF_CONFIG_GMII_MODE;
5237 	else
5238 		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
5239 
5240 	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
5241 		 BMAC_XIF_CONFIG_LED_POLARITY);
5242 
5243 	if (!(np->flags & NIU_FLAGS_10G) &&
5244 	    !(np->flags & NIU_FLAGS_FIBER) &&
5245 	    lp->active_speed == SPEED_100)
5246 		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
5247 	else
5248 		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
5249 
5250 	nw64_mac(BMAC_XIF_CONFIG, val);
5251 }
5252 
5253 static void niu_init_xif(struct niu *np)
5254 {
5255 	if (np->flags & NIU_FLAGS_XMAC)
5256 		niu_init_xif_xmac(np);
5257 	else
5258 		niu_init_xif_bmac(np);
5259 }
5260 
5261 static void niu_pcs_mii_reset(struct niu *np)
5262 {
5263 	int limit = 1000;
5264 	u64 val = nr64_pcs(PCS_MII_CTL);
5265 	val |= PCS_MII_CTL_RST;
5266 	nw64_pcs(PCS_MII_CTL, val);
5267 	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
5268 		udelay(100);
5269 		val = nr64_pcs(PCS_MII_CTL);
5270 	}
5271 }
5272 
5273 static void niu_xpcs_reset(struct niu *np)
5274 {
5275 	int limit = 1000;
5276 	u64 val = nr64_xpcs(XPCS_CONTROL1);
5277 	val |= XPCS_CONTROL1_RESET;
5278 	nw64_xpcs(XPCS_CONTROL1, val);
5279 	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
5280 		udelay(100);
5281 		val = nr64_xpcs(XPCS_CONTROL1);
5282 	}
5283 }
5284 
5285 static int niu_init_pcs(struct niu *np)
5286 {
5287 	struct niu_link_config *lp = &np->link_config;
5288 	u64 val;
5289 
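	/* Select the datapath for the link type: 1G fiber and 1G SERDES
	 * use the PCS block, the 10G variants go through the XPCS, and
	 * 1G copper / 1G RGMII fiber bypass both via the MII datapath.
	 */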
5290 	switch (np->flags & (NIU_FLAGS_10G |
5291 			     NIU_FLAGS_FIBER |
5292 			     NIU_FLAGS_XCVR_SERDES)) {
5293 	case NIU_FLAGS_FIBER:
5294 		/* 1G fiber */
5295 		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5296 		nw64_pcs(PCS_DPATH_MODE, 0);
5297 		niu_pcs_mii_reset(np);
5298 		break;
5299 
5300 	case NIU_FLAGS_10G:
5301 	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
5302 	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
5303 		/* 10G SERDES */
5304 		if (!(np->flags & NIU_FLAGS_XMAC))
5305 			return -EINVAL;
5306 
5307 		/* 10G copper or fiber */
5308 		val = nr64_mac(XMAC_CONFIG);
5309 		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5310 		nw64_mac(XMAC_CONFIG, val);
5311 
5312 		niu_xpcs_reset(np);
5313 
5314 		val = nr64_xpcs(XPCS_CONTROL1);
5315 		if (lp->loopback_mode == LOOPBACK_PHY)
5316 			val |= XPCS_CONTROL1_LOOPBACK;
5317 		else
5318 			val &= ~XPCS_CONTROL1_LOOPBACK;
5319 		nw64_xpcs(XPCS_CONTROL1, val);
5320 
5321 		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
5322 		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
5323 		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
5324 		break;
5325 
5326 
5327 	case NIU_FLAGS_XCVR_SERDES:
5328 		/* 1G SERDES */
5329 		niu_pcs_mii_reset(np);
5330 		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5331 		nw64_pcs(PCS_DPATH_MODE, 0);
5332 		break;
5333 
5334 	case 0:
5335 		/* 1G copper */
5336 	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
5337 		/* 1G RGMII FIBER */
5338 		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
5339 		niu_pcs_mii_reset(np);
5340 		break;
5341 
5342 	default:
5343 		return -EINVAL;
5344 	}
5345 
5346 	return 0;
5347 }
5348 
5349 static int niu_reset_tx_xmac(struct niu *np)
5350 {
5351 	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5352 					  (XTXMAC_SW_RST_REG_RS |
5353 					   XTXMAC_SW_RST_SOFT_RST),
5354 					  1000, 100, "XTXMAC_SW_RST");
5355 }
5356 
5357 static int niu_reset_tx_bmac(struct niu *np)
5358 {
5359 	int limit;
5360 
5361 	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5362 	limit = 1000;
5363 	while (--limit >= 0) {
5364 		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5365 			break;
5366 		udelay(100);
5367 	}
5368 	if (limit < 0) {
5369 		dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
5370 			"BTXMAC_SW_RST[%llx]\n",
5371 			np->port,
5372 			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
5373 		return -ENODEV;
5374 	}
5375 
5376 	return 0;
5377 }
5378 
5379 static int niu_reset_tx_mac(struct niu *np)
5380 {
5381 	if (np->flags & NIU_FLAGS_XMAC)
5382 		return niu_reset_tx_xmac(np);
5383 	else
5384 		return niu_reset_tx_bmac(np);
5385 }
5386 
5387 static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
5388 {
5389 	u64 val;
5390 
5391 	val = nr64_mac(XMAC_MIN);
5392 	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
5393 		 XMAC_MIN_RX_MIN_PKT_SIZE);
5394 	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
5395 	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
5396 	nw64_mac(XMAC_MIN, val);
5397 
5398 	nw64_mac(XMAC_MAX, max);
5399 
5400 	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
5401 
5402 	val = nr64_mac(XMAC_IPG);
5403 	if (np->flags & NIU_FLAGS_10G) {
5404 		val &= ~XMAC_IPG_IPG_XGMII;
5405 		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
5406 	} else {
5407 		val &= ~XMAC_IPG_IPG_MII_GMII;
5408 		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
5409 	}
5410 	nw64_mac(XMAC_IPG, val);
5411 
5412 	val = nr64_mac(XMAC_CONFIG);
5413 	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
5414 		 XMAC_CONFIG_STRETCH_MODE |
5415 		 XMAC_CONFIG_VAR_MIN_IPG_EN |
5416 		 XMAC_CONFIG_TX_ENABLE);
5417 	nw64_mac(XMAC_CONFIG, val);
5418 
5419 	nw64_mac(TXMAC_FRM_CNT, 0);
5420 	nw64_mac(TXMAC_BYTE_CNT, 0);
5421 }
5422 
5423 static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
5424 {
5425 	u64 val;
5426 
5427 	nw64_mac(BMAC_MIN_FRAME, min);
5428 	nw64_mac(BMAC_MAX_FRAME, max);
5429 
5430 	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
5431 	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
5432 	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
5433 
5434 	val = nr64_mac(BTXMAC_CONFIG);
5435 	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
5436 		 BTXMAC_CONFIG_ENABLE);
5437 	nw64_mac(BTXMAC_CONFIG, val);
5438 }
5439 
5440 static void niu_init_tx_mac(struct niu *np)
5441 {
5442 	u64 min, max;
5443 
5444 	min = 64;
5445 	if (np->dev->mtu > ETH_DATA_LEN)
5446 		max = 9216;
5447 	else
5448 		max = 1522;
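	/* 1522 covers a standard frame: 1500 bytes of payload plus
	 * 14 bytes of header, a 4-byte VLAN tag and the 4-byte FCS.
	 * 9216 leaves room for jumbo frames once the MTU exceeds
	 * ETH_DATA_LEN.
	 */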
5449 
5450 	/* The XMAC_MIN register only accepts values for TX min which
5451 	 * have the low 3 bits cleared.
5452 	 */
5453 	BUILD_BUG_ON(min & 0x7);
5454 
5455 	if (np->flags & NIU_FLAGS_XMAC)
5456 		niu_init_tx_xmac(np, min, max);
5457 	else
5458 		niu_init_tx_bmac(np, min, max);
5459 }
5460 
5461 static int niu_reset_rx_xmac(struct niu *np)
5462 {
5463 	int limit;
5464 
5465 	nw64_mac(XRXMAC_SW_RST,
5466 		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5467 	limit = 1000;
5468 	while (--limit >= 0) {
5469 		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5470 						 XRXMAC_SW_RST_SOFT_RST)))
5471 			break;
5472 		udelay(100);
5473 	}
5474 	if (limit < 0) {
5475 		dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
5476 			"XRXMAC_SW_RST[%llx]\n",
5477 			np->port,
5478 			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
5479 		return -ENODEV;
5480 	}
5481 
5482 	return 0;
5483 }
5484 
5485 static int niu_reset_rx_bmac(struct niu *np)
5486 {
5487 	int limit;
5488 
5489 	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5490 	limit = 1000;
5491 	while (--limit >= 0) {
5492 		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5493 			break;
5494 		udelay(100);
5495 	}
5496 	if (limit < 0) {
5497 		dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
5498 			"BRXMAC_SW_RST[%llx]\n",
5499 			np->port,
5500 			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
5501 		return -ENODEV;
5502 	}
5503 
5504 	return 0;
5505 }
5506 
5507 static int niu_reset_rx_mac(struct niu *np)
5508 {
5509 	if (np->flags & NIU_FLAGS_XMAC)
5510 		return niu_reset_rx_xmac(np);
5511 	else
5512 		return niu_reset_rx_bmac(np);
5513 }
5514 
5515 static void niu_init_rx_xmac(struct niu *np)
5516 {
5517 	struct niu_parent *parent = np->parent;
5518 	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5519 	int first_rdc_table = tp->first_table_num;
5520 	unsigned long i;
5521 	u64 val;
5522 
5523 	nw64_mac(XMAC_ADD_FILT0, 0);
5524 	nw64_mac(XMAC_ADD_FILT1, 0);
5525 	nw64_mac(XMAC_ADD_FILT2, 0);
5526 	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
5527 	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
5528 	for (i = 0; i < MAC_NUM_HASH; i++)
5529 		nw64_mac(XMAC_HASH_TBL(i), 0);
5530 	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
5531 	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5532 	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5533 
5534 	val = nr64_mac(XMAC_CONFIG);
5535 	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
5536 		 XMAC_CONFIG_PROMISCUOUS |
5537 		 XMAC_CONFIG_PROMISC_GROUP |
5538 		 XMAC_CONFIG_ERR_CHK_DIS |
5539 		 XMAC_CONFIG_RX_CRC_CHK_DIS |
5540 		 XMAC_CONFIG_RESERVED_MULTICAST |
5541 		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
5542 		 XMAC_CONFIG_ADDR_FILTER_EN |
5543 		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
5544 		 XMAC_CONFIG_STRIP_CRC |
5545 		 XMAC_CONFIG_PASS_FLOW_CTRL |
5546 		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
5547 	val |= (XMAC_CONFIG_HASH_FILTER_EN);
5548 	nw64_mac(XMAC_CONFIG, val);
5549 
5550 	nw64_mac(RXMAC_BT_CNT, 0);
5551 	nw64_mac(RXMAC_BC_FRM_CNT, 0);
5552 	nw64_mac(RXMAC_MC_FRM_CNT, 0);
5553 	nw64_mac(RXMAC_FRAG_CNT, 0);
5554 	nw64_mac(RXMAC_HIST_CNT1, 0);
5555 	nw64_mac(RXMAC_HIST_CNT2, 0);
5556 	nw64_mac(RXMAC_HIST_CNT3, 0);
5557 	nw64_mac(RXMAC_HIST_CNT4, 0);
5558 	nw64_mac(RXMAC_HIST_CNT5, 0);
5559 	nw64_mac(RXMAC_HIST_CNT6, 0);
5560 	nw64_mac(RXMAC_HIST_CNT7, 0);
5561 	nw64_mac(RXMAC_MPSZER_CNT, 0);
5562 	nw64_mac(RXMAC_CRC_ER_CNT, 0);
5563 	nw64_mac(RXMAC_CD_VIO_CNT, 0);
5564 	nw64_mac(LINK_FAULT_CNT, 0);
5565 }
5566 
5567 static void niu_init_rx_bmac(struct niu *np)
5568 {
5569 	struct niu_parent *parent = np->parent;
5570 	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5571 	int first_rdc_table = tp->first_table_num;
5572 	unsigned long i;
5573 	u64 val;
5574 
5575 	nw64_mac(BMAC_ADD_FILT0, 0);
5576 	nw64_mac(BMAC_ADD_FILT1, 0);
5577 	nw64_mac(BMAC_ADD_FILT2, 0);
5578 	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
5579 	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
5580 	for (i = 0; i < MAC_NUM_HASH; i++)
5581 		nw64_mac(BMAC_HASH_TBL(i), 0);
5582 	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5583 	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5584 	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
5585 
5586 	val = nr64_mac(BRXMAC_CONFIG);
5587 	val &= ~(BRXMAC_CONFIG_ENABLE |
5588 		 BRXMAC_CONFIG_STRIP_PAD |
5589 		 BRXMAC_CONFIG_STRIP_FCS |
5590 		 BRXMAC_CONFIG_PROMISC |
5591 		 BRXMAC_CONFIG_PROMISC_GRP |
5592 		 BRXMAC_CONFIG_ADDR_FILT_EN |
5593 		 BRXMAC_CONFIG_DISCARD_DIS);
5594 	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
5595 	nw64_mac(BRXMAC_CONFIG, val);
5596 
5597 	val = nr64_mac(BMAC_ADDR_CMPEN);
5598 	val |= BMAC_ADDR_CMPEN_EN0;
5599 	nw64_mac(BMAC_ADDR_CMPEN, val);
5600 }
5601 
5602 static void niu_init_rx_mac(struct niu *np)
5603 {
5604 	niu_set_primary_mac(np, np->dev->dev_addr);
5605 
5606 	if (np->flags & NIU_FLAGS_XMAC)
5607 		niu_init_rx_xmac(np);
5608 	else
5609 		niu_init_rx_bmac(np);
5610 }
5611 
5612 static void niu_enable_tx_xmac(struct niu *np, int on)
5613 {
5614 	u64 val = nr64_mac(XMAC_CONFIG);
5615 
5616 	if (on)
5617 		val |= XMAC_CONFIG_TX_ENABLE;
5618 	else
5619 		val &= ~XMAC_CONFIG_TX_ENABLE;
5620 	nw64_mac(XMAC_CONFIG, val);
5621 }
5622 
5623 static void niu_enable_tx_bmac(struct niu *np, int on)
5624 {
5625 	u64 val = nr64_mac(BTXMAC_CONFIG);
5626 
5627 	if (on)
5628 		val |= BTXMAC_CONFIG_ENABLE;
5629 	else
5630 		val &= ~BTXMAC_CONFIG_ENABLE;
5631 	nw64_mac(BTXMAC_CONFIG, val);
5632 }
5633 
5634 static void niu_enable_tx_mac(struct niu *np, int on)
5635 {
5636 	if (np->flags & NIU_FLAGS_XMAC)
5637 		niu_enable_tx_xmac(np, on);
5638 	else
5639 		niu_enable_tx_bmac(np, on);
5640 }
5641 
5642 static void niu_enable_rx_xmac(struct niu *np, int on)
5643 {
5644 	u64 val = nr64_mac(XMAC_CONFIG);
5645 
5646 	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5647 		 XMAC_CONFIG_PROMISCUOUS);
5648 
5649 	if (np->flags & NIU_FLAGS_MCAST)
5650 		val |= XMAC_CONFIG_HASH_FILTER_EN;
5651 	if (np->flags & NIU_FLAGS_PROMISC)
5652 		val |= XMAC_CONFIG_PROMISCUOUS;
5653 
5654 	if (on)
5655 		val |= XMAC_CONFIG_RX_MAC_ENABLE;
5656 	else
5657 		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5658 	nw64_mac(XMAC_CONFIG, val);
5659 }
5660 
5661 static void niu_enable_rx_bmac(struct niu *np, int on)
5662 {
5663 	u64 val = nr64_mac(BRXMAC_CONFIG);
5664 
5665 	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5666 		 BRXMAC_CONFIG_PROMISC);
5667 
5668 	if (np->flags & NIU_FLAGS_MCAST)
5669 		val |= BRXMAC_CONFIG_HASH_FILT_EN;
5670 	if (np->flags & NIU_FLAGS_PROMISC)
5671 		val |= BRXMAC_CONFIG_PROMISC;
5672 
5673 	if (on)
5674 		val |= BRXMAC_CONFIG_ENABLE;
5675 	else
5676 		val &= ~BRXMAC_CONFIG_ENABLE;
5677 	nw64_mac(BRXMAC_CONFIG, val);
5678 }
5679 
5680 static void niu_enable_rx_mac(struct niu *np, int on)
5681 {
5682 	if (np->flags & NIU_FLAGS_XMAC)
5683 		niu_enable_rx_xmac(np, on);
5684 	else
5685 		niu_enable_rx_bmac(np, on);
5686 }
5687 
5688 static int niu_init_mac(struct niu *np)
5689 {
5690 	int err;
5691 
5692 	niu_init_xif(np);
5693 	err = niu_init_pcs(np);
5694 	if (err)
5695 		return err;
5696 
5697 	err = niu_reset_tx_mac(np);
5698 	if (err)
5699 		return err;
5700 	niu_init_tx_mac(np);
5701 	err = niu_reset_rx_mac(np);
5702 	if (err)
5703 		return err;
5704 	niu_init_rx_mac(np);
5705 
5706 	/* This looks hokey, but the RX MAC reset we just did will
5707 	 * undo some of the state we set up in niu_init_tx_mac(), so we
5708 	 * have to call it again.  In particular, the RX MAC reset will
5709 	 * set the XMAC_MAX register back to its default value.
5710 	 */
5711 	niu_init_tx_mac(np);
5712 	niu_enable_tx_mac(np, 1);
5713 
5714 	niu_enable_rx_mac(np, 1);
5715 
5716 	return 0;
5717 }
5718 
5719 static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5720 {
5721 	(void) niu_tx_channel_stop(np, rp->tx_channel);
5722 }
5723 
5724 static void niu_stop_tx_channels(struct niu *np)
5725 {
5726 	int i;
5727 
5728 	for (i = 0; i < np->num_tx_rings; i++) {
5729 		struct tx_ring_info *rp = &np->tx_rings[i];
5730 
5731 		niu_stop_one_tx_channel(np, rp);
5732 	}
5733 }
5734 
5735 static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5736 {
5737 	(void) niu_tx_channel_reset(np, rp->tx_channel);
5738 }
5739 
5740 static void niu_reset_tx_channels(struct niu *np)
5741 {
5742 	int i;
5743 
5744 	for (i = 0; i < np->num_tx_rings; i++) {
5745 		struct tx_ring_info *rp = &np->tx_rings[i];
5746 
5747 		niu_reset_one_tx_channel(np, rp);
5748 	}
5749 }
5750 
5751 static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5752 {
5753 	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5754 }
5755 
5756 static void niu_stop_rx_channels(struct niu *np)
5757 {
5758 	int i;
5759 
5760 	for (i = 0; i < np->num_rx_rings; i++) {
5761 		struct rx_ring_info *rp = &np->rx_rings[i];
5762 
5763 		niu_stop_one_rx_channel(np, rp);
5764 	}
5765 }
5766 
5767 static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5768 {
5769 	int channel = rp->rx_channel;
5770 
5771 	(void) niu_rx_channel_reset(np, channel);
5772 	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
5773 	nw64(RX_DMA_CTL_STAT(channel), 0);
5774 	(void) niu_enable_rx_channel(np, channel, 0);
5775 }
5776 
5777 static void niu_reset_rx_channels(struct niu *np)
5778 {
5779 	int i;
5780 
5781 	for (i = 0; i < np->num_rx_rings; i++) {
5782 		struct rx_ring_info *rp = &np->rx_rings[i];
5783 
5784 		niu_reset_one_rx_channel(np, rp);
5785 	}
5786 }
5787 
5788 static void niu_disable_ipp(struct niu *np)
5789 {
5790 	u64 rd, wr, val;
5791 	int limit;
5792 
5793 	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5794 	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5795 	limit = 100;
5796 	while (--limit >= 0 && (rd != wr)) {
5797 		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5798 		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5799 	}
5800 	if (limit < 0 &&
5801 	    (rd != 0 && wr != 1)) {
5802 		dev_err(np->device, PFX "%s: IPP would not quiesce, "
5803 			"rd_ptr[%llx] wr_ptr[%llx]\n",
5804 			np->dev->name,
5805 			(unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
5806 			(unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
5807 	}
5808 
5809 	val = nr64_ipp(IPP_CFIG);
5810 	val &= ~(IPP_CFIG_IPP_ENABLE |
5811 		 IPP_CFIG_DFIFO_ECC_EN |
5812 		 IPP_CFIG_DROP_BAD_CRC |
5813 		 IPP_CFIG_CKSUM_EN);
5814 	nw64_ipp(IPP_CFIG, val);
5815 
5816 	(void) niu_ipp_reset(np);
5817 }
5818 
5819 static int niu_init_hw(struct niu *np)
5820 {
5821 	int i, err;
5822 
5823 	niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
5824 	niu_txc_enable_port(np, 1);
5825 	niu_txc_port_dma_enable(np, 1);
5826 	niu_txc_set_imask(np, 0);
5827 
5828 	niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
5829 	for (i = 0; i < np->num_tx_rings; i++) {
5830 		struct tx_ring_info *rp = &np->tx_rings[i];
5831 
5832 		err = niu_init_one_tx_channel(np, rp);
5833 		if (err)
5834 			return err;
5835 	}
5836 
5837 	niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
5838 	err = niu_init_rx_channels(np);
5839 	if (err)
5840 		goto out_uninit_tx_channels;
5841 
5842 	niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
5843 	err = niu_init_classifier_hw(np);
5844 	if (err)
5845 		goto out_uninit_rx_channels;
5846 
5847 	niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
5848 	err = niu_init_zcp(np);
5849 	if (err)
5850 		goto out_uninit_rx_channels;
5851 
5852 	niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
5853 	err = niu_init_ipp(np);
5854 	if (err)
5855 		goto out_uninit_rx_channels;
5856 
5857 	niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
5858 	err = niu_init_mac(np);
5859 	if (err)
5860 		goto out_uninit_ipp;
5861 
5862 	return 0;
5863 
5864 out_uninit_ipp:
5865 	niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
5866 	niu_disable_ipp(np);
5867 
5868 out_uninit_rx_channels:
5869 	niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
5870 	niu_stop_rx_channels(np);
5871 	niu_reset_rx_channels(np);
5872 
5873 out_uninit_tx_channels:
5874 	niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
5875 	niu_stop_tx_channels(np);
5876 	niu_reset_tx_channels(np);
5877 
5878 	return err;
5879 }
5880 
5881 static void niu_stop_hw(struct niu *np)
5882 {
5883 	niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
5884 	niu_enable_interrupts(np, 0);
5885 
5886 	niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
5887 	niu_enable_rx_mac(np, 0);
5888 
5889 	niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
5890 	niu_disable_ipp(np);
5891 
5892 	niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
5893 	niu_stop_tx_channels(np);
5894 
5895 	niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
5896 	niu_stop_rx_channels(np);
5897 
5898 	niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
5899 	niu_reset_tx_channels(np);
5900 
5901 	niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
5902 	niu_reset_rx_channels(np);
5903 }
5904 
5905 static void niu_set_irq_name(struct niu *np)
5906 {
5907 	int port = np->port;
5908 	int i, j = 1;
5909 
5910 	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
5911 
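	/* Slot 0 is always the MAC interrupt; port 0 additionally owns
	 * the MIF and SYSERR vectors, so its per-ring names start at 3.
	 */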
5912 	if (port == 0) {
5913 		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
5914 		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
5915 		j = 3;
5916 	}
5917 
5918 	for (i = 0; i < np->num_ldg - j; i++) {
5919 		if (i < np->num_rx_rings)
5920 			sprintf(np->irq_name[i+j], "%s-rx-%d",
5921 				np->dev->name, i);
5922 		else if (i < np->num_tx_rings + np->num_rx_rings)
5923 			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
5924 				i - np->num_rx_rings);
5925 	}
5926 }
5927 
5928 static int niu_request_irq(struct niu *np)
5929 {
5930 	int i, j, err;
5931 
5932 	niu_set_irq_name(np);
5933 
5934 	err = 0;
5935 	for (i = 0; i < np->num_ldg; i++) {
5936 		struct niu_ldg *lp = &np->ldg[i];
5937 
5938 		err = request_irq(lp->irq, niu_interrupt,
5939 				  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
5940 				  np->irq_name[i], lp);
5941 		if (err)
5942 			goto out_free_irqs;
5943 
5944 	}
5945 
5946 	return 0;
5947 
5948 out_free_irqs:
5949 	for (j = 0; j < i; j++) {
5950 		struct niu_ldg *lp = &np->ldg[j];
5951 
5952 		free_irq(lp->irq, lp);
5953 	}
5954 	return err;
5955 }
5956 
5957 static void niu_free_irq(struct niu *np)
5958 {
5959 	int i;
5960 
5961 	for (i = 0; i < np->num_ldg; i++) {
5962 		struct niu_ldg *lp = &np->ldg[i];
5963 
5964 		free_irq(lp->irq, lp);
5965 	}
5966 }
5967 
5968 static void niu_enable_napi(struct niu *np)
5969 {
5970 	int i;
5971 
5972 	for (i = 0; i < np->num_ldg; i++)
5973 		napi_enable(&np->ldg[i].napi);
5974 }
5975 
5976 static void niu_disable_napi(struct niu *np)
5977 {
5978 	int i;
5979 
5980 	for (i = 0; i < np->num_ldg; i++)
5981 		napi_disable(&np->ldg[i].napi);
5982 }
5983 
5984 static int niu_open(struct net_device *dev)
5985 {
5986 	struct niu *np = netdev_priv(dev);
5987 	int err;
5988 
5989 	netif_carrier_off(dev);
5990 
5991 	err = niu_alloc_channels(np);
5992 	if (err)
5993 		goto out_err;
5994 
5995 	err = niu_enable_interrupts(np, 0);
5996 	if (err)
5997 		goto out_free_channels;
5998 
5999 	err = niu_request_irq(np);
6000 	if (err)
6001 		goto out_free_channels;
6002 
6003 	niu_enable_napi(np);
6004 
6005 	spin_lock_irq(&np->lock);
6006 
6007 	err = niu_init_hw(np);
6008 	if (!err) {
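		/* Prepare the 1 Hz maintenance timer; it is only added
		 * below once interrupts are successfully enabled.
		 */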
6009 		init_timer(&np->timer);
6010 		np->timer.expires = jiffies + HZ;
6011 		np->timer.data = (unsigned long) np;
6012 		np->timer.function = niu_timer;
6013 
6014 		err = niu_enable_interrupts(np, 1);
6015 		if (err)
6016 			niu_stop_hw(np);
6017 	}
6018 
6019 	spin_unlock_irq(&np->lock);
6020 
6021 	if (err) {
6022 		niu_disable_napi(np);
6023 		goto out_free_irq;
6024 	}
6025 
6026 	netif_tx_start_all_queues(dev);
6027 
6028 	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6029 		netif_carrier_on(dev);
6030 
6031 	add_timer(&np->timer);
6032 
6033 	return 0;
6034 
6035 out_free_irq:
6036 	niu_free_irq(np);
6037 
6038 out_free_channels:
6039 	niu_free_channels(np);
6040 
6041 out_err:
6042 	return err;
6043 }
6044 
6045 static void niu_full_shutdown(struct niu *np, struct net_device *dev)
6046 {
6047 	cancel_work_sync(&np->reset_task);
6048 
6049 	niu_disable_napi(np);
6050 	netif_tx_stop_all_queues(dev);
6051 
6052 	del_timer_sync(&np->timer);
6053 
6054 	spin_lock_irq(&np->lock);
6055 
6056 	niu_stop_hw(np);
6057 
6058 	spin_unlock_irq(&np->lock);
6059 }
6060 
6061 static int niu_close(struct net_device *dev)
6062 {
6063 	struct niu *np = netdev_priv(dev);
6064 
6065 	niu_full_shutdown(np, dev);
6066 
6067 	niu_free_irq(np);
6068 
6069 	niu_free_channels(np);
6070 
6071 	niu_handle_led(np, 0);
6072 
6073 	return 0;
6074 }
6075 
6076 static void niu_sync_xmac_stats(struct niu *np)
6077 {
6078 	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
6079 
6080 	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
6081 	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
6082 
6083 	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
6084 	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
6085 	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
6086 	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
6087 	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
6088 	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
6089 	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
6090 	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
6091 	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
6092 	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
6093 	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
6094 	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
6095 	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
6096 	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
6097 	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
6098 	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
6099 }
6100 
6101 static void niu_sync_bmac_stats(struct niu *np)
6102 {
6103 	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
6104 
6105 	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
6106 	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
6107 
6108 	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
6109 	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6110 	mp->rx_crc_errors += nr64_mac(BRXMAC_CRC_ERR_CNT);
6111 	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
6112 }
6113 
6114 static void niu_sync_mac_stats(struct niu *np)
6115 {
6116 	if (np->flags & NIU_FLAGS_XMAC)
6117 		niu_sync_xmac_stats(np);
6118 	else
6119 		niu_sync_bmac_stats(np);
6120 }
6121 
6122 static void niu_get_rx_stats(struct niu *np)
6123 {
6124 	unsigned long pkts, dropped, errors, bytes;
6125 	int i;
6126 
6127 	pkts = dropped = errors = bytes = 0;
6128 	for (i = 0; i < np->num_rx_rings; i++) {
6129 		struct rx_ring_info *rp = &np->rx_rings[i];
6130 
6131 		niu_sync_rx_discard_stats(np, rp, 0);
6132 
6133 		pkts += rp->rx_packets;
6134 		bytes += rp->rx_bytes;
6135 		dropped += rp->rx_dropped;
6136 		errors += rp->rx_errors;
6137 	}
6138 	np->dev->stats.rx_packets = pkts;
6139 	np->dev->stats.rx_bytes = bytes;
6140 	np->dev->stats.rx_dropped = dropped;
6141 	np->dev->stats.rx_errors = errors;
6142 }
6143 
6144 static void niu_get_tx_stats(struct niu *np)
6145 {
6146 	unsigned long pkts, errors, bytes;
6147 	int i;
6148 
6149 	pkts = errors = bytes = 0;
6150 	for (i = 0; i < np->num_tx_rings; i++) {
6151 		struct tx_ring_info *rp = &np->tx_rings[i];
6152 
6153 		pkts += rp->tx_packets;
6154 		bytes += rp->tx_bytes;
6155 		errors += rp->tx_errors;
6156 	}
6157 	np->dev->stats.tx_packets = pkts;
6158 	np->dev->stats.tx_bytes = bytes;
6159 	np->dev->stats.tx_errors = errors;
6160 }
6161 
6162 static struct net_device_stats *niu_get_stats(struct net_device *dev)
6163 {
6164 	struct niu *np = netdev_priv(dev);
6165 
6166 	niu_get_rx_stats(np);
6167 	niu_get_tx_stats(np);
6168 
6169 	return &dev->stats;
6170 }
6171 
6172 static void niu_load_hash_xmac(struct niu *np, u16 *hash)
6173 {
6174 	int i;
6175 
6176 	for (i = 0; i < 16; i++)
6177 		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
6178 }
6179 
6180 static void niu_load_hash_bmac(struct niu *np, u16 *hash)
6181 {
6182 	int i;
6183 
6184 	for (i = 0; i < 16; i++)
6185 		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
6186 }
6187 
6188 static void niu_load_hash(struct niu *np, u16 *hash)
6189 {
6190 	if (np->flags & NIU_FLAGS_XMAC)
6191 		niu_load_hash_xmac(np, hash);
6192 	else
6193 		niu_load_hash_bmac(np, hash);
6194 }
6195 
6196 static void niu_set_rx_mode(struct net_device *dev)
6197 {
6198 	struct niu *np = netdev_priv(dev);
6199 	int i, alt_cnt, err;
6200 	struct dev_addr_list *addr;
6201 	unsigned long flags;
6202 	u16 hash[16] = { 0, };
6203 
6204 	spin_lock_irqsave(&np->lock, flags);
6205 	niu_enable_rx_mac(np, 0);
6206 
6207 	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6208 	if (dev->flags & IFF_PROMISC)
6209 		np->flags |= NIU_FLAGS_PROMISC;
6210 	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
6211 		np->flags |= NIU_FLAGS_MCAST;
6212 
6213 	alt_cnt = dev->uc_count;
6214 	if (alt_cnt > niu_num_alt_addr(np)) {
6215 		alt_cnt = 0;
6216 		np->flags |= NIU_FLAGS_PROMISC;
6217 	}
6218 
6219 	if (alt_cnt) {
6220 		int index = 0;
6221 
6222 		for (addr = dev->uc_list; addr; addr = addr->next) {
6223 			err = niu_set_alt_mac(np, index,
6224 					      addr->da_addr);
6225 			if (err)
6226 				printk(KERN_WARNING PFX "%s: Error %d "
6227 				       "adding alt mac %d\n",
6228 				       dev->name, err, index);
6229 			err = niu_enable_alt_mac(np, index, 1);
6230 			if (err)
6231 				printk(KERN_WARNING PFX "%s: Error %d "
6232 				       "enabling alt mac %d\n",
6233 				       dev->name, err, index);
6234 
6235 			index++;
6236 		}
6237 	} else {
6238 		int alt_start;
6239 		if (np->flags & NIU_FLAGS_XMAC)
6240 			alt_start = 0;
6241 		else
6242 			alt_start = 1;
6243 		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6244 			err = niu_enable_alt_mac(np, i, 0);
6245 			if (err)
6246 				printk(KERN_WARNING PFX "%s: Error %d "
6247 				       "disabling alt mac %d\n",
6248 				       dev->name, err, i);
6249 		}
6250 	}
6251 	if (dev->flags & IFF_ALLMULTI) {
6252 		for (i = 0; i < 16; i++)
6253 			hash[i] = 0xffff;
6254 	} else if (dev->mc_count > 0) {
6255 		for (addr = dev->mc_list; addr; addr = addr->next) {
6256 			u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
6257 
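			/* Use the top 8 CRC bits: the high nibble selects
			 * one of the 16 hash registers, the low nibble a
			 * bit within it.
			 */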
6258 			crc >>= 24;
6259 			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
6260 		}
6261 	}
6262 
6263 	if (np->flags & NIU_FLAGS_MCAST)
6264 		niu_load_hash(np, hash);
6265 
6266 	niu_enable_rx_mac(np, 1);
6267 	spin_unlock_irqrestore(&np->lock, flags);
6268 }
6269 
6270 static int niu_set_mac_addr(struct net_device *dev, void *p)
6271 {
6272 	struct niu *np = netdev_priv(dev);
6273 	struct sockaddr *addr = p;
6274 	unsigned long flags;
6275 
6276 	if (!is_valid_ether_addr(addr->sa_data))
6277 		return -EINVAL;
6278 
6279 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
6280 
6281 	if (!netif_running(dev))
6282 		return 0;
6283 
6284 	spin_lock_irqsave(&np->lock, flags);
6285 	niu_enable_rx_mac(np, 0);
6286 	niu_set_primary_mac(np, dev->dev_addr);
6287 	niu_enable_rx_mac(np, 1);
6288 	spin_unlock_irqrestore(&np->lock, flags);
6289 
6290 	return 0;
6291 }
6292 
6293 static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6294 {
6295 	return -EOPNOTSUPP;
6296 }
6297 
6298 static void niu_netif_stop(struct niu *np)
6299 {
6300 	np->dev->trans_start = jiffies;	/* prevent tx timeout */
6301 
6302 	niu_disable_napi(np);
6303 
6304 	netif_tx_disable(np->dev);
6305 }
6306 
6307 static void niu_netif_start(struct niu *np)
6308 {
6309 	/* NOTE: unconditional netif_wake_queue is only appropriate
6310 	 * so long as all callers are assured to have free tx slots
6311 	 * (such as after niu_init_hw).
6312 	 */
6313 	netif_tx_wake_all_queues(np->dev);
6314 
6315 	niu_enable_napi(np);
6316 
6317 	niu_enable_interrupts(np, 1);
6318 }
6319 
6320 static void niu_reset_buffers(struct niu *np)
6321 {
6322 	int i, j, k, err;
6323 
6324 	if (np->rx_rings) {
6325 		for (i = 0; i < np->num_rx_rings; i++) {
6326 			struct rx_ring_info *rp = &np->rx_rings[i];
6327 
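			/* Re-seed the RBR from the pages still tracked in
			 * rxhash, then top the ring up with fresh pages.
			 */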
6328 			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
6329 				struct page *page;
6330 
6331 				page = rp->rxhash[j];
6332 				while (page) {
6333 					struct page *next =
6334 						(struct page *) page->mapping;
6335 					u64 base = page->index;
6336 					base = base >> RBR_DESCR_ADDR_SHIFT;
6337 					rp->rbr[k++] = cpu_to_le32(base);
6338 					page = next;
6339 				}
6340 			}
6341 			for (; k < MAX_RBR_RING_SIZE; k++) {
6342 				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6343 				if (unlikely(err))
6344 					break;
6345 			}
6346 
6347 			rp->rbr_index = rp->rbr_table_size - 1;
6348 			rp->rcr_index = 0;
6349 			rp->rbr_pending = 0;
6350 			rp->rbr_refill_pending = 0;
6351 		}
6352 	}
6353 	if (np->tx_rings) {
6354 		for (i = 0; i < np->num_tx_rings; i++) {
6355 			struct tx_ring_info *rp = &np->tx_rings[i];
6356 
6357 			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
6358 				if (rp->tx_buffs[j].skb)
6359 					(void) release_tx_packet(np, rp, j);
6360 			}
6361 
6362 			rp->pending = MAX_TX_RING_SIZE;
6363 			rp->prod = 0;
6364 			rp->cons = 0;
6365 			rp->wrap_bit = 0;
6366 		}
6367 	}
6368 }
6369 
6370 static void niu_reset_task(struct work_struct *work)
6371 {
6372 	struct niu *np = container_of(work, struct niu, reset_task);
6373 	unsigned long flags;
6374 	int err;
6375 
6376 	spin_lock_irqsave(&np->lock, flags);
6377 	if (!netif_running(np->dev)) {
6378 		spin_unlock_irqrestore(&np->lock, flags);
6379 		return;
6380 	}
6381 
6382 	spin_unlock_irqrestore(&np->lock, flags);
6383 
6384 	del_timer_sync(&np->timer);
6385 
6386 	niu_netif_stop(np);
6387 
6388 	spin_lock_irqsave(&np->lock, flags);
6389 
6390 	niu_stop_hw(np);
6391 
6392 	spin_unlock_irqrestore(&np->lock, flags);
6393 
6394 	niu_reset_buffers(np);
6395 
6396 	spin_lock_irqsave(&np->lock, flags);
6397 
6398 	err = niu_init_hw(np);
6399 	if (!err) {
6400 		np->timer.expires = jiffies + HZ;
6401 		add_timer(&np->timer);
6402 		niu_netif_start(np);
6403 	}
6404 
6405 	spin_unlock_irqrestore(&np->lock, flags);
6406 }
6407 
6408 static void niu_tx_timeout(struct net_device *dev)
6409 {
6410 	struct niu *np = netdev_priv(dev);
6411 
6412 	dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
6413 		dev->name);
6414 
6415 	schedule_work(&np->reset_task);
6416 }
6417 
6418 static void niu_set_txd(struct tx_ring_info *rp, int index,
6419 			u64 mapping, u64 len, u64 mark,
6420 			u64 n_frags)
6421 {
6422 	__le64 *desc = &rp->descr[index];
6423 
6424 	*desc = cpu_to_le64(mark |
6425 			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
6426 			    (len << TX_DESC_TR_LEN_SHIFT) |
6427 			    (mapping & TX_DESC_SAD));
6428 }
6429 
6430 static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6431 				u64 pad_bytes, u64 len)
6432 {
6433 	u16 eth_proto, eth_proto_inner;
6434 	u64 csum_bits, l3off, ihl, ret;
6435 	u8 ip_proto;
6436 	int ipv6;
6437 
6438 	eth_proto = be16_to_cpu(ehdr->h_proto);
6439 	eth_proto_inner = eth_proto;
6440 	if (eth_proto == ETH_P_8021Q) {
6441 		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
6442 		__be16 val = vp->h_vlan_encapsulated_proto;
6443 
6444 		eth_proto_inner = be16_to_cpu(val);
6445 	}
6446 
6447 	ipv6 = ihl = 0;
6448 	switch (skb->protocol) {
6449 	case __constant_htons(ETH_P_IP):
6450 		ip_proto = ip_hdr(skb)->protocol;
6451 		ihl = ip_hdr(skb)->ihl;
6452 		break;
6453 	case __constant_htons(ETH_P_IPV6):
6454 		ip_proto = ipv6_hdr(skb)->nexthdr;
6455 		ihl = (40 >> 2);
6456 		ipv6 = 1;
6457 		break;
6458 	default:
6459 		ip_proto = ihl = 0;
6460 		break;
6461 	}
6462 
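	/* L4START/L4STUFF give the transport header and checksum field
	 * offsets in 16-bit words, measured from the start of the L2
	 * frame (i.e. excluding the pad and internal TX header).
	 */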
6463 	csum_bits = TXHDR_CSUM_NONE;
6464 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6465 		u64 start, stuff;
6466 
6467 		csum_bits = (ip_proto == IPPROTO_TCP ?
6468 			     TXHDR_CSUM_TCP :
6469 			     (ip_proto == IPPROTO_UDP ?
6470 			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
6471 
6472 		start = skb_transport_offset(skb) -
6473 			(pad_bytes + sizeof(struct tx_pkt_hdr));
6474 		stuff = start + skb->csum_offset;
6475 
6476 		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
6477 		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
6478 	}
6479 
6480 	l3off = skb_network_offset(skb) -
6481 		(pad_bytes + sizeof(struct tx_pkt_hdr));
6482 
6483 	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
6484 	       (len << TXHDR_LEN_SHIFT) |
6485 	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
6486 	       (ihl << TXHDR_IHL_SHIFT) |
6487 	       ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
6488 	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6489 	       (ipv6 ? TXHDR_IP_VER : 0) |
6490 	       csum_bits);
6491 
6492 	return ret;
6493 }
6494 
6495 static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
6496 {
6497 	struct niu *np = netdev_priv(dev);
6498 	unsigned long align, headroom;
6499 	struct netdev_queue *txq;
6500 	struct tx_ring_info *rp;
6501 	struct tx_pkt_hdr *tp;
6502 	unsigned int len, nfg;
6503 	struct ethhdr *ehdr;
6504 	int prod, i, tlen;
6505 	u64 mapping, mrk;
6506 
6507 	i = skb_get_queue_mapping(skb);
6508 	rp = &np->tx_rings[i];
6509 	txq = netdev_get_tx_queue(dev, i);
6510 
6511 	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6512 		netif_tx_stop_queue(txq);
6513 		dev_err(np->device, PFX "%s: BUG! Tx ring full when "
6514 			"queue awake!\n", dev->name);
6515 		rp->tx_errors++;
6516 		return NETDEV_TX_BUSY;
6517 	}
6518 
6519 	if (skb->len < ETH_ZLEN) {
6520 		unsigned int pad_bytes = ETH_ZLEN - skb->len;
6521 
6522 		if (skb_pad(skb, pad_bytes))
6523 			goto out;
6524 		skb_put(skb, pad_bytes);
6525 	}
6526 
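	/* Worst-case headroom: the internal TX header plus up to 15
	 * bytes of padding to reach 16-byte alignment.
	 */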
6527 	len = sizeof(struct tx_pkt_hdr) + 15;
6528 	if (skb_headroom(skb) < len) {
6529 		struct sk_buff *skb_new;
6530 
6531 		skb_new = skb_realloc_headroom(skb, len);
6532 		if (!skb_new) {
6533 			rp->tx_errors++;
6534 			goto out_drop;
6535 		}
6536 		kfree_skb(skb);
6537 		skb = skb_new;
6538 	} else
6539 		skb_orphan(skb);
6540 
6541 	align = ((unsigned long) skb->data & (16 - 1));
6542 	headroom = align + sizeof(struct tx_pkt_hdr);
6543 
6544 	ehdr = (struct ethhdr *) skb->data;
6545 	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
6546 
6547 	len = skb->len - sizeof(struct tx_pkt_hdr);
6548 	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
6549 	tp->resv = 0;
6550 
6551 	len = skb_headlen(skb);
6552 	mapping = np->ops->map_single(np->device, skb->data,
6553 				      len, DMA_TO_DEVICE);
6554 
6555 	prod = rp->prod;
6556 
6557 	rp->tx_buffs[prod].skb = skb;
6558 	rp->tx_buffs[prod].mapping = mapping;
6559 
6560 	mrk = TX_DESC_SOP;
6561 	if (++rp->mark_counter == rp->mark_freq) {
6562 		rp->mark_counter = 0;
6563 		mrk |= TX_DESC_MARK;
6564 		rp->mark_pending++;
6565 	}
6566 
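	/* Count descriptors up front: one per MAX_TX_DESC_LEN chunk of
	 * the linear area, plus one per page fragment.
	 */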
6567 	tlen = len;
6568 	nfg = skb_shinfo(skb)->nr_frags;
6569 	while (tlen > 0) {
6570 		tlen -= MAX_TX_DESC_LEN;
6571 		nfg++;
6572 	}
6573 
6574 	while (len > 0) {
6575 		unsigned int this_len = len;
6576 
6577 		if (this_len > MAX_TX_DESC_LEN)
6578 			this_len = MAX_TX_DESC_LEN;
6579 
6580 		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6581 		mrk = nfg = 0;
6582 
6583 		prod = NEXT_TX(rp, prod);
6584 		mapping += this_len;
6585 		len -= this_len;
6586 	}
6587 
6588 	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
6589 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6590 
6591 		len = frag->size;
6592 		mapping = np->ops->map_page(np->device, frag->page,
6593 					    frag->page_offset, len,
6594 					    DMA_TO_DEVICE);
6595 
6596 		rp->tx_buffs[prod].skb = NULL;
6597 		rp->tx_buffs[prod].mapping = mapping;
6598 
6599 		niu_set_txd(rp, prod, mapping, len, 0, 0);
6600 
6601 		prod = NEXT_TX(rp, prod);
6602 	}
6603 
6604 	if (prod < rp->prod)
6605 		rp->wrap_bit ^= TX_RING_KICK_WRAP;
6606 	rp->prod = prod;
6607 
6608 	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
6609 
6610 	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
6611 		netif_tx_stop_queue(txq);
6612 		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
6613 			netif_tx_wake_queue(txq);
6614 	}
6615 
6616 	dev->trans_start = jiffies;
6617 
6618 out:
6619 	return NETDEV_TX_OK;
6620 
6621 out_drop:
6622 	rp->tx_errors++;
6623 	kfree_skb(skb);
6624 	goto out;
6625 }
6626 
6627 static int niu_change_mtu(struct net_device *dev, int new_mtu)
6628 {
6629 	struct niu *np = netdev_priv(dev);
6630 	int err, orig_jumbo, new_jumbo;
6631 
6632 	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
6633 		return -EINVAL;
6634 
6635 	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
6636 	new_jumbo = (new_mtu > ETH_DATA_LEN);
6637 
6638 	dev->mtu = new_mtu;
6639 
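	/* Only a transition across the standard/jumbo frame boundary
	 * requires the channels to be torn down and rebuilt.
	 */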
6640 	if (!netif_running(dev) ||
6641 	    (orig_jumbo == new_jumbo))
6642 		return 0;
6643 
6644 	niu_full_shutdown(np, dev);
6645 
6646 	niu_free_channels(np);
6647 
6648 	niu_enable_napi(np);
6649 
6650 	err = niu_alloc_channels(np);
6651 	if (err)
6652 		return err;
6653 
6654 	spin_lock_irq(&np->lock);
6655 
6656 	err = niu_init_hw(np);
6657 	if (!err) {
6658 		init_timer(&np->timer);
6659 		np->timer.expires = jiffies + HZ;
6660 		np->timer.data = (unsigned long) np;
6661 		np->timer.function = niu_timer;
6662 
6663 		err = niu_enable_interrupts(np, 1);
6664 		if (err)
6665 			niu_stop_hw(np);
6666 	}
6667 
6668 	spin_unlock_irq(&np->lock);
6669 
6670 	if (!err) {
6671 		netif_tx_start_all_queues(dev);
6672 		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6673 			netif_carrier_on(dev);
6674 
6675 		add_timer(&np->timer);
6676 	}
6677 
6678 	return err;
6679 }
6680 
6681 static void niu_get_drvinfo(struct net_device *dev,
6682 			    struct ethtool_drvinfo *info)
6683 {
6684 	struct niu *np = netdev_priv(dev);
6685 	struct niu_vpd *vpd = &np->vpd;
6686 
6687 	strcpy(info->driver, DRV_MODULE_NAME);
6688 	strcpy(info->version, DRV_MODULE_VERSION);
6689 	sprintf(info->fw_version, "%d.%d",
6690 		vpd->fcode_major, vpd->fcode_minor);
6691 	if (np->parent->plat_type != PLAT_TYPE_NIU)
6692 		strcpy(info->bus_info, pci_name(np->pdev));
6693 }
6694 
6695 static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6696 {
6697 	struct niu *np = netdev_priv(dev);
6698 	struct niu_link_config *lp;
6699 
6700 	lp = &np->link_config;
6701 
6702 	memset(cmd, 0, sizeof(*cmd));
6703 	cmd->phy_address = np->phy_addr;
6704 	cmd->supported = lp->supported;
6705 	cmd->advertising = lp->advertising;
6706 	cmd->autoneg = lp->autoneg;
6707 	cmd->speed = lp->active_speed;
6708 	cmd->duplex = lp->active_duplex;
6709 
6710 	return 0;
6711 }
6712 
6713 static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6714 {
6715 	return -EINVAL;
6716 }
6717 
6718 static u32 niu_get_msglevel(struct net_device *dev)
6719 {
6720 	struct niu *np = netdev_priv(dev);
6721 	return np->msg_enable;
6722 }
6723 
6724 static void niu_set_msglevel(struct net_device *dev, u32 value)
6725 {
6726 	struct niu *np = netdev_priv(dev);
6727 	np->msg_enable = value;
6728 }
6729 
6730 static int niu_get_eeprom_len(struct net_device *dev)
6731 {
6732 	struct niu *np = netdev_priv(dev);
6733 
6734 	return np->eeprom_len;
6735 }
6736 
6737 static int niu_get_eeprom(struct net_device *dev,
6738 			  struct ethtool_eeprom *eeprom, u8 *data)
6739 {
6740 	struct niu *np = netdev_priv(dev);
6741 	u32 offset, len, val;
6742 
6743 	offset = eeprom->offset;
6744 	len = eeprom->len;
6745 
6746 	if (offset + len < offset)
6747 		return -EINVAL;
6748 	if (offset >= np->eeprom_len)
6749 		return -EINVAL;
6750 	if (offset + len > np->eeprom_len)
6751 		len = eeprom->len = np->eeprom_len - offset;
6752 
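	/* The SPROM is read 32 bits at a time: handle an unaligned head,
	 * then whole words, then any tail bytes.
	 */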
6753 	if (offset & 3) {
6754 		u32 b_offset, b_count;
6755 
6756 		b_offset = offset & 3;
6757 		b_count = 4 - b_offset;
6758 		if (b_count > len)
6759 			b_count = len;
6760 
6761 		val = nr64(ESPC_NCR((offset - b_offset) / 4));
6762 		memcpy(data, ((char *)&val) + b_offset, b_count);
6763 		data += b_count;
6764 		len -= b_count;
6765 		offset += b_count;
6766 	}
6767 	while (len >= 4) {
6768 		val = nr64(ESPC_NCR(offset / 4));
6769 		memcpy(data, &val, 4);
6770 		data += 4;
6771 		len -= 4;
6772 		offset += 4;
6773 	}
6774 	if (len) {
6775 		val = nr64(ESPC_NCR(offset / 4));
6776 		memcpy(data, &val, len);
6777 	}
6778 	return 0;
6779 }
6780 
6781 static int niu_ethflow_to_class(int flow_type, u64 *class)
6782 {
6783 	switch (flow_type) {
6784 	case TCP_V4_FLOW:
6785 		*class = CLASS_CODE_TCP_IPV4;
6786 		break;
6787 	case UDP_V4_FLOW:
6788 		*class = CLASS_CODE_UDP_IPV4;
6789 		break;
6790 	case AH_ESP_V4_FLOW:
6791 		*class = CLASS_CODE_AH_ESP_IPV4;
6792 		break;
6793 	case SCTP_V4_FLOW:
6794 		*class = CLASS_CODE_SCTP_IPV4;
6795 		break;
6796 	case TCP_V6_FLOW:
6797 		*class = CLASS_CODE_TCP_IPV6;
6798 		break;
6799 	case UDP_V6_FLOW:
6800 		*class = CLASS_CODE_UDP_IPV6;
6801 		break;
6802 	case AH_ESP_V6_FLOW:
6803 		*class = CLASS_CODE_AH_ESP_IPV6;
6804 		break;
6805 	case SCTP_V6_FLOW:
6806 		*class = CLASS_CODE_SCTP_IPV6;
6807 		break;
6808 	default:
6809 		return 0;
6810 	}
6811 
6812 	return 1;
6813 }
6814 
6815 static u64 niu_flowkey_to_ethflow(u64 flow_key)
6816 {
6817 	u64 ethflow = 0;
6818 
6819 	if (flow_key & FLOW_KEY_PORT)
6820 		ethflow |= RXH_DEV_PORT;
6821 	if (flow_key & FLOW_KEY_L2DA)
6822 		ethflow |= RXH_L2DA;
6823 	if (flow_key & FLOW_KEY_VLAN)
6824 		ethflow |= RXH_VLAN;
6825 	if (flow_key & FLOW_KEY_IPSA)
6826 		ethflow |= RXH_IP_SRC;
6827 	if (flow_key & FLOW_KEY_IPDA)
6828 		ethflow |= RXH_IP_DST;
6829 	if (flow_key & FLOW_KEY_PROTO)
6830 		ethflow |= RXH_L3_PROTO;
6831 	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
6832 		ethflow |= RXH_L4_B_0_1;
6833 	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
6834 		ethflow |= RXH_L4_B_2_3;
6835 
6836 	return ethflow;
6837 }
6839 
6840 static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
6841 {
6842 	u64 key = 0;
6843 
6844 	if (ethflow & RXH_DEV_PORT)
6845 		key |= FLOW_KEY_PORT;
6846 	if (ethflow & RXH_L2DA)
6847 		key |= FLOW_KEY_L2DA;
6848 	if (ethflow & RXH_VLAN)
6849 		key |= FLOW_KEY_VLAN;
6850 	if (ethflow & RXH_IP_SRC)
6851 		key |= FLOW_KEY_IPSA;
6852 	if (ethflow & RXH_IP_DST)
6853 		key |= FLOW_KEY_IPDA;
6854 	if (ethflow & RXH_L3_PROTO)
6855 		key |= FLOW_KEY_PROTO;
6856 	if (ethflow & RXH_L4_B_0_1)
6857 		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
6858 	if (ethflow & RXH_L4_B_2_3)
6859 		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
6860 
6861 	*flow_key = key;
6862 
6863 	return 1;
6864 }
6866 
6867 static int niu_get_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
6868 {
6869 	struct niu *np = netdev_priv(dev);
6870 	u64 class;
6871 
6872 	cmd->data = 0;
6873 
6874 	if (!niu_ethflow_to_class(cmd->flow_type, &class))
6875 		return -EINVAL;
6876 
6877 	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
6878 	    TCAM_KEY_DISC)
6879 		cmd->data = RXH_DISCARD;
6880 	else
6881 		cmd->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
6882 						      CLASS_CODE_USER_PROG1]);
6884 	return 0;
6885 }
6886 
6887 static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
6888 {
6889 	struct niu *np = netdev_priv(dev);
6890 	u64 class;
6891 	u64 flow_key = 0;
6892 	unsigned long flags;
6893 
6894 	if (!niu_ethflow_to_class(cmd->flow_type, &class))
6895 		return -EINVAL;
6896 
6897 	if (class < CLASS_CODE_USER_PROG1 ||
6898 	    class > CLASS_CODE_SCTP_IPV6)
6899 		return -EINVAL;
6900 
6901 	if (cmd->data & RXH_DISCARD) {
6902 		niu_lock_parent(np, flags);
6903 		flow_key = np->parent->tcam_key[class -
6904 					       CLASS_CODE_USER_PROG1];
6905 		flow_key |= TCAM_KEY_DISC;
6906 		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
6907 		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
6908 		niu_unlock_parent(np, flags);
6909 		return 0;
6910 	} else {
6911 		/* Discard was set before, but is not set now */
6912 		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
6913 		    TCAM_KEY_DISC) {
6914 			niu_lock_parent(np, flags);
6915 			flow_key = np->parent->tcam_key[class -
6916 					       CLASS_CODE_USER_PROG1];
6917 			flow_key &= ~TCAM_KEY_DISC;
6918 			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
6919 			     flow_key);
6920 			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
6921 				flow_key;
6922 			niu_unlock_parent(np, flags);
6923 		}
6924 	}
6925 
6926 	if (!niu_ethflow_to_flowkey(cmd->data, &flow_key))
6927 		return -EINVAL;
6928 
6929 	niu_lock_parent(np, flags);
6930 	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
6931 	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
6932 	niu_unlock_parent(np, flags);
6933 
6934 	return 0;
6935 }
6936 
6937 static const struct {
6938 	const char string[ETH_GSTRING_LEN];
6939 } niu_xmac_stat_keys[] = {
6940 	{ "tx_frames" },
6941 	{ "tx_bytes" },
6942 	{ "tx_fifo_errors" },
6943 	{ "tx_overflow_errors" },
6944 	{ "tx_max_pkt_size_errors" },
6945 	{ "tx_underflow_errors" },
6946 	{ "rx_local_faults" },
6947 	{ "rx_remote_faults" },
6948 	{ "rx_link_faults" },
6949 	{ "rx_align_errors" },
6950 	{ "rx_frags" },
6951 	{ "rx_mcasts" },
6952 	{ "rx_bcasts" },
6953 	{ "rx_hist_cnt1" },
6954 	{ "rx_hist_cnt2" },
6955 	{ "rx_hist_cnt3" },
6956 	{ "rx_hist_cnt4" },
6957 	{ "rx_hist_cnt5" },
6958 	{ "rx_hist_cnt6" },
6959 	{ "rx_hist_cnt7" },
6960 	{ "rx_octets" },
6961 	{ "rx_code_violations" },
6962 	{ "rx_len_errors" },
6963 	{ "rx_crc_errors" },
6964 	{ "rx_underflows" },
6965 	{ "rx_overflows" },
6966 	{ "pause_off_state" },
6967 	{ "pause_on_state" },
6968 	{ "pause_received" },
6969 };
6970 
6971 #define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
6972 
6973 static const struct {
6974 	const char string[ETH_GSTRING_LEN];
6975 } niu_bmac_stat_keys[] = {
6976 	{ "tx_underflow_errors" },
6977 	{ "tx_max_pkt_size_errors" },
6978 	{ "tx_bytes" },
6979 	{ "tx_frames" },
6980 	{ "rx_overflows" },
6981 	{ "rx_frames" },
6982 	{ "rx_align_errors" },
6983 	{ "rx_crc_errors" },
6984 	{ "rx_len_errors" },
6985 	{ "pause_off_state" },
6986 	{ "pause_on_state" },
6987 	{ "pause_received" },
6988 };
6989 
6990 #define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
6991 
6992 static const struct {
6993 	const char string[ETH_GSTRING_LEN];
6994 } niu_rxchan_stat_keys[] = {
6995 	{ "rx_channel" },
6996 	{ "rx_packets" },
6997 	{ "rx_bytes" },
6998 	{ "rx_dropped" },
6999 	{ "rx_errors" },
7000 };
7001 
7002 #define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
7003 
7004 static const struct {
7005 	const char string[ETH_GSTRING_LEN];
7006 } niu_txchan_stat_keys[] = {
7007 	{ "tx_channel" },
7008 	{ "tx_packets" },
7009 	{ "tx_bytes" },
7010 	{ "tx_errors" },
7011 };
7012 
7013 #define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
7014 
7015 static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
7016 {
7017 	struct niu *np = netdev_priv(dev);
7018 	int i;
7019 
7020 	if (stringset != ETH_SS_STATS)
7021 		return;
7022 
7023 	if (np->flags & NIU_FLAGS_XMAC) {
7024 		memcpy(data, niu_xmac_stat_keys,
7025 		       sizeof(niu_xmac_stat_keys));
7026 		data += sizeof(niu_xmac_stat_keys);
7027 	} else {
7028 		memcpy(data, niu_bmac_stat_keys,
7029 		       sizeof(niu_bmac_stat_keys));
7030 		data += sizeof(niu_bmac_stat_keys);
7031 	}
7032 	for (i = 0; i < np->num_rx_rings; i++) {
7033 		memcpy(data, niu_rxchan_stat_keys,
7034 		       sizeof(niu_rxchan_stat_keys));
7035 		data += sizeof(niu_rxchan_stat_keys);
7036 	}
7037 	for (i = 0; i < np->num_tx_rings; i++) {
7038 		memcpy(data, niu_txchan_stat_keys,
7039 		       sizeof(niu_txchan_stat_keys));
7040 		data += sizeof(niu_txchan_stat_keys);
7041 	}
7042 }
7043 
7044 static int niu_get_stats_count(struct net_device *dev)
7045 {
7046 	struct niu *np = netdev_priv(dev);
7047 
7048 	return ((np->flags & NIU_FLAGS_XMAC ?
7049 		 NUM_XMAC_STAT_KEYS :
7050 		 NUM_BMAC_STAT_KEYS) +
7051 		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
7052 		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS));
7053 }
7054 
7055 static void niu_get_ethtool_stats(struct net_device *dev,
7056 				  struct ethtool_stats *stats, u64 *data)
7057 {
7058 	struct niu *np = netdev_priv(dev);
7059 	int i;
7060 
7061 	niu_sync_mac_stats(np);
7062 	if (np->flags & NIU_FLAGS_XMAC) {
7063 		memcpy(data, &np->mac_stats.xmac,
7064 		       sizeof(struct niu_xmac_stats));
7065 		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
7066 	} else {
7067 		memcpy(data, &np->mac_stats.bmac,
7068 		       sizeof(struct niu_bmac_stats));
7069 		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
7070 	}
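	/* Per-channel values must appear in the same order as the
	 * rxchan/txchan key tables above.
	 */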
7071 	for (i = 0; i < np->num_rx_rings; i++) {
7072 		struct rx_ring_info *rp = &np->rx_rings[i];
7073 
7074 		niu_sync_rx_discard_stats(np, rp, 0);
7075 
7076 		data[0] = rp->rx_channel;
7077 		data[1] = rp->rx_packets;
7078 		data[2] = rp->rx_bytes;
7079 		data[3] = rp->rx_dropped;
7080 		data[4] = rp->rx_errors;
7081 		data += 5;
7082 	}
7083 	for (i = 0; i < np->num_tx_rings; i++) {
7084 		struct tx_ring_info *rp = &np->tx_rings[i];
7085 
7086 		data[0] = rp->tx_channel;
7087 		data[1] = rp->tx_packets;
7088 		data[2] = rp->tx_bytes;
7089 		data[3] = rp->tx_errors;
7090 		data += 4;
7091 	}
7092 }
7093 
7094 static u64 niu_led_state_save(struct niu *np)
7095 {
7096 	if (np->flags & NIU_FLAGS_XMAC)
7097 		return nr64_mac(XMAC_CONFIG);
7098 	else
7099 		return nr64_mac(BMAC_XIF_CONFIG);
7100 }
7101 
7102 static void niu_led_state_restore(struct niu *np, u64 val)
7103 {
7104 	if (np->flags & NIU_FLAGS_XMAC)
7105 		nw64_mac(XMAC_CONFIG, val);
7106 	else
7107 		nw64_mac(BMAC_XIF_CONFIG, val);
7108 }
7109 
7110 static void niu_force_led(struct niu *np, int on)
7111 {
7112 	u64 val, reg, bit;
7113 
7114 	if (np->flags & NIU_FLAGS_XMAC) {
7115 		reg = XMAC_CONFIG;
7116 		bit = XMAC_CONFIG_FORCE_LED_ON;
7117 	} else {
7118 		reg = BMAC_XIF_CONFIG;
7119 		bit = BMAC_XIF_CONFIG_LINK_LED;
7120 	}
7121 
7122 	val = nr64_mac(reg);
7123 	if (on)
7124 		val |= bit;
7125 	else
7126 		val &= ~bit;
7127 	nw64_mac(reg, val);
7128 }
7129 
7130 static int niu_phys_id(struct net_device *dev, u32 data)
7131 {
7132 	struct niu *np = netdev_priv(dev);
7133 	u64 orig_led_state;
7134 	int i;
7135 
7136 	if (!netif_running(dev))
7137 		return -EAGAIN;
7138 
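	/* The LED is toggled every 500ms, so 'data' is the blink time
	 * in seconds; default to two seconds when unspecified.
	 */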
7139 	if (data == 0)
7140 		data = 2;
7141 
7142 	orig_led_state = niu_led_state_save(np);
7143 	for (i = 0; i < (data * 2); i++) {
7144 		int on = ((i % 2) == 0);
7145 
7146 		niu_force_led(np, on);
7147 
7148 		if (msleep_interruptible(500))
7149 			break;
7150 	}
7151 	niu_led_state_restore(np, orig_led_state);
7152 
7153 	return 0;
7154 }
7155 
7156 static const struct ethtool_ops niu_ethtool_ops = {
7157 	.get_drvinfo		= niu_get_drvinfo,
7158 	.get_link		= ethtool_op_get_link,
7159 	.get_msglevel		= niu_get_msglevel,
7160 	.set_msglevel		= niu_set_msglevel,
7161 	.get_eeprom_len		= niu_get_eeprom_len,
7162 	.get_eeprom		= niu_get_eeprom,
7163 	.get_settings		= niu_get_settings,
7164 	.set_settings		= niu_set_settings,
7165 	.get_strings		= niu_get_strings,
7166 	.get_stats_count	= niu_get_stats_count,
7167 	.get_ethtool_stats	= niu_get_ethtool_stats,
7168 	.phys_id		= niu_phys_id,
7169 	.get_rxhash		= niu_get_hash_opts,
7170 	.set_rxhash		= niu_set_hash_opts,
7171 };
7172 
7173 static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
7174 			      int ldg, int ldn)
7175 {
7176 	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
7177 		return -EINVAL;
7178 	if (ldn < 0 || ldn > LDN_MAX)
7179 		return -EINVAL;
7180 
7181 	parent->ldg_map[ldn] = ldg;
7182 
7183 	if (np->parent->plat_type == PLAT_TYPE_NIU) {
7184 		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
7185 		 * the firmware, and we're not supposed to change them.
7186 		 * Validate the mapping, because if it's wrong we probably
7187 		 * won't get any interrupts and that's painful to debug.
7188 		 */
7189 		if (nr64(LDG_NUM(ldn)) != ldg) {
7190 			dev_err(np->device, PFX "Port %u, mismatched "
7191 				"LDG assignment for ldn %d: "
7192 				"should be %d, is %llu\n",
7193 				np->port, ldn, ldg,
7194 				(unsigned long long) nr64(LDG_NUM(ldn)));
7195 			return -EINVAL;
7196 		}
7197 	} else
7198 		nw64(LDG_NUM(ldn), ldg);
7199 
7200 	return 0;
7201 }
7202 
7203 static int niu_set_ldg_timer_res(struct niu *np, int res)
7204 {
7205 	if (res < 0 || res > LDG_TIMER_RES_VAL)
7206 		return -EINVAL;
7207 
7208 
7210 
7211 	return 0;
7212 }
7213 
7214 static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
7215 {
7216 	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
7217 	    (func < 0 || func > 3) ||
7218 	    (vector < 0 || vector > 0x1f))
7219 		return -EINVAL;
7220 
7221 	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
7222 
7223 	return 0;
7224 }
7225 
7226 static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
7227 {
7228 	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
7229 				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
7230 	int limit;
7231 
7232 	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
7233 		return -EINVAL;
7234 
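	/* Kick off the PIO read and poll for READ_END; the sequence is
	 * issued twice before the data word is finally sampled.
	 */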
7235 	frame = frame_base;
7236 	nw64(ESPC_PIO_STAT, frame);
7237 	limit = 64;
7238 	do {
7239 		udelay(5);
7240 		frame = nr64(ESPC_PIO_STAT);
7241 		if (frame & ESPC_PIO_STAT_READ_END)
7242 			break;
7243 	} while (limit--);
7244 	if (!(frame & ESPC_PIO_STAT_READ_END)) {
7245 		dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
7246 			(unsigned long long) frame);
7247 		return -ENODEV;
7248 	}
7249 
7250 	frame = frame_base;
7251 	nw64(ESPC_PIO_STAT, frame);
7252 	limit = 64;
7253 	do {
7254 		udelay(5);
7255 		frame = nr64(ESPC_PIO_STAT);
7256 		if (frame & ESPC_PIO_STAT_READ_END)
7257 			break;
7258 	} while (limit--);
7259 	if (!(frame & ESPC_PIO_STAT_READ_END)) {
7260 		dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
7261 			(unsigned long long) frame);
7262 		return -ENODEV;
7263 	}
7264 
7265 	frame = nr64(ESPC_PIO_STAT);
7266 	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
7267 }
7268 
7269 static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
7270 {
7271 	int err = niu_pci_eeprom_read(np, off);
7272 	u16 val;
7273 
7274 	if (err < 0)
7275 		return err;
7276 	val = (err << 8);
7277 	err = niu_pci_eeprom_read(np, off + 1);
7278 	if (err < 0)
7279 		return err;
7280 	val |= (err & 0xff);
7281 
7282 	return val;
7283 }
7284 
7285 static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
7286 {
7287 	int err = niu_pci_eeprom_read(np, off);
7288 	u16 val;
7289 
7290 	if (err < 0)
7291 		return err;
7292 
7293 	val = (err & 0xff);
7294 	err = niu_pci_eeprom_read(np, off + 1);
7295 	if (err < 0)
7296 		return err;
7297 
7298 	val |= (err & 0xff) << 8;
7299 
7300 	return val;
7301 }
7302 
7303 static int __devinit niu_pci_vpd_get_propname(struct niu *np,
7304 					      u32 off,
7305 					      char *namebuf,
7306 					      int namebuf_len)
7307 {
7308 	int i;
7309 
7310 	for (i = 0; i < namebuf_len; i++) {
7311 		int err = niu_pci_eeprom_read(np, off + i);
7312 		if (err < 0)
7313 			return err;
7314 		*namebuf++ = err;
7315 		if (!err)
7316 			break;
7317 	}
7318 	if (i >= namebuf_len)
7319 		return -EINVAL;
7320 
7321 	return i + 1;
7322 }
7323 
7324 static void __devinit niu_vpd_parse_version(struct niu *np)
7325 {
7326 	struct niu_vpd *vpd = &np->vpd;
7327 	int len = strlen(vpd->version) + 1;
7328 	const char *s = vpd->version;
7329 	int i;
7330 
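	/* The VPD version string embeds the firmware revision as
	 * "FCode <major>.<minor>"; locate that marker and parse it.
	 */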
7331 	for (i = 0; i < len - 5; i++) {
7332 		if (!strncmp(s + i, "FCode ", 5))
7333 			break;
7334 	}
7335 	if (i >= len - 5)
7336 		return;
7337 
7338 	s += i + 5;
7339 	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
7340 
7341 	niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
7342 	       vpd->fcode_major, vpd->fcode_minor);
7343 	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
7344 	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
7345 	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
7346 		np->flags |= NIU_FLAGS_VPD_VALID;
7347 }
7348 
7349 /* ESPC_PIO_EN_ENABLE must be set */
7350 static int __devinit niu_pci_vpd_scan_props(struct niu *np,
7351 					    u32 start, u32 end)
7352 {
7353 	unsigned int found_mask = 0;
7354 #define FOUND_MASK_MODEL	0x00000001
7355 #define FOUND_MASK_BMODEL	0x00000002
7356 #define FOUND_MASK_VERS		0x00000004
7357 #define FOUND_MASK_MAC		0x00000008
7358 #define FOUND_MASK_NMAC		0x00000010
7359 #define FOUND_MASK_PHY		0x00000020
7360 #define FOUND_MASK_ALL		0x0000003f
7361 
7362 	niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n",
7363 	       start, end);
7364 	while (start < end) {
7365 		int len, err, instance, type, prop_len;
7366 		char namebuf[64];
7367 		u8 *prop_buf;
7368 		int max_len;
7369 
7370 		if (found_mask == FOUND_MASK_ALL) {
7371 			niu_vpd_parse_version(np);
7372 			return 1;
7373 		}
7374 
7375 		err = niu_pci_eeprom_read(np, start + 2);
7376 		if (err < 0)
7377 			return err;
7378 		len = err;
7379 		start += 3;
7380 
7381 		instance = niu_pci_eeprom_read(np, start);
7382 		type = niu_pci_eeprom_read(np, start + 3);
7383 		prop_len = niu_pci_eeprom_read(np, start + 4);
7384 		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
7385 		if (err < 0)
7386 			return err;
7387 
7388 		prop_buf = NULL;
7389 		max_len = 0;
7390 		if (!strcmp(namebuf, "model")) {
7391 			prop_buf = np->vpd.model;
7392 			max_len = NIU_VPD_MODEL_MAX;
7393 			found_mask |= FOUND_MASK_MODEL;
7394 		} else if (!strcmp(namebuf, "board-model")) {
7395 			prop_buf = np->vpd.board_model;
7396 			max_len = NIU_VPD_BD_MODEL_MAX;
7397 			found_mask |= FOUND_MASK_BMODEL;
7398 		} else if (!strcmp(namebuf, "version")) {
7399 			prop_buf = np->vpd.version;
7400 			max_len = NIU_VPD_VERSION_MAX;
7401 			found_mask |= FOUND_MASK_VERS;
7402 		} else if (!strcmp(namebuf, "local-mac-address")) {
7403 			prop_buf = np->vpd.local_mac;
7404 			max_len = ETH_ALEN;
7405 			found_mask |= FOUND_MASK_MAC;
7406 		} else if (!strcmp(namebuf, "num-mac-addresses")) {
7407 			prop_buf = &np->vpd.mac_num;
7408 			max_len = 1;
7409 			found_mask |= FOUND_MASK_NMAC;
7410 		} else if (!strcmp(namebuf, "phy-type")) {
7411 			prop_buf = np->vpd.phy_type;
7412 			max_len = NIU_VPD_PHY_TYPE_MAX;
7413 			found_mask |= FOUND_MASK_PHY;
7414 		}
7415 
7416 		if (max_len && prop_len > max_len) {
7417 			dev_err(np->device, PFX "Property '%s' length (%d) "
7418 				"exceeds the maximum.\n", namebuf, prop_len);
7419 			return -EINVAL;
7420 		}
7421 
7422 		if (prop_buf) {
7423 			u32 off = start + 5 + err;
7424 			int i;
7425 
7426 			niudbg(PROBE, "VPD_SCAN: Reading in property [%s] "
7427 			       "len[%d]\n", namebuf, prop_len);
7428 			for (i = 0; i < prop_len; i++)
7429 				*prop_buf++ = niu_pci_eeprom_read(np, off + i);
7430 		}
7431 
7432 		start += len;
7433 	}
7434 
7435 	return 0;
7436 }
7437 
7438 /* ESPC_PIO_EN_ENABLE must be set */
7439 static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
7440 {
7441 	u32 offset;
7442 	int err;
7443 
7444 	err = niu_pci_eeprom_read16_swp(np, start + 1);
7445 	if (err < 0)
7446 		return;
7447 
7448 	offset = err + 3;
7449 
7450 	while (start + offset < ESPC_EEPROM_SIZE) {
7451 		u32 here = start + offset;
7452 		u32 end;
7453 
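		/* Each VPD block must begin with the large-resource
		 * VPD-R tag (0x90).
		 */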
7454 		err = niu_pci_eeprom_read(np, here);
7455 		if (err != 0x90)
7456 			return;
7457 
7458 		err = niu_pci_eeprom_read16_swp(np, here + 1);
7459 		if (err < 0)
7460 			return;
7461 
7462 		here = start + offset + 3;
7463 		end = start + offset + err;
7464 
7465 		offset += err;
7466 
7467 		err = niu_pci_vpd_scan_props(np, here, end);
7468 		if (err < 0 || err == 1)
7469 			return;
7470 	}
7471 }
7472 
7473 /* ESPC_PIO_EN_ENABLE must be set */
7474 static u32 __devinit niu_pci_vpd_offset(struct niu *np)
7475 {
7476 	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
7477 	int err;
7478 
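	/* Walk the expansion ROM image chain: each image begins with the
	 * 0x55aa signature and points at a PCIR data structure.  We want
	 * the OBP (FCode) image, which carries the VPD.
	 */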
7479 	while (start < end) {
7480 		ret = start;
7481 
7482 		/* ROM header signature?  */
7483 		err = niu_pci_eeprom_read16(np, start +  0);
7484 		if (err != 0x55aa)
7485 			return 0;
7486 
7487 		/* Apply offset to PCI data structure.  */
7488 		err = niu_pci_eeprom_read16(np, start + 23);
7489 		if (err < 0)
7490 			return 0;
7491 		start += err;
7492 
7493 		/* Check for "PCIR" signature.  */
7494 		err = niu_pci_eeprom_read16(np, start +  0);
7495 		if (err != 0x5043)
7496 			return 0;
7497 		err = niu_pci_eeprom_read16(np, start +  2);
7498 		if (err != 0x4952)
7499 			return 0;
7500 
7501 		/* Check for OBP image type.  */
7502 		err = niu_pci_eeprom_read(np, start + 20);
7503 		if (err < 0)
7504 			return 0;
7505 		if (err != 0x01) {
7506 			err = niu_pci_eeprom_read(np, ret + 2);
7507 			if (err < 0)
7508 				return 0;
7509 
7510 			start = ret + (err * 512);
7511 			continue;
7512 		}
7513 
7514 		err = niu_pci_eeprom_read16_swp(np, start + 8);
7515 		if (err < 0)
7516 			return 0;
7517 		ret += err;
7518 
7519 		err = niu_pci_eeprom_read(np, ret + 0);
7520 		if (err != 0x82)
7521 			return 0;
7522 
7523 		return ret;
7524 	}
7525 
7526 	return 0;
7527 }
7528 
7529 static int __devinit niu_phy_type_prop_decode(struct niu *np,
7530 					      const char *phy_prop)
7531 {
7532 	if (!strcmp(phy_prop, "mif")) {
7533 		/* 1G copper, MII */
7534 		np->flags &= ~(NIU_FLAGS_FIBER |
7535 			       NIU_FLAGS_10G);
7536 		np->mac_xcvr = MAC_XCVR_MII;
7537 	} else if (!strcmp(phy_prop, "xgf")) {
7538 		/* 10G fiber, XPCS */
7539 		np->flags |= (NIU_FLAGS_10G |
7540 			      NIU_FLAGS_FIBER);
7541 		np->mac_xcvr = MAC_XCVR_XPCS;
7542 	} else if (!strcmp(phy_prop, "pcs")) {
7543 		/* 1G fiber, PCS */
7544 		np->flags &= ~NIU_FLAGS_10G;
7545 		np->flags |= NIU_FLAGS_FIBER;
7546 		np->mac_xcvr = MAC_XCVR_PCS;
7547 	} else if (!strcmp(phy_prop, "xgc")) {
7548 		/* 10G copper, XPCS */
7549 		np->flags |= NIU_FLAGS_10G;
7550 		np->flags &= ~NIU_FLAGS_FIBER;
7551 		np->mac_xcvr = MAC_XCVR_XPCS;
7552 	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
7553 		/* 10G Serdes or 1G Serdes, default to 10G */
7554 		np->flags |= NIU_FLAGS_10G;
7555 		np->flags &= ~NIU_FLAGS_FIBER;
7556 		np->flags |= NIU_FLAGS_XCVR_SERDES;
7557 		np->mac_xcvr = MAC_XCVR_XPCS;
7558 	} else {
7559 		return -EINVAL;
7560 	}
7561 	return 0;
7562 }
7563 
7564 static int niu_pci_vpd_get_nports(struct niu *np)
7565 {
7566 	int ports = 0;
7567 
7568 	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
7569 	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
7570 	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
7571 	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
7572 	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
7573 		ports = 4;
7574 	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
7575 		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
7576 		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
7577 		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
7578 		ports = 2;
7579 	}
7580 
7581 	return ports;
7582 }
7583 
7584 static void __devinit niu_pci_vpd_validate(struct niu *np)
7585 {
7586 	struct net_device *dev = np->dev;
7587 	struct niu_vpd *vpd = &np->vpd;
7588 	u8 val8;
7589 
7590 	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
7591 		dev_err(np->device, PFX "VPD MAC invalid, "
7592 			"falling back to SPROM.\n");
7593 
7594 		np->flags &= ~NIU_FLAGS_VPD_VALID;
7595 		return;
7596 	}
7597 
7598 	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
7599 	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
7600 		np->flags |= NIU_FLAGS_10G;
7601 		np->flags &= ~NIU_FLAGS_FIBER;
7602 		np->flags |= NIU_FLAGS_XCVR_SERDES;
7603 		np->mac_xcvr = MAC_XCVR_PCS;
7604 		if (np->port > 1) {
7605 			np->flags |= NIU_FLAGS_FIBER;
7606 			np->flags &= ~NIU_FLAGS_10G;
7607 		}
7608 		if (np->flags & NIU_FLAGS_10G)
7609 			 np->mac_xcvr = MAC_XCVR_XPCS;
7610 	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
7611 		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
7612 			      NIU_FLAGS_HOTPLUG_PHY);
7613 	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
7614 		dev_err(np->device, PFX "Illegal phy string [%s].\n",
7615 			np->vpd.phy_type);
7616 		dev_err(np->device, PFX "Falling back to SPROM.\n");
7617 		np->flags &= ~NIU_FLAGS_VPD_VALID;
7618 		return;
7619 	}
7620 
7621 	memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
7622 
7623 	val8 = dev->perm_addr[5];
7624 	dev->perm_addr[5] += np->port;
7625 	if (dev->perm_addr[5] < val8)
7626 		dev->perm_addr[4]++;
7627 
7628 	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
7629 }
7630 
7631 static int __devinit niu_pci_probe_sprom(struct niu *np)
7632 {
7633 	struct net_device *dev = np->dev;
7634 	int len, i;
7635 	u64 val, sum;
7636 	u8 val8;
7637 
7638 	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
7639 	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
7640 	len = val / 4;
7641 
7642 	np->eeprom_len = len;
7643 
7644 	niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val);
7645 
7646 	sum = 0;
7647 	for (i = 0; i < len; i++) {
7648 		val = nr64(ESPC_NCR(i));
7649 		sum += (val >>  0) & 0xff;
7650 		sum += (val >>  8) & 0xff;
7651 		sum += (val >> 16) & 0xff;
7652 		sum += (val >> 24) & 0xff;
7653 	}
7654 	niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff));
7655 	if ((sum & 0xff) != 0xab) {
7656 		dev_err(np->device, PFX "Bad SPROM checksum "
7657 			"(%x, should be 0xab)\n", (int) (sum & 0xff));
7658 		return -EINVAL;
7659 	}
7660 
7661 	val = nr64(ESPC_PHY_TYPE);
7662 	switch (np->port) {
7663 	case 0:
7664 		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
7665 			ESPC_PHY_TYPE_PORT0_SHIFT;
7666 		break;
7667 	case 1:
7668 		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
7669 			ESPC_PHY_TYPE_PORT1_SHIFT;
7670 		break;
7671 	case 2:
7672 		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
7673 			ESPC_PHY_TYPE_PORT2_SHIFT;
7674 		break;
7675 	case 3:
7676 		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
7677 			ESPC_PHY_TYPE_PORT3_SHIFT;
7678 		break;
7679 	default:
7680 		dev_err(np->device, PFX "Bogus port number %u\n",
7681 			np->port);
7682 		return -EINVAL;
7683 	}
7684 	niudbg(PROBE, "SPROM: PHY type %x\n", val8);
7685 
7686 	switch (val8) {
7687 	case ESPC_PHY_TYPE_1G_COPPER:
7688 		/* 1G copper, MII */
7689 		np->flags &= ~(NIU_FLAGS_FIBER |
7690 			       NIU_FLAGS_10G);
7691 		np->mac_xcvr = MAC_XCVR_MII;
7692 		break;
7693 
7694 	case ESPC_PHY_TYPE_1G_FIBER:
7695 		/* 1G fiber, PCS */
7696 		np->flags &= ~NIU_FLAGS_10G;
7697 		np->flags |= NIU_FLAGS_FIBER;
7698 		np->mac_xcvr = MAC_XCVR_PCS;
7699 		break;
7700 
7701 	case ESPC_PHY_TYPE_10G_COPPER:
7702 		/* 10G copper, XPCS */
7703 		np->flags |= NIU_FLAGS_10G;
7704 		np->flags &= ~NIU_FLAGS_FIBER;
7705 		np->mac_xcvr = MAC_XCVR_XPCS;
7706 		break;
7707 
7708 	case ESPC_PHY_TYPE_10G_FIBER:
7709 		/* 10G fiber, XPCS */
7710 		np->flags |= (NIU_FLAGS_10G |
7711 			      NIU_FLAGS_FIBER);
7712 		np->mac_xcvr = MAC_XCVR_XPCS;
7713 		break;
7714 
7715 	default:
7716 		dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8);
7717 		return -EINVAL;
7718 	}
7719 
7720 	val = nr64(ESPC_MAC_ADDR0);
7721 	niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n",
7722 	       (unsigned long long) val);
7723 	dev->perm_addr[0] = (val >>  0) & 0xff;
7724 	dev->perm_addr[1] = (val >>  8) & 0xff;
7725 	dev->perm_addr[2] = (val >> 16) & 0xff;
7726 	dev->perm_addr[3] = (val >> 24) & 0xff;
7727 
7728 	val = nr64(ESPC_MAC_ADDR1);
7729 	niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n",
7730 	       (unsigned long long) val);
7731 	dev->perm_addr[4] = (val >>  0) & 0xff;
7732 	dev->perm_addr[5] = (val >>  8) & 0xff;
7733 
7734 	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
7735 		dev_err(np->device, PFX "SPROM MAC address invalid\n");
7736 		dev_err(np->device, PFX "[ ");
7737 		for (i = 0; i < 6; i++)
7738 			printk("%02x ", dev->perm_addr[i]);
7739 		printk("]\n");
7740 		return -EINVAL;
7741 	}
7742 
7743 	val8 = dev->perm_addr[5];
7744 	dev->perm_addr[5] += np->port;
7745 	if (dev->perm_addr[5] < val8)
7746 		dev->perm_addr[4]++;
7747 
7748 	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
7749 
7750 	val = nr64(ESPC_MOD_STR_LEN);
7751 	niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n",
7752 	       (unsigned long long) val);
7753 	if (val >= 8 * 4)
7754 		return -EINVAL;
7755 
7756 	for (i = 0; i < val; i += 4) {
7757 		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
7758 
7759 		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
7760 		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
7761 		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
7762 		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
7763 	}
7764 	np->vpd.model[val] = '\0';
7765 
7766 	val = nr64(ESPC_BD_MOD_STR_LEN);
7767 	niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n",
7768 	       (unsigned long long) val);
7769 	if (val >= 4 * 4)
7770 		return -EINVAL;
7771 
7772 	for (i = 0; i < val; i += 4) {
7773 		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
7774 
7775 		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
7776 		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
7777 		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
7778 		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
7779 	}
7780 	np->vpd.board_model[val] = '\0';
7781 
7782 	np->vpd.mac_num =
7783 		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
7784 	niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n",
7785 	       np->vpd.mac_num);
7786 
7787 	return 0;
7788 }
7789 
7790 static int __devinit niu_get_and_validate_port(struct niu *np)
7791 {
7792 	struct niu_parent *parent = np->parent;
7793 
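	/* Ports 0 and 1 are served by the XMAC, ports 2 and 3 by the BMAC. */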
7794 	if (np->port <= 1)
7795 		np->flags |= NIU_FLAGS_XMAC;
7796 
7797 	if (!parent->num_ports) {
7798 		if (parent->plat_type == PLAT_TYPE_NIU) {
7799 			parent->num_ports = 2;
7800 		} else {
7801 			parent->num_ports = niu_pci_vpd_get_nports(np);
7802 			if (!parent->num_ports) {
7803 			/* Fall back to the SPROM as a last resort.
7804 				 * This will fail on most cards.
7805 				 */
7806 				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
7807 					ESPC_NUM_PORTS_MACS_VAL;
7808 
7809 				/* All of the current probing methods fail on
7810 				 * Maramba on-board parts.
7811 				 */
7812 				if (!parent->num_ports)
7813 					parent->num_ports = 4;
7814 			}
7815 		}
7816 	}
7817 
7818 	niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
7819 	       np->port, parent->num_ports);
7820 	if (np->port >= parent->num_ports)
7821 		return -ENODEV;
7822 
7823 	return 0;
7824 }
7825 
7826 static int __devinit phy_record(struct niu_parent *parent,
7827 				struct phy_probe_info *p,
7828 				int dev_id_1, int dev_id_2, u8 phy_port,
7829 				int type)
7830 {
7831 	u32 id = (dev_id_1 << 16) | dev_id_2;
7832 	u8 idx;
7833 
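	/* A negative ID means the MDIO read failed (no PHY responded at
	 * this address), so skip the port rather than record an error.
	 */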
7834 	if (dev_id_1 < 0 || dev_id_2 < 0)
7835 		return 0;
7836 	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
7837 		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
7838 		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
7839 		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
7840 			return 0;
7841 	} else {
7842 		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
7843 			return 0;
7844 	}
7845 
7846 	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
7847 		parent->index, id,
7848 		(type == PHY_TYPE_PMA_PMD ?
7849 		 "PMA/PMD" :
7850 		 (type == PHY_TYPE_PCS ?
7851 		  "PCS" : "MII")),
7852 		phy_port);
7853 
7854 	if (p->cur[type] >= NIU_MAX_PORTS) {
7855 		printk(KERN_ERR PFX "Too many PHY ports.\n");
7856 		return -EINVAL;
7857 	}
7858 	idx = p->cur[type];
7859 	p->phy_id[type][idx] = id;
7860 	p->phy_port[type][idx] = phy_port;
7861 	p->cur[type] = idx + 1;
7862 	return 0;
7863 }
7864 
7865 static int __devinit port_has_10g(struct phy_probe_info *p, int port)
7866 {
7867 	int i;
7868 
7869 	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
7870 		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
7871 			return 1;
7872 	}
7873 	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
7874 		if (p->phy_port[PHY_TYPE_PCS][i] == port)
7875 			return 1;
7876 	}
7877 
7878 	return 0;
7879 }
7880 
7881 static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
7882 {
7883 	int port, cnt;
7884 
7885 	cnt = 0;
7886 	*lowest = 32;
7887 	for (port = 8; port < 32; port++) {
7888 		if (port_has_10g(p, port)) {
7889 			if (!cnt)
7890 				*lowest = port;
7891 			cnt++;
7892 		}
7893 	}
7894 
7895 	return cnt;
7896 }
7897 
7898 static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
7899 {
7900 	*lowest = 32;
7901 	if (p->cur[PHY_TYPE_MII])
7902 		*lowest = p->phy_port[PHY_TYPE_MII][0];
7903 
7904 	return p->cur[PHY_TYPE_MII];
7905 }
7906 
7907 static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
7908 {
7909 	int num_ports = parent->num_ports;
7910 	int i;
7911 
7912 	for (i = 0; i < num_ports; i++) {
7913 		parent->rxchan_per_port[i] = (16 / num_ports);
7914 		parent->txchan_per_port[i] = (16 / num_ports);
7915 
7916 		pr_info(PFX "niu%d: Port %u [%u RX chans] "
7917 			"[%u TX chans]\n",
7918 			parent->index, i,
7919 			parent->rxchan_per_port[i],
7920 			parent->txchan_per_port[i]);
7921 	}
7922 }
7923 
7924 static void __devinit niu_divide_channels(struct niu_parent *parent,
7925 					  int num_10g, int num_1g)
7926 {
7927 	int num_ports = parent->num_ports;
7928 	int rx_chans_per_10g, rx_chans_per_1g;
7929 	int tx_chans_per_10g, tx_chans_per_1g;
7930 	int i, tot_rx, tot_tx;
7931 
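	/* All-10G or all-1G configs split the channels evenly.  In mixed
	 * configs each 1G port gets a fixed small share and the 10G ports
	 * divide whatever remains.
	 */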
7932 	if (!num_10g || !num_1g) {
7933 		rx_chans_per_10g = rx_chans_per_1g =
7934 			(NIU_NUM_RXCHAN / num_ports);
7935 		tx_chans_per_10g = tx_chans_per_1g =
7936 			(NIU_NUM_TXCHAN / num_ports);
7937 	} else {
7938 		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
7939 		rx_chans_per_10g = (NIU_NUM_RXCHAN -
7940 				    (rx_chans_per_1g * num_1g)) /
7941 			num_10g;
7942 
7943 		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
7944 		tx_chans_per_10g = (NIU_NUM_TXCHAN -
7945 				    (tx_chans_per_1g * num_1g)) /
7946 			num_10g;
7947 	}
7948 
7949 	tot_rx = tot_tx = 0;
7950 	for (i = 0; i < num_ports; i++) {
7951 		int type = phy_decode(parent->port_phy, i);
7952 
7953 		if (type == PORT_TYPE_10G) {
7954 			parent->rxchan_per_port[i] = rx_chans_per_10g;
7955 			parent->txchan_per_port[i] = tx_chans_per_10g;
7956 		} else {
7957 			parent->rxchan_per_port[i] = rx_chans_per_1g;
7958 			parent->txchan_per_port[i] = tx_chans_per_1g;
7959 		}
7960 		pr_info(PFX "niu%d: Port %u [%u RX chans] "
7961 			"[%u TX chans]\n",
7962 			parent->index, i,
7963 			parent->rxchan_per_port[i],
7964 			parent->txchan_per_port[i]);
7965 		tot_rx += parent->rxchan_per_port[i];
7966 		tot_tx += parent->txchan_per_port[i];
7967 	}
7968 
7969 	if (tot_rx > NIU_NUM_RXCHAN) {
7970 		printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), "
7971 		       "resetting to one per port.\n",
7972 		       parent->index, tot_rx);
7973 		for (i = 0; i < num_ports; i++)
7974 			parent->rxchan_per_port[i] = 1;
7975 	}
7976 	if (tot_tx > NIU_NUM_TXCHAN) {
7977 		printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), "
7978 		       "resetting to one per port.\n",
7979 		       parent->index, tot_tx);
7980 		for (i = 0; i < num_ports; i++)
7981 			parent->txchan_per_port[i] = 1;
7982 	}
7983 	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
7984 		printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, "
7985 		       "RX[%d] TX[%d]\n",
7986 		       parent->index, tot_rx, tot_tx);
7987 	}
7988 }
7989 
7990 static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
7991 					    int num_10g, int num_1g)
7992 {
7993 	int i, num_ports = parent->num_ports;
7994 	int rdc_group, rdc_groups_per_port;
7995 	int rdc_channel_base;
7996 
7997 	rdc_group = 0;
7998 	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
7999 
8000 	rdc_channel_base = 0;
8001 
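	/* Give each port an equal share of the RDC tables and populate
	 * every slot round-robin over that port's RX DMA channels.
	 */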
8002 	for (i = 0; i < num_ports; i++) {
8003 		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
8004 		int grp, num_channels = parent->rxchan_per_port[i];
8005 		int this_channel_offset;
8006 
8007 		tp->first_table_num = rdc_group;
8008 		tp->num_tables = rdc_groups_per_port;
8009 		this_channel_offset = 0;
8010 		for (grp = 0; grp < tp->num_tables; grp++) {
8011 			struct rdc_table *rt = &tp->tables[grp];
8012 			int slot;
8013 
8014 			pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ",
8015 				parent->index, i, tp->first_table_num + grp);
8016 			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
8017 				rt->rxdma_channel[slot] =
8018 					rdc_channel_base + this_channel_offset;
8019 
8020 				printk("%d ", rt->rxdma_channel[slot]);
8021 
8022 				if (++this_channel_offset == num_channels)
8023 					this_channel_offset = 0;
8024 			}
8025 			printk("]\n");
8026 		}
8027 
8028 		parent->rdc_default[i] = rdc_channel_base;
8029 
8030 		rdc_channel_base += num_channels;
8031 		rdc_group += rdc_groups_per_port;
8032 	}
8033 }
8034 
8035 static int __devinit fill_phy_probe_info(struct niu *np,
8036 					 struct niu_parent *parent,
8037 					 struct phy_probe_info *info)
8038 {
8039 	unsigned long flags;
8040 	int port, err;
8041 
8042 	memset(info, 0, sizeof(*info));
8043 
8044 	/* Ports 0 to 7 are reserved for the on-board SERDES; probe the rest.  */
8045 	niu_lock_parent(np, flags);
8046 	err = 0;
8047 	for (port = 8; port < 32; port++) {
8048 		int dev_id_1, dev_id_2;
8049 
8050 		dev_id_1 = mdio_read(np, port,
8051 				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
8052 		dev_id_2 = mdio_read(np, port,
8053 				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
8054 		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8055 				 PHY_TYPE_PMA_PMD);
8056 		if (err)
8057 			break;
8058 		dev_id_1 = mdio_read(np, port,
8059 				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
8060 		dev_id_2 = mdio_read(np, port,
8061 				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
8062 		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8063 				 PHY_TYPE_PCS);
8064 		if (err)
8065 			break;
8066 		dev_id_1 = mii_read(np, port, MII_PHYSID1);
8067 		dev_id_2 = mii_read(np, port, MII_PHYSID2);
8068 		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8069 				 PHY_TYPE_MII);
8070 		if (err)
8071 			break;
8072 	}
8073 	niu_unlock_parent(np, flags);
8074 
8075 	return err;
8076 }
8077 
8078 static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
8079 {
8080 	struct phy_probe_info *info = &parent->phy_probe_info;
8081 	int lowest_10g, lowest_1g;
8082 	int num_10g, num_1g;
8083 	u32 val;
8084 	int err;
8085 
8086 	num_10g = num_1g = 0;
8087 
8088 	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8089 	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8090 		num_10g = 0;
8091 		num_1g = 2;
8092 		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
8093 		parent->num_ports = 4;
8094 		val = (phy_encode(PORT_TYPE_1G, 0) |
8095 		       phy_encode(PORT_TYPE_1G, 1) |
8096 		       phy_encode(PORT_TYPE_1G, 2) |
8097 		       phy_encode(PORT_TYPE_1G, 3));
8098 	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8099 		num_10g = 2;
8100 		num_1g = 0;
8101 		parent->num_ports = 2;
8102 		val = (phy_encode(PORT_TYPE_10G, 0) |
8103 		       phy_encode(PORT_TYPE_10G, 1));
8104 	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
8105 		   (parent->plat_type == PLAT_TYPE_NIU)) {
8106 		/* this is the Monza case */
8107 		if (np->flags & NIU_FLAGS_10G) {
8108 			val = (phy_encode(PORT_TYPE_10G, 0) |
8109 			       phy_encode(PORT_TYPE_10G, 1));
8110 		} else {
8111 			val = (phy_encode(PORT_TYPE_1G, 0) |
8112 			       phy_encode(PORT_TYPE_1G, 1));
8113 		}
8114 	} else {
8115 		err = fill_phy_probe_info(np, parent, info);
8116 		if (err)
8117 			return err;
8118 
8119 		num_10g = count_10g_ports(info, &lowest_10g);
8120 		num_1g = count_1g_ports(info, &lowest_1g);
8121 
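		/* Pack the port mix into one byte: the high nibble counts
		 * 10G ports and the low nibble counts 1G ports, so e.g.
		 * 0x24 means two 10G plus four 1G ports.
		 */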
8122 		switch ((num_10g << 4) | num_1g) {
8123 		case 0x24:
8124 			if (lowest_1g == 10)
8125 				parent->plat_type = PLAT_TYPE_VF_P0;
8126 			else if (lowest_1g == 26)
8127 				parent->plat_type = PLAT_TYPE_VF_P1;
8128 			else
8129 				goto unknown_vg_1g_port;
8130 
8131 			/* fallthru */
8132 		case 0x22:
8133 			val = (phy_encode(PORT_TYPE_10G, 0) |
8134 			       phy_encode(PORT_TYPE_10G, 1) |
8135 			       phy_encode(PORT_TYPE_1G, 2) |
8136 			       phy_encode(PORT_TYPE_1G, 3));
8137 			break;
8138 
8139 		case 0x20:
8140 			val = (phy_encode(PORT_TYPE_10G, 0) |
8141 			       phy_encode(PORT_TYPE_10G, 1));
8142 			break;
8143 
8144 		case 0x10:
8145 			val = phy_encode(PORT_TYPE_10G, np->port);
8146 			break;
8147 
8148 		case 0x14:
8149 			if (lowest_1g == 10)
8150 				parent->plat_type = PLAT_TYPE_VF_P0;
8151 			else if (lowest_1g == 26)
8152 				parent->plat_type = PLAT_TYPE_VF_P1;
8153 			else
8154 				goto unknown_vg_1g_port;
8155 
8156 			/* fallthru */
8157 		case 0x13:
8158 			if ((lowest_10g & 0x7) == 0)
8159 				val = (phy_encode(PORT_TYPE_10G, 0) |
8160 				       phy_encode(PORT_TYPE_1G, 1) |
8161 				       phy_encode(PORT_TYPE_1G, 2) |
8162 				       phy_encode(PORT_TYPE_1G, 3));
8163 			else
8164 				val = (phy_encode(PORT_TYPE_1G, 0) |
8165 				       phy_encode(PORT_TYPE_10G, 1) |
8166 				       phy_encode(PORT_TYPE_1G, 2) |
8167 				       phy_encode(PORT_TYPE_1G, 3));
8168 			break;
8169 
8170 		case 0x04:
8171 			if (lowest_1g == 10)
8172 				parent->plat_type = PLAT_TYPE_VF_P0;
8173 			else if (lowest_1g == 26)
8174 				parent->plat_type = PLAT_TYPE_VF_P1;
8175 			else
8176 				goto unknown_vg_1g_port;
8177 
8178 			val = (phy_encode(PORT_TYPE_1G, 0) |
8179 			       phy_encode(PORT_TYPE_1G, 1) |
8180 			       phy_encode(PORT_TYPE_1G, 2) |
8181 			       phy_encode(PORT_TYPE_1G, 3));
8182 			break;
8183 
8184 		default:
8185 			printk(KERN_ERR PFX "Unsupported port config "
8186 			       "10G[%d] 1G[%d]\n",
8187 			       num_10g, num_1g);
8188 			return -EINVAL;
8189 		}
8190 	}
8191 
8192 	parent->port_phy = val;
8193 
8194 	if (parent->plat_type == PLAT_TYPE_NIU)
8195 		niu_n2_divide_channels(parent);
8196 	else
8197 		niu_divide_channels(parent, num_10g, num_1g);
8198 
8199 	niu_divide_rdc_groups(parent, num_10g, num_1g);
8200 
8201 	return 0;
8202 
8203 unknown_vg_1g_port:
8204 	printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n",
8205 	       lowest_1g);
8206 	return -EINVAL;
8207 }
8208 
8209 static int __devinit niu_probe_ports(struct niu *np)
8210 {
8211 	struct niu_parent *parent = np->parent;
8212 	int err, i;
8213 
8214 	niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
8215 	       parent->port_phy);
8216 
8217 	if (parent->port_phy == PORT_PHY_UNKNOWN) {
8218 		err = walk_phys(np, parent);
8219 		if (err)
8220 			return err;
8221 
8222 		niu_set_ldg_timer_res(np, 2);
8223 		for (i = 0; i <= LDN_MAX; i++)
8224 			niu_ldn_irq_enable(np, i, 0);
8225 	}
8226 
8227 	if (parent->port_phy == PORT_PHY_INVALID)
8228 		return -EINVAL;
8229 
8230 	return 0;
8231 }
8232 
8233 static int __devinit niu_classifier_swstate_init(struct niu *np)
8234 {
8235 	struct niu_classifier *cp = &np->clas;
8236 
8237 	niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
8238 	       np->parent->tcam_num_entries);
8239 
8240 	cp->tcam_index = (u16) np->port;
8241 	cp->h1_init = 0xffffffff;
8242 	cp->h2_init = 0xffff;
8243 
8244 	return fflp_early_init(np);
8245 }
8246 
8247 static void __devinit niu_link_config_init(struct niu *np)
8248 {
8249 	struct niu_link_config *lp = &np->link_config;
8250 
8251 	lp->advertising = (ADVERTISED_10baseT_Half |
8252 			   ADVERTISED_10baseT_Full |
8253 			   ADVERTISED_100baseT_Half |
8254 			   ADVERTISED_100baseT_Full |
8255 			   ADVERTISED_1000baseT_Half |
8256 			   ADVERTISED_1000baseT_Full |
8257 			   ADVERTISED_10000baseT_Full |
8258 			   ADVERTISED_Autoneg);
8259 	lp->speed = lp->active_speed = SPEED_INVALID;
8260 	lp->duplex = lp->active_duplex = DUPLEX_INVALID;
8261 #if 0
8262 	lp->loopback_mode = LOOPBACK_MAC;
8263 	lp->active_speed = SPEED_10000;
8264 	lp->active_duplex = DUPLEX_FULL;
8265 #else
8266 	lp->loopback_mode = LOOPBACK_DISABLED;
8267 #endif
8268 }
8269 
8270 static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
8271 {
8272 	switch (np->port) {
8273 	case 0:
8274 		np->mac_regs = np->regs + XMAC_PORT0_OFF;
8275 		np->ipp_off  = 0x00000;
8276 		np->pcs_off  = 0x04000;
8277 		np->xpcs_off = 0x02000;
8278 		break;
8279 
8280 	case 1:
8281 		np->mac_regs = np->regs + XMAC_PORT1_OFF;
8282 		np->ipp_off  = 0x08000;
8283 		np->pcs_off  = 0x0a000;
8284 		np->xpcs_off = 0x08000;
8285 		break;
8286 
8287 	case 2:
8288 		np->mac_regs = np->regs + BMAC_PORT2_OFF;
8289 		np->ipp_off  = 0x04000;
8290 		np->pcs_off  = 0x0e000;
8291 		np->xpcs_off = ~0UL;
8292 		break;
8293 
8294 	case 3:
8295 		np->mac_regs = np->regs + BMAC_PORT3_OFF;
8296 		np->ipp_off  = 0x0c000;
8297 		np->pcs_off  = 0x12000;
8298 		np->xpcs_off = ~0UL;
8299 		break;
8300 
8301 	default:
8302 		dev_err(np->device, PFX "Port %u is invalid, cannot "
8303 			"compute MAC block offset.\n", np->port);
8304 		return -EINVAL;
8305 	}
8306 
8307 	return 0;
8308 }
8309 
8310 static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
8311 {
8312 	struct msix_entry msi_vec[NIU_NUM_LDG];
8313 	struct niu_parent *parent = np->parent;
8314 	struct pci_dev *pdev = np->pdev;
8315 	int i, num_irqs, err;
8316 	u8 first_ldg;
8317 
8318 	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
8319 	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
8320 		ldg_num_map[i] = first_ldg + i;
8321 
8322 	num_irqs = (parent->rxchan_per_port[np->port] +
8323 		    parent->txchan_per_port[np->port] +
8324 		    (np->port == 0 ? 3 : 1));
8325 	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
8326 
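	/* pci_enable_msix() returns a positive count when fewer vectors
	 * are available than requested; retry with that smaller number.
	 */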
8327 retry:
8328 	for (i = 0; i < num_irqs; i++) {
8329 		msi_vec[i].vector = 0;
8330 		msi_vec[i].entry = i;
8331 	}
8332 
8333 	err = pci_enable_msix(pdev, msi_vec, num_irqs);
8334 	if (err < 0) {
8335 		np->flags &= ~NIU_FLAGS_MSIX;
8336 		return;
8337 	}
8338 	if (err > 0) {
8339 		num_irqs = err;
8340 		goto retry;
8341 	}
8342 
8343 	np->flags |= NIU_FLAGS_MSIX;
8344 	for (i = 0; i < num_irqs; i++)
8345 		np->ldg[i].irq = msi_vec[i].vector;
8346 	np->num_ldg = num_irqs;
8347 }
8348 
8349 static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
8350 {
8351 #ifdef CONFIG_SPARC64
8352 	struct of_device *op = np->op;
8353 	const u32 *int_prop;
8354 	int i;
8355 
8356 	int_prop = of_get_property(op->node, "interrupts", NULL);
8357 	if (!int_prop)
8358 		return -ENODEV;
8359 
8360 	for (i = 0; i < op->num_irqs; i++) {
8361 		ldg_num_map[i] = int_prop[i];
8362 		np->ldg[i].irq = op->irqs[i];
8363 	}
8364 
8365 	np->num_ldg = op->num_irqs;
8366 
8367 	return 0;
8368 #else
8369 	return -EINVAL;
8370 #endif
8371 }
8372 
8373 static int __devinit niu_ldg_init(struct niu *np)
8374 {
8375 	struct niu_parent *parent = np->parent;
8376 	u8 ldg_num_map[NIU_NUM_LDG];
8377 	int first_chan, num_chan;
8378 	int i, err, ldg_rotor;
8379 	u8 port;
8380 
8381 	np->num_ldg = 1;
8382 	np->ldg[0].irq = np->dev->irq;
8383 	if (parent->plat_type == PLAT_TYPE_NIU) {
8384 		err = niu_n2_irq_init(np, ldg_num_map);
8385 		if (err)
8386 			return err;
8387 	} else
8388 		niu_try_msix(np, ldg_num_map);
8389 
8390 	port = np->port;
8391 	for (i = 0; i < np->num_ldg; i++) {
8392 		struct niu_ldg *lp = &np->ldg[i];
8393 
8394 		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
8395 
8396 		lp->np = np;
8397 		lp->ldg_num = ldg_num_map[i];
8398 		lp->timer = 2; /* XXX */
8399 
8400 		/* On the N2 NIU the firmware has already set up the SID
8401 		 * mappings that route each LDG to the proper interrupt in
8402 		 * the NCU interrupt table; elsewhere we program them here.
8403 		 */
8404 		if (np->parent->plat_type != PLAT_TYPE_NIU) {
8405 			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
8406 			if (err)
8407 				return err;
8408 		}
8409 	}
8410 
8411 	/* We adopt the LDG assignment ordering used by the N2 NIU
8412 	 * 'interrupt' properties because that simplifies a lot of
8413 	 * things.  This ordering is:
8414 	 *
8415 	 *	MAC
8416 	 *	MIF	(if port zero)
8417 	 *	SYSERR	(if port zero)
8418 	 *	RX channels
8419 	 *	TX channels
8420 	 */
8421 
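	/* Hand out LDGs round-robin, wrapping back to the first group
	 * once every group owned by this port has been used.
	 */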
8422 	ldg_rotor = 0;
8423 
8424 	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
8425 				  LDN_MAC(port));
8426 	if (err)
8427 		return err;
8428 
8429 	ldg_rotor++;
8430 	if (ldg_rotor == np->num_ldg)
8431 		ldg_rotor = 0;
8432 
8433 	if (port == 0) {
8434 		err = niu_ldg_assign_ldn(np, parent,
8435 					 ldg_num_map[ldg_rotor],
8436 					 LDN_MIF);
8437 		if (err)
8438 			return err;
8439 
8440 		ldg_rotor++;
8441 		if (ldg_rotor == np->num_ldg)
8442 			ldg_rotor = 0;
8443 
8444 		err = niu_ldg_assign_ldn(np, parent,
8445 					 ldg_num_map[ldg_rotor],
8446 					 LDN_DEVICE_ERROR);
8447 		if (err)
8448 			return err;
8449 
8450 		ldg_rotor++;
8451 		if (ldg_rotor == np->num_ldg)
8452 			ldg_rotor = 0;
8453 
8454 	}
8455 
8456 	first_chan = 0;
8457 	for (i = 0; i < port; i++)
8458 		first_chan += parent->rxchan_per_port[i];
8459 	num_chan = parent->rxchan_per_port[port];
8460 
8461 	for (i = first_chan; i < (first_chan + num_chan); i++) {
8462 		err = niu_ldg_assign_ldn(np, parent,
8463 					 ldg_num_map[ldg_rotor],
8464 					 LDN_RXDMA(i));
8465 		if (err)
8466 			return err;
8467 		ldg_rotor++;
8468 		if (ldg_rotor == np->num_ldg)
8469 			ldg_rotor = 0;
8470 	}
8471 
8472 	first_chan = 0;
8473 	for (i = 0; i < port; i++)
8474 		first_chan += parent->txchan_per_port[i];
8475 	num_chan = parent->txchan_per_port[port];
8476 	for (i = first_chan; i < (first_chan + num_chan); i++) {
8477 		err = niu_ldg_assign_ldn(np, parent,
8478 					 ldg_num_map[ldg_rotor],
8479 					 LDN_TXDMA(i));
8480 		if (err)
8481 			return err;
8482 		ldg_rotor++;
8483 		if (ldg_rotor == np->num_ldg)
8484 			ldg_rotor = 0;
8485 	}
8486 
8487 	return 0;
8488 }
8489 
8490 static void __devexit niu_ldg_free(struct niu *np)
8491 {
8492 	if (np->flags & NIU_FLAGS_MSIX)
8493 		pci_disable_msix(np->pdev);
8494 }
8495 
8496 static int __devinit niu_get_of_props(struct niu *np)
8497 {
8498 #ifdef CONFIG_SPARC64
8499 	struct net_device *dev = np->dev;
8500 	struct device_node *dp;
8501 	const char *phy_type;
8502 	const u8 *mac_addr;
8503 	const char *model;
8504 	int prop_len;
8505 
8506 	if (np->parent->plat_type == PLAT_TYPE_NIU)
8507 		dp = np->op->node;
8508 	else
8509 		dp = pci_device_to_OF_node(np->pdev);
8510 
8511 	phy_type = of_get_property(dp, "phy-type", &prop_len);
8512 	if (!phy_type) {
8513 		dev_err(np->device, PFX "%s: OF node lacks "
8514 			"phy-type property\n",
8515 			dp->full_name);
8516 		return -EINVAL;
8517 	}
8518 
8519 	if (!strcmp(phy_type, "none"))
8520 		return -ENODEV;
8521 
8522 	strcpy(np->vpd.phy_type, phy_type);
8523 
8524 	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
8525 		dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
8526 			dp->full_name, np->vpd.phy_type);
8527 		return -EINVAL;
8528 	}
8529 
8530 	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
8531 	if (!mac_addr) {
8532 		dev_err(np->device, PFX "%s: OF node lacks "
8533 			"local-mac-address property\n",
8534 			dp->full_name);
8535 		return -EINVAL;
8536 	}
8537 	if (prop_len != dev->addr_len) {
8538 		dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
8539 			"is wrong.\n",
8540 			dp->full_name, prop_len);
8541 	}
8542 	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
8543 	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
8544 		int i;
8545 
8546 		dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
8547 			dp->full_name);
8548 		dev_err(np->device, PFX "%s: [ \n",
8549 			dp->full_name);
8550 		for (i = 0; i < 6; i++)
8551 			printk("%02x ", dev->perm_addr[i]);
8552 		printk("]\n");
8553 		return -EINVAL;
8554 	}
8555 
8556 	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
8557 
8558 	model = of_get_property(dp, "model", &prop_len);
8559 
8560 	if (model)
8561 		strcpy(np->vpd.model, model);
8562 
8563 	return 0;
8564 #else
8565 	return -EINVAL;
8566 #endif
8567 }
8568 
8569 static int __devinit niu_get_invariants(struct niu *np)
8570 {
8571 	int err, have_props;
8572 	u32 offset;
8573 
8574 	err = niu_get_of_props(np);
8575 	if (err == -ENODEV)
8576 		return err;
8577 
8578 	have_props = !err;
8579 
8580 	err = niu_init_mac_ipp_pcs_base(np);
8581 	if (err)
8582 		return err;
8583 
8584 	if (have_props) {
8585 		err = niu_get_and_validate_port(np);
8586 		if (err)
8587 			return err;
8588 
8589 	} else  {
8590 		if (np->parent->plat_type == PLAT_TYPE_NIU)
8591 			return -EINVAL;
8592 
8593 		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
8594 		offset = niu_pci_vpd_offset(np);
8595 		niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
8596 		       offset);
8597 		if (offset)
8598 			niu_pci_vpd_fetch(np, offset);
8599 		nw64(ESPC_PIO_EN, 0);
8600 
8601 		if (np->flags & NIU_FLAGS_VPD_VALID) {
8602 			niu_pci_vpd_validate(np);
8603 			err = niu_get_and_validate_port(np);
8604 			if (err)
8605 				return err;
8606 		}
8607 
8608 		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
8609 			err = niu_get_and_validate_port(np);
8610 			if (err)
8611 				return err;
8612 			err = niu_pci_probe_sprom(np);
8613 			if (err)
8614 				return err;
8615 		}
8616 	}
8617 
8618 	err = niu_probe_ports(np);
8619 	if (err)
8620 		return err;
8621 
8622 	niu_ldg_init(np);
8623 
8624 	niu_classifier_swstate_init(np);
8625 	niu_link_config_init(np);
8626 
8627 	err = niu_determine_phy_disposition(np);
8628 	if (!err)
8629 		err = niu_init_link(np);
8630 
8631 	return err;
8632 }
8633 
8634 static LIST_HEAD(niu_parent_list);
8635 static DEFINE_MUTEX(niu_parent_lock);
8636 static int niu_parent_index;
8637 
8638 static ssize_t show_port_phy(struct device *dev,
8639 			     struct device_attribute *attr, char *buf)
8640 {
8641 	struct platform_device *plat_dev = to_platform_device(dev);
8642 	struct niu_parent *p = plat_dev->dev.platform_data;
8643 	u32 port_phy = p->port_phy;
8644 	char *orig_buf = buf;
8645 	int i;
8646 
8647 	if (port_phy == PORT_PHY_UNKNOWN ||
8648 	    port_phy == PORT_PHY_INVALID)
8649 		return 0;
8650 
8651 	for (i = 0; i < p->num_ports; i++) {
8652 		const char *type_str;
8653 		int type;
8654 
8655 		type = phy_decode(port_phy, i);
8656 		if (type == PORT_TYPE_10G)
8657 			type_str = "10G";
8658 		else
8659 			type_str = "1G";
8660 		buf += sprintf(buf,
8661 			       (i == 0) ? "%s" : " %s",
8662 			       type_str);
8663 	}
8664 	buf += sprintf(buf, "\n");
8665 	return buf - orig_buf;
8666 }
8667 
8668 static ssize_t show_plat_type(struct device *dev,
8669 			      struct device_attribute *attr, char *buf)
8670 {
8671 	struct platform_device *plat_dev = to_platform_device(dev);
8672 	struct niu_parent *p = plat_dev->dev.platform_data;
8673 	const char *type_str;
8674 
8675 	switch (p->plat_type) {
8676 	case PLAT_TYPE_ATLAS:
8677 		type_str = "atlas";
8678 		break;
8679 	case PLAT_TYPE_NIU:
8680 		type_str = "niu";
8681 		break;
8682 	case PLAT_TYPE_VF_P0:
8683 		type_str = "vf_p0";
8684 		break;
8685 	case PLAT_TYPE_VF_P1:
8686 		type_str = "vf_p1";
8687 		break;
8688 	default:
8689 		type_str = "unknown";
8690 		break;
8691 	}
8692 
8693 	return sprintf(buf, "%s\n", type_str);
8694 }
8695 
8696 static ssize_t __show_chan_per_port(struct device *dev,
8697 				    struct device_attribute *attr, char *buf,
8698 				    int rx)
8699 {
8700 	struct platform_device *plat_dev = to_platform_device(dev);
8701 	struct niu_parent *p = plat_dev->dev.platform_data;
8702 	char *orig_buf = buf;
8703 	u8 *arr;
8704 	int i;
8705 
8706 	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
8707 
8708 	for (i = 0; i < p->num_ports; i++) {
8709 		buf += sprintf(buf,
8710 			       (i == 0) ? "%d" : " %d",
8711 			       arr[i]);
8712 	}
8713 	buf += sprintf(buf, "\n");
8714 
8715 	return buf - orig_buf;
8716 }
8717 
8718 static ssize_t show_rxchan_per_port(struct device *dev,
8719 				    struct device_attribute *attr, char *buf)
8720 {
8721 	return __show_chan_per_port(dev, attr, buf, 1);
8722 }
8723 
8724 static ssize_t show_txchan_per_port(struct device *dev,
8725 				    struct device_attribute *attr, char *buf)
8726 {
8727 	return __show_chan_per_port(dev, attr, buf, 0);
8728 }
8729 
8730 static ssize_t show_num_ports(struct device *dev,
8731 			      struct device_attribute *attr, char *buf)
8732 {
8733 	struct platform_device *plat_dev = to_platform_device(dev);
8734 	struct niu_parent *p = plat_dev->dev.platform_data;
8735 
8736 	return sprintf(buf, "%d\n", p->num_ports);
8737 }
8738 
8739 static struct device_attribute niu_parent_attributes[] = {
8740 	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
8741 	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
8742 	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
8743 	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
8744 	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
8745 	{}
8746 };
8747 
8748 static struct niu_parent * __devinit niu_new_parent(struct niu *np,
8749 						    union niu_parent_id *id,
8750 						    u8 ptype)
8751 {
8752 	struct platform_device *plat_dev;
8753 	struct niu_parent *p;
8754 	int i;
8755 
8756 	niudbg(PROBE, "niu_new_parent: Creating new parent.\n");
8757 
8758 	plat_dev = platform_device_register_simple("niu", niu_parent_index,
8759 						   NULL, 0);
8760 	if (IS_ERR(plat_dev))
8761 		return NULL;
8762 
8763 	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
8764 		int err = device_create_file(&plat_dev->dev,
8765 					     &niu_parent_attributes[i]);
8766 		if (err)
8767 			goto fail_unregister;
8768 	}
8769 
8770 	p = kzalloc(sizeof(*p), GFP_KERNEL);
8771 	if (!p)
8772 		goto fail_unregister;
8773 
8774 	p->index = niu_parent_index++;
8775 
8776 	plat_dev->dev.platform_data = p;
8777 	p->plat_dev = plat_dev;
8778 
8779 	memcpy(&p->id, id, sizeof(*id));
8780 	p->plat_type = ptype;
8781 	INIT_LIST_HEAD(&p->list);
8782 	atomic_set(&p->refcnt, 0);
8783 	list_add(&p->list, &niu_parent_list);
8784 	spin_lock_init(&p->lock);
8785 
8786 	p->rxdma_clock_divider = 7500;
8787 
8788 	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
8789 	if (p->plat_type == PLAT_TYPE_NIU)
8790 		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
8791 
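	/* Default classification state: hash flows on source/destination
	 * IP address, protocol, and selected L4 header bytes.
	 */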
8792 	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
8793 		int index = i - CLASS_CODE_USER_PROG1;
8794 
8795 		p->tcam_key[index] = TCAM_KEY_TSEL;
8796 		p->flow_key[index] = (FLOW_KEY_IPSA |
8797 				      FLOW_KEY_IPDA |
8798 				      FLOW_KEY_PROTO |
8799 				      (FLOW_KEY_L4_BYTE12 <<
8800 				       FLOW_KEY_L4_0_SHIFT) |
8801 				      (FLOW_KEY_L4_BYTE12 <<
8802 				       FLOW_KEY_L4_1_SHIFT));
8803 	}
8804 
8805 	for (i = 0; i < LDN_MAX + 1; i++)
8806 		p->ldg_map[i] = LDG_INVALID;
8807 
8808 	return p;
8809 
8810 fail_unregister:
8811 	platform_device_unregister(plat_dev);
8812 	return NULL;
8813 }
8814 
8815 static struct niu_parent * __devinit niu_get_parent(struct niu *np,
8816 						    union niu_parent_id *id,
8817 						    u8 ptype)
8818 {
8819 	struct niu_parent *p, *tmp;
8820 	int port = np->port;
8821 
8822 	niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
8823 	       ptype, port);
8824 
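	/* Every port of a physical card shares one niu_parent, matched
	 * by the parent id (PCI domain/bus/device or OF node) and
	 * refcounted as ports attach and detach.
	 */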
8825 	mutex_lock(&niu_parent_lock);
8826 	p = NULL;
8827 	list_for_each_entry(tmp, &niu_parent_list, list) {
8828 		if (!memcmp(id, &tmp->id, sizeof(*id))) {
8829 			p = tmp;
8830 			break;
8831 		}
8832 	}
8833 	if (!p)
8834 		p = niu_new_parent(np, id, ptype);
8835 
8836 	if (p) {
8837 		char port_name[6];
8838 		int err;
8839 
8840 		sprintf(port_name, "port%d", port);
8841 		err = sysfs_create_link(&p->plat_dev->dev.kobj,
8842 					&np->device->kobj,
8843 					port_name);
8844 		if (!err) {
8845 			p->ports[port] = np;
8846 			atomic_inc(&p->refcnt);
8847 		}
8848 	}
8849 	mutex_unlock(&niu_parent_lock);
8850 
8851 	return p;
8852 }
8853 
8854 static void niu_put_parent(struct niu *np)
8855 {
8856 	struct niu_parent *p = np->parent;
8857 	u8 port = np->port;
8858 	char port_name[6];
8859 
8860 	BUG_ON(!p || p->ports[port] != np);
8861 
8862 	niudbg(PROBE, "niu_put_parent: port[%u]\n", port);
8863 
8864 	sprintf(port_name, "port%d", port);
8865 
8866 	mutex_lock(&niu_parent_lock);
8867 
8868 	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
8869 
8870 	p->ports[port] = NULL;
8871 	np->parent = NULL;
8872 
8873 	if (atomic_dec_and_test(&p->refcnt)) {
8874 		list_del(&p->list);
8875 		platform_device_unregister(p->plat_dev);
8876 	}
8877 
8878 	mutex_unlock(&niu_parent_lock);
8879 }
8880 
8881 static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
8882 				    u64 *handle, gfp_t flag)
8883 {
8884 	dma_addr_t dh;
8885 	void *ret;
8886 
8887 	ret = dma_alloc_coherent(dev, size, &dh, flag);
8888 	if (ret)
8889 		*handle = dh;
8890 	return ret;
8891 }
8892 
8893 static void niu_pci_free_coherent(struct device *dev, size_t size,
8894 				  void *cpu_addr, u64 handle)
8895 {
8896 	dma_free_coherent(dev, size, cpu_addr, handle);
8897 }
8898 
8899 static u64 niu_pci_map_page(struct device *dev, struct page *page,
8900 			    unsigned long offset, size_t size,
8901 			    enum dma_data_direction direction)
8902 {
8903 	return dma_map_page(dev, page, offset, size, direction);
8904 }
8905 
8906 static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
8907 			       size_t size, enum dma_data_direction direction)
8908 {
8909 	dma_unmap_page(dev, dma_address, size, direction);
8910 }
8911 
8912 static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
8913 			      size_t size,
8914 			      enum dma_data_direction direction)
8915 {
8916 	return dma_map_single(dev, cpu_addr, size, direction);
8917 }
8918 
8919 static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
8920 				 size_t size,
8921 				 enum dma_data_direction direction)
8922 {
8923 	dma_unmap_single(dev, dma_address, size, direction);
8924 }
8925 
8926 static const struct niu_ops niu_pci_ops = {
8927 	.alloc_coherent	= niu_pci_alloc_coherent,
8928 	.free_coherent	= niu_pci_free_coherent,
8929 	.map_page	= niu_pci_map_page,
8930 	.unmap_page	= niu_pci_unmap_page,
8931 	.map_single	= niu_pci_map_single,
8932 	.unmap_single	= niu_pci_unmap_single,
8933 };
8934 
8935 static void __devinit niu_driver_version(void)
8936 {
8937 	static int niu_version_printed;
8938 
8939 	if (niu_version_printed++ == 0)
8940 		pr_info("%s", version);
8941 }
8942 
8943 static struct net_device * __devinit niu_alloc_and_init(
8944 	struct device *gen_dev, struct pci_dev *pdev,
8945 	struct of_device *op, const struct niu_ops *ops,
8946 	u8 port)
8947 {
8948 	struct net_device *dev;
8949 	struct niu *np;
8950 
8951 	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
8952 	if (!dev) {
8953 		dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
8954 		return NULL;
8955 	}
8956 
8957 	SET_NETDEV_DEV(dev, gen_dev);
8958 
8959 	np = netdev_priv(dev);
8960 	np->dev = dev;
8961 	np->pdev = pdev;
8962 	np->op = op;
8963 	np->device = gen_dev;
8964 	np->ops = ops;
8965 
8966 	np->msg_enable = niu_debug;
8967 
8968 	spin_lock_init(&np->lock);
8969 	INIT_WORK(&np->reset_task, niu_reset_task);
8970 
8971 	np->port = port;
8972 
8973 	return dev;
8974 }
8975 
8976 static const struct net_device_ops niu_netdev_ops = {
8977 	.ndo_open		= niu_open,
8978 	.ndo_stop		= niu_close,
8979 	.ndo_start_xmit		= niu_start_xmit,
8980 	.ndo_get_stats		= niu_get_stats,
8981 	.ndo_set_multicast_list	= niu_set_rx_mode,
8982 	.ndo_validate_addr	= eth_validate_addr,
8983 	.ndo_set_mac_address	= niu_set_mac_addr,
8984 	.ndo_do_ioctl		= niu_ioctl,
8985 	.ndo_tx_timeout		= niu_tx_timeout,
8986 	.ndo_change_mtu		= niu_change_mtu,
8987 };
8988 
8989 static void __devinit niu_assign_netdev_ops(struct net_device *dev)
8990 {
8991 	dev->netdev_ops = &niu_netdev_ops;
8992 	dev->ethtool_ops = &niu_ethtool_ops;
8993 	dev->watchdog_timeo = NIU_TX_TIMEOUT;
8994 }
8995 
8996 static void __devinit niu_device_announce(struct niu *np)
8997 {
8998 	struct net_device *dev = np->dev;
8999 
9000 	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
9001 
9002 	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
9003 		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9004 				dev->name,
9005 				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9006 				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9007 				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
9008 				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9009 				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9010 				np->vpd.phy_type);
9011 	} else {
9012 		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9013 				dev->name,
9014 				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9015 				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9016 				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
9017 				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
9018 				  "COPPER")),
9019 				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9020 				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9021 				np->vpd.phy_type);
9022 	}
9023 }
9024 
9025 static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9026 				      const struct pci_device_id *ent)
9027 {
9028 	union niu_parent_id parent_id;
9029 	struct net_device *dev;
9030 	struct niu *np;
9031 	int err, pos;
9032 	u64 dma_mask;
9033 	u16 val16;
9034 
9035 	niu_driver_version();
9036 
9037 	err = pci_enable_device(pdev);
9038 	if (err) {
9039 		dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
9040 			"aborting.\n");
9041 		return err;
9042 	}
9043 
9044 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
9045 	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9046 		dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
9047 			"base addresses, aborting.\n");
9048 		err = -ENODEV;
9049 		goto err_out_disable_pdev;
9050 	}
9051 
9052 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
9053 	if (err) {
9054 		dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
9055 			"aborting.\n");
9056 		goto err_out_disable_pdev;
9057 	}
9058 
9059 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9060 	if (pos <= 0) {
9061 		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
9062 			"aborting.\n");
		err = -ENODEV;
9063 		goto err_out_free_res;
9064 	}
9065 
9066 	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
9067 				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
9068 	if (!dev) {
9069 		err = -ENOMEM;
9070 		goto err_out_free_res;
9071 	}
9072 	np = netdev_priv(dev);
9073 
9074 	memset(&parent_id, 0, sizeof(parent_id));
9075 	parent_id.pci.domain = pci_domain_nr(pdev->bus);
9076 	parent_id.pci.bus = pdev->bus->number;
9077 	parent_id.pci.device = PCI_SLOT(pdev->devfn);
9078 
9079 	np->parent = niu_get_parent(np, &parent_id,
9080 				    PLAT_TYPE_ATLAS);
9081 	if (!np->parent) {
9082 		err = -ENOMEM;
9083 		goto err_out_free_dev;
9084 	}
9085 
9086 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
9087 	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
9088 	val16 |= (PCI_EXP_DEVCTL_CERE |
9089 		  PCI_EXP_DEVCTL_NFERE |
9090 		  PCI_EXP_DEVCTL_FERE |
9091 		  PCI_EXP_DEVCTL_URRE |
9092 		  PCI_EXP_DEVCTL_RELAX_EN);
9093 	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
9094 
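	/* Prefer the chip's native 44-bit DMA mask; if the platform
	 * cannot honor it, fall back to a 32-bit mask before giving up.
	 */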
9095 	dma_mask = DMA_44BIT_MASK;
9096 	err = pci_set_dma_mask(pdev, dma_mask);
9097 	if (!err) {
9098 		dev->features |= NETIF_F_HIGHDMA;
9099 		err = pci_set_consistent_dma_mask(pdev, dma_mask);
9100 		if (err) {
9101 			dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
9102 				"DMA for consistent allocations, "
9103 				"aborting.\n");
9104 			goto err_out_release_parent;
9105 		}
9106 	}
9107 	if (err || dma_mask == DMA_32BIT_MASK) {
9108 		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
9109 		if (err) {
9110 			dev_err(&pdev->dev, PFX "No usable DMA configuration, "
9111 				"aborting.\n");
9112 			goto err_out_release_parent;
9113 		}
9114 	}
9115 
9116 	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
9117 
9118 	np->regs = pci_ioremap_bar(pdev, 0);
9119 	if (!np->regs) {
9120 		dev_err(&pdev->dev, PFX "Cannot map device registers, "
9121 			"aborting.\n");
9122 		err = -ENOMEM;
9123 		goto err_out_release_parent;
9124 	}
9125 
9126 	pci_set_master(pdev);
9127 	pci_save_state(pdev);
9128 
9129 	dev->irq = pdev->irq;
9130 
9131 	niu_assign_netdev_ops(dev);
9132 
9133 	err = niu_get_invariants(np);
9134 	if (err) {
9135 		if (err != -ENODEV)
9136 			dev_err(&pdev->dev, PFX "Problem fetching invariants "
9137 				"of chip, aborting.\n");
9138 		goto err_out_iounmap;
9139 	}
9140 
9141 	err = register_netdev(dev);
9142 	if (err) {
9143 		dev_err(&pdev->dev, PFX "Cannot register net device, "
9144 			"aborting.\n");
9145 		goto err_out_iounmap;
9146 	}
9147 
9148 	pci_set_drvdata(pdev, dev);
9149 
9150 	niu_device_announce(np);
9151 
9152 	return 0;
9153 
9154 err_out_iounmap:
9155 	if (np->regs) {
9156 		iounmap(np->regs);
9157 		np->regs = NULL;
9158 	}
9159 
9160 err_out_release_parent:
9161 	niu_put_parent(np);
9162 
9163 err_out_free_dev:
9164 	free_netdev(dev);
9165 
9166 err_out_free_res:
9167 	pci_release_regions(pdev);
9168 
9169 err_out_disable_pdev:
9170 	pci_disable_device(pdev);
9171 	pci_set_drvdata(pdev, NULL);
9172 
9173 	return err;
9174 }
9175 
9176 static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
9177 {
9178 	struct net_device *dev = pci_get_drvdata(pdev);
9179 
9180 	if (dev) {
9181 		struct niu *np = netdev_priv(dev);
9182 
9183 		unregister_netdev(dev);
9184 		if (np->regs) {
9185 			iounmap(np->regs);
9186 			np->regs = NULL;
9187 		}
9188 
9189 		niu_ldg_free(np);
9190 
9191 		niu_put_parent(np);
9192 
9193 		free_netdev(dev);
9194 		pci_release_regions(pdev);
9195 		pci_disable_device(pdev);
9196 		pci_set_drvdata(pdev, NULL);
9197 	}
9198 }
9199 
9200 static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
9201 {
9202 	struct net_device *dev = pci_get_drvdata(pdev);
9203 	struct niu *np = netdev_priv(dev);
9204 	unsigned long flags;
9205 
9206 	if (!netif_running(dev))
9207 		return 0;
9208 
9209 	flush_scheduled_work();
9210 	niu_netif_stop(np);
9211 
9212 	del_timer_sync(&np->timer);
9213 
9214 	spin_lock_irqsave(&np->lock, flags);
9215 	niu_enable_interrupts(np, 0);
9216 	spin_unlock_irqrestore(&np->lock, flags);
9217 
9218 	netif_device_detach(dev);
9219 
9220 	spin_lock_irqsave(&np->lock, flags);
9221 	niu_stop_hw(np);
9222 	spin_unlock_irqrestore(&np->lock, flags);
9223 
9224 	pci_save_state(pdev);
9225 
9226 	return 0;
9227 }
9228 
9229 static int niu_resume(struct pci_dev *pdev)
9230 {
9231 	struct net_device *dev = pci_get_drvdata(pdev);
9232 	struct niu *np = netdev_priv(dev);
9233 	unsigned long flags;
9234 	int err;
9235 
9236 	if (!netif_running(dev))
9237 		return 0;
9238 
9239 	pci_restore_state(pdev);
9240 
9241 	netif_device_attach(dev);
9242 
9243 	spin_lock_irqsave(&np->lock, flags);
9244 
9245 	err = niu_init_hw(np);
9246 	if (!err) {
9247 		np->timer.expires = jiffies + HZ;
9248 		add_timer(&np->timer);
9249 		niu_netif_start(np);
9250 	}
9251 
9252 	spin_unlock_irqrestore(&np->lock, flags);
9253 
9254 	return err;
9255 }
9256 
9257 static struct pci_driver niu_pci_driver = {
9258 	.name		= DRV_MODULE_NAME,
9259 	.id_table	= niu_pci_tbl,
9260 	.probe		= niu_pci_init_one,
9261 	.remove		= __devexit_p(niu_pci_remove_one),
9262 	.suspend	= niu_suspend,
9263 	.resume		= niu_resume,
9264 };
9265 
9266 #ifdef CONFIG_SPARC64
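/* On the Niagara-2 NIU the device works with real physical addresses,
 * so these DMA ops reduce to identity mappings around __pa() and
 * page_to_phys().
 */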
9267 static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
9268 				     u64 *dma_addr, gfp_t flag)
9269 {
9270 	unsigned long order = get_order(size);
9271 	unsigned long page = __get_free_pages(flag, order);
9272 
9273 	if (page == 0UL)
9274 		return NULL;
9275 	memset((char *)page, 0, PAGE_SIZE << order);
9276 	*dma_addr = __pa(page);
9277 
9278 	return (void *) page;
9279 }
9280 
9281 static void niu_phys_free_coherent(struct device *dev, size_t size,
9282 				   void *cpu_addr, u64 handle)
9283 {
9284 	unsigned long order = get_order(size);
9285 
9286 	free_pages((unsigned long) cpu_addr, order);
9287 }
9288 
9289 static u64 niu_phys_map_page(struct device *dev, struct page *page,
9290 			     unsigned long offset, size_t size,
9291 			     enum dma_data_direction direction)
9292 {
9293 	return page_to_phys(page) + offset;
9294 }
9295 
9296 static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
9297 				size_t size, enum dma_data_direction direction)
9298 {
9299 	/* Nothing to do.  */
9300 }
9301 
9302 static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
9303 			       size_t size,
9304 			       enum dma_data_direction direction)
9305 {
9306 	return __pa(cpu_addr);
9307 }
9308 
9309 static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
9310 				  size_t size,
9311 				  enum dma_data_direction direction)
9312 {
9313 	/* Nothing to do.  */
9314 }
9315 
9316 static const struct niu_ops niu_phys_ops = {
9317 	.alloc_coherent	= niu_phys_alloc_coherent,
9318 	.free_coherent	= niu_phys_free_coherent,
9319 	.map_page	= niu_phys_map_page,
9320 	.unmap_page	= niu_phys_unmap_page,
9321 	.map_single	= niu_phys_map_single,
9322 	.unmap_single	= niu_phys_unmap_single,
9323 };
9324 
9325 static unsigned long res_size(struct resource *r)
9326 {
9327 	return r->end - r->start + 1UL;
9328 }
9329 
9330 static int __devinit niu_of_probe(struct of_device *op,
9331 				  const struct of_device_id *match)
9332 {
9333 	union niu_parent_id parent_id;
9334 	struct net_device *dev;
9335 	struct niu *np;
9336 	const u32 *reg;
9337 	int err;
9338 
9339 	niu_driver_version();
9340 
9341 	reg = of_get_property(op->node, "reg", NULL);
9342 	if (!reg) {
9343 		dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
9344 			op->node->full_name);
9345 		return -ENODEV;
9346 	}
9347 
9348 	dev = niu_alloc_and_init(&op->dev, NULL, op,
9349 				 &niu_phys_ops, reg[0] & 0x1);
9350 	if (!dev) {
9351 		err = -ENOMEM;
9352 		goto err_out;
9353 	}
9354 	np = netdev_priv(dev);
9355 
9356 	memset(&parent_id, 0, sizeof(parent_id));
9357 	parent_id.of = of_get_parent(op->node);
9358 
9359 	np->parent = niu_get_parent(np, &parent_id,
9360 				    PLAT_TYPE_NIU);
9361 	if (!np->parent) {
9362 		err = -ENOMEM;
9363 		goto err_out_free_dev;
9364 	}
9365 
9366 	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
9367 
9368 	np->regs = of_ioremap(&op->resource[1], 0,
9369 			      res_size(&op->resource[1]),
9370 			      "niu regs");
9371 	if (!np->regs) {
9372 		dev_err(&op->dev, PFX "Cannot map device registers, "
9373 			"aborting.\n");
9374 		err = -ENOMEM;
9375 		goto err_out_release_parent;
9376 	}
9377 
9378 	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
9379 				    res_size(&op->resource[2]),
9380 				    "niu vregs-1");
9381 	if (!np->vir_regs_1) {
9382 		dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
9383 			"aborting.\n");
9384 		err = -ENOMEM;
9385 		goto err_out_iounmap;
9386 	}
9387 
9388 	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
9389 				    res_size(&op->resource[3]),
9390 				    "niu vregs-2");
9391 	if (!np->vir_regs_2) {
9392 		dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
9393 			"aborting.\n");
9394 		err = -ENOMEM;
9395 		goto err_out_iounmap;
9396 	}
9397 
9398 	niu_assign_netdev_ops(dev);
9399 
9400 	err = niu_get_invariants(np);
9401 	if (err) {
9402 		if (err != -ENODEV)
9403 			dev_err(&op->dev, PFX "Problem fetching invariants "
9404 				"of chip, aborting.\n");
9405 		goto err_out_iounmap;
9406 	}
9407 
9408 	err = register_netdev(dev);
9409 	if (err) {
9410 		dev_err(&op->dev, PFX "Cannot register net device, "
9411 			"aborting.\n");
9412 		goto err_out_iounmap;
9413 	}
9414 
9415 	dev_set_drvdata(&op->dev, dev);
9416 
9417 	niu_device_announce(np);
9418 
9419 	return 0;
9420 
9421 err_out_iounmap:
9422 	if (np->vir_regs_1) {
9423 		of_iounmap(&op->resource[2], np->vir_regs_1,
9424 			   res_size(&op->resource[2]));
9425 		np->vir_regs_1 = NULL;
9426 	}
9427 
9428 	if (np->vir_regs_2) {
9429 		of_iounmap(&op->resource[3], np->vir_regs_2,
9430 			   res_size(&op->resource[3]));
9431 		np->vir_regs_2 = NULL;
9432 	}
9433 
9434 	if (np->regs) {
9435 		of_iounmap(&op->resource[1], np->regs,
9436 			   res_size(&op->resource[1]));
9437 		np->regs = NULL;
9438 	}
9439 
9440 err_out_release_parent:
9441 	niu_put_parent(np);
9442 
9443 err_out_free_dev:
9444 	free_netdev(dev);
9445 
9446 err_out:
9447 	return err;
9448 }
9449 
9450 static int __devexit niu_of_remove(struct of_device *op)
9451 {
9452 	struct net_device *dev = dev_get_drvdata(&op->dev);
9453 
9454 	if (dev) {
9455 		struct niu *np = netdev_priv(dev);
9456 
9457 		unregister_netdev(dev);
9458 
9459 		if (np->vir_regs_1) {
9460 			of_iounmap(&op->resource[2], np->vir_regs_1,
9461 				   res_size(&op->resource[2]));
9462 			np->vir_regs_1 = NULL;
9463 		}
9464 
9465 		if (np->vir_regs_2) {
9466 			of_iounmap(&op->resource[3], np->vir_regs_2,
9467 				   res_size(&op->resource[3]));
9468 			np->vir_regs_2 = NULL;
9469 		}
9470 
9471 		if (np->regs) {
9472 			of_iounmap(&op->resource[1], np->regs,
9473 				   res_size(&op->resource[1]));
9474 			np->regs = NULL;
9475 		}
9476 
9477 		niu_ldg_free(np);
9478 
9479 		niu_put_parent(np);
9480 
9481 		free_netdev(dev);
9482 		dev_set_drvdata(&op->dev, NULL);
9483 	}
9484 	return 0;
9485 }
9486 
9487 static const struct of_device_id niu_match[] = {
9488 	{
9489 		.name = "network",
9490 		.compatible = "SUNW,niusl",
9491 	},
9492 	{},
9493 };
9494 MODULE_DEVICE_TABLE(of, niu_match);
9495 
9496 static struct of_platform_driver niu_of_driver = {
9497 	.name		= "niu",
9498 	.match_table	= niu_match,
9499 	.probe		= niu_of_probe,
9500 	.remove		= __devexit_p(niu_of_remove),
9501 };
9502 
9503 #endif /* CONFIG_SPARC64 */
9504 
9505 static int __init niu_init(void)
9506 {
9507 	int err = 0;
9508 
9509 	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
9510 
9511 	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
9512 
9513 #ifdef CONFIG_SPARC64
9514 	err = of_register_driver(&niu_of_driver, &of_bus_type);
9515 #endif
9516 
9517 	if (!err) {
9518 		err = pci_register_driver(&niu_pci_driver);
9519 #ifdef CONFIG_SPARC64
9520 		if (err)
9521 			of_unregister_driver(&niu_of_driver);
9522 #endif
9523 	}
9524 
9525 	return err;
9526 }
9527 
9528 static void __exit niu_exit(void)
9529 {
9530 	pci_unregister_driver(&niu_pci_driver);
9531 #ifdef CONFIG_SPARC64
9532 	of_unregister_driver(&niu_of_driver);
9533 #endif
9534 }
9535 
9536 module_init(niu_init);
9537 module_exit(niu_exit);
9538