Lines matching refs: np (the driver's struct niu device pointer)
72 #define nr64(reg) readq(np->regs + (reg))
73 #define nw64(reg, val) writeq((val), np->regs + (reg))
75 #define nr64_mac(reg) readq(np->mac_regs + (reg))
76 #define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg))
78 #define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg))
79 #define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg))
81 #define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg))
82 #define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg))
84 #define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg))
85 #define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg))
94 #define niu_lock_parent(np, flags) \ argument
95 spin_lock_irqsave(&np->parent->lock, flags)
96 #define niu_unlock_parent(np, flags) \ argument
97 spin_unlock_irqrestore(&np->parent->lock, flags)
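
niu_lock_parent()/niu_unlock_parent() take the lock shared across the ports of one parent device; callers such as fflp_early_init() and niu_txc_enable_port() further down follow the usual irqsave pattern. A minimal usage sketch:

    unsigned long flags;

    niu_lock_parent(np, flags);    /* spin_lock_irqsave(&np->parent->lock, flags) */
    /* ... touch state reached through np->parent ... */
    niu_unlock_parent(np, flags);  /* spin_unlock_irqrestore(&np->parent->lock, flags) */
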
99 static int serdes_init_10g_serdes(struct niu *np);
101 static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, in __niu_wait_bits_clear_mac() argument
116 static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, in __niu_set_and_wait_clear_mac() argument
123 err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); in __niu_set_and_wait_clear_mac()
125 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", in __niu_set_and_wait_clear_mac()
136 static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, in __niu_wait_bits_clear_ipp() argument
151 static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, in __niu_set_and_wait_clear_ipp() argument
162 err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); in __niu_set_and_wait_clear_ipp()
164 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", in __niu_set_and_wait_clear_ipp()
175 static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, in __niu_wait_bits_clear() argument
195 static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, in __niu_set_and_wait_clear() argument
202 err = __niu_wait_bits_clear(np, reg, bits, limit, delay); in __niu_set_and_wait_clear()
204 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", in __niu_set_and_wait_clear()
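
The three __niu_set_and_wait_clear*() variants above (plain, _mac, _ipp) write the requested bits and then delegate to the matching __niu_wait_bits_clear*() helper, which polls until the bits drop so the caller can report "would not clear" via netdev_err() on timeout. A rough, hedged sketch of that poll loop, not the verbatim niu.c body:

    /* Hedged sketch: bounded busy-wait for (reg & bits) to clear,
     * with udelay(delay) between reads.  The MAC/IPP variants read
     * through nr64_mac()/nr64_ipp() instead of nr64().
     */
    static int wait_bits_clear_sketch(struct niu *np, unsigned long reg,
                                      u64 bits, int limit, int delay)
    {
            while (--limit >= 0) {
                    u64 val = nr64(reg);

                    if (!(val & bits))
                            return 0;
                    udelay(delay);
            }
            return -ENODEV;        /* caller then logs the netdev_err() above */
    }
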
215 static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) in niu_ldg_rearm() argument
225 static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) in niu_ldn_irq_enable() argument
251 static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) in niu_enable_ldn_in_ldg() argument
253 struct niu_parent *parent = np->parent; in niu_enable_ldn_in_ldg()
262 err = niu_ldn_irq_enable(np, i, on); in niu_enable_ldn_in_ldg()
269 static int niu_enable_interrupts(struct niu *np, int on) in niu_enable_interrupts() argument
273 for (i = 0; i < np->num_ldg; i++) { in niu_enable_interrupts()
274 struct niu_ldg *lp = &np->ldg[i]; in niu_enable_interrupts()
277 err = niu_enable_ldn_in_ldg(np, lp, on); in niu_enable_interrupts()
281 for (i = 0; i < np->num_ldg; i++) in niu_enable_interrupts()
282 niu_ldg_rearm(np, &np->ldg[i], on); in niu_enable_interrupts()
297 static int mdio_wait(struct niu *np) in mdio_wait() argument
313 static int mdio_read(struct niu *np, int port, int dev, int reg) in mdio_read() argument
318 err = mdio_wait(np); in mdio_read()
323 return mdio_wait(np); in mdio_read()
326 static int mdio_write(struct niu *np, int port, int dev, int reg, int data) in mdio_write() argument
331 err = mdio_wait(np); in mdio_write()
336 err = mdio_wait(np); in mdio_write()
343 static int mii_read(struct niu *np, int port, int reg) in mii_read() argument
346 return mdio_wait(np); in mii_read()
349 static int mii_write(struct niu *np, int port, int reg, int data) in mii_write() argument
354 err = mdio_wait(np); in mii_write()
361 static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) in esr2_set_tx_cfg() argument
365 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_tx_cfg()
369 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_tx_cfg()
375 static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) in esr2_set_rx_cfg() argument
379 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_rx_cfg()
383 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_rx_cfg()
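
esr2_set_tx_cfg() and esr2_set_rx_cfg() each issue two mdio_write() calls to the ESR2 device, consistent with pushing the 32-bit SERDES config value as two 16-bit halves to a per-channel _L/_H register pair. A hedged sketch of that split; CFG_REG_L()/CFG_REG_H() are placeholders for the real per-channel register macros in niu.h:

    /* Sketch: write a 32-bit cfg word as low then high 16-bit halves. */
    err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
                     CFG_REG_L(channel), val & 0xffff);
    if (!err)
            err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
                             CFG_REG_H(channel), val >> 16);
    return err;
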
390 static int serdes_init_niu_10g_fiber(struct niu *np) in serdes_init_niu_10g_fiber() argument
392 struct niu_link_config *lp = &np->link_config; in serdes_init_niu_10g_fiber()
404 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_fiber()
413 int err = esr2_set_tx_cfg(np, i, tx_cfg); in serdes_init_niu_10g_fiber()
419 int err = esr2_set_rx_cfg(np, i, rx_cfg); in serdes_init_niu_10g_fiber()
427 static int serdes_init_niu_1g_serdes(struct niu *np) in serdes_init_niu_1g_serdes() argument
429 struct niu_link_config *lp = &np->link_config; in serdes_init_niu_1g_serdes()
443 if (np->port == 0) in serdes_init_niu_1g_serdes()
449 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_1g_serdes()
459 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_1g_serdes()
462 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", in serdes_init_niu_1g_serdes()
463 np->port, __func__); in serdes_init_niu_1g_serdes()
469 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_1g_serdes()
472 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", in serdes_init_niu_1g_serdes()
473 np->port, __func__); in serdes_init_niu_1g_serdes()
481 err = esr2_set_tx_cfg(np, i, tx_cfg); in serdes_init_niu_1g_serdes()
487 err = esr2_set_rx_cfg(np, i, rx_cfg); in serdes_init_niu_1g_serdes()
492 switch (np->port) { in serdes_init_niu_1g_serdes()
516 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", in serdes_init_niu_1g_serdes()
517 np->port, (int)(sig & mask), (int)val); in serdes_init_niu_1g_serdes()
524 static int serdes_init_niu_10g_serdes(struct niu *np) in serdes_init_niu_10g_serdes() argument
526 struct niu_link_config *lp = &np->link_config; in serdes_init_niu_10g_serdes()
541 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_serdes()
551 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_serdes()
554 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", in serdes_init_niu_10g_serdes()
555 np->port, __func__); in serdes_init_niu_10g_serdes()
561 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_serdes()
564 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", in serdes_init_niu_10g_serdes()
565 np->port, __func__); in serdes_init_niu_10g_serdes()
573 err = esr2_set_tx_cfg(np, i, tx_cfg); in serdes_init_niu_10g_serdes()
579 err = esr2_set_rx_cfg(np, i, rx_cfg); in serdes_init_niu_10g_serdes()
586 switch (np->port) { in serdes_init_niu_10g_serdes()
623 np->port, (int)(sig & mask), (int)val); in serdes_init_niu_10g_serdes()
626 err = serdes_init_niu_1g_serdes(np); in serdes_init_niu_10g_serdes()
628 np->flags &= ~NIU_FLAGS_10G; in serdes_init_niu_10g_serdes()
629 np->mac_xcvr = MAC_XCVR_PCS; in serdes_init_niu_10g_serdes()
631 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", in serdes_init_niu_10g_serdes()
632 np->port); in serdes_init_niu_10g_serdes()
639 static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) in esr_read_rxtx_ctrl() argument
643 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); in esr_read_rxtx_ctrl()
646 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_rxtx_ctrl()
655 static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) in esr_read_glue0() argument
659 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_glue0()
663 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_glue0()
673 static int esr_read_reset(struct niu *np, u32 *val) in esr_read_reset() argument
677 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_reset()
681 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_reset()
691 static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) in esr_write_rxtx_ctrl() argument
695 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_rxtx_ctrl()
698 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_rxtx_ctrl()
703 static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) in esr_write_glue0() argument
707 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_glue0()
710 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_glue0()
715 static int esr_reset(struct niu *np) in esr_reset() argument
720 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
724 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
730 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
736 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
742 err = esr_read_reset(np, &reset); in esr_reset()
746 netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", in esr_reset()
747 np->port, reset); in esr_reset()
754 static int serdes_init_10g(struct niu *np) in serdes_init_10g() argument
756 struct niu_link_config *lp = &np->link_config; in serdes_init_10g()
761 switch (np->port) { in serdes_init_10g()
806 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); in serdes_init_10g()
809 err = esr_read_glue0(np, i, &glue0); in serdes_init_10g()
826 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); in serdes_init_10g()
829 err = esr_write_glue0(np, i, glue0); in serdes_init_10g()
834 err = esr_reset(np); in serdes_init_10g()
839 switch (np->port) { in serdes_init_10g()
867 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { in serdes_init_10g()
868 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; in serdes_init_10g()
871 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", in serdes_init_10g()
872 np->port, (int)(sig & mask), (int)val); in serdes_init_10g()
875 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) in serdes_init_10g()
876 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; in serdes_init_10g()
880 static int serdes_init_1g(struct niu *np) in serdes_init_1g() argument
886 switch (np->port) { in serdes_init_1g()
907 static int serdes_init_1g_serdes(struct niu *np) in serdes_init_1g_serdes() argument
909 struct niu_link_config *lp = &np->link_config; in serdes_init_1g_serdes()
918 switch (np->port) { in serdes_init_1g_serdes()
974 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); in serdes_init_1g_serdes()
977 err = esr_read_glue0(np, i, &glue0); in serdes_init_1g_serdes()
994 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); in serdes_init_1g_serdes()
997 err = esr_write_glue0(np, i, glue0); in serdes_init_1g_serdes()
1004 switch (np->port) { in serdes_init_1g_serdes()
1020 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", in serdes_init_1g_serdes()
1021 np->port, (int)(sig & mask), (int)val); in serdes_init_1g_serdes()
1028 static int link_status_1g_serdes(struct niu *np, int *link_up_p) in link_status_1g_serdes() argument
1030 struct niu_link_config *lp = &np->link_config; in link_status_1g_serdes()
1041 spin_lock_irqsave(&np->lock, flags); in link_status_1g_serdes()
1053 spin_unlock_irqrestore(&np->lock, flags); in link_status_1g_serdes()
1059 static int link_status_10g_serdes(struct niu *np, int *link_up_p) in link_status_10g_serdes() argument
1062 struct niu_link_config *lp = &np->link_config; in link_status_10g_serdes()
1069 if (!(np->flags & NIU_FLAGS_10G)) in link_status_10g_serdes()
1070 return link_status_1g_serdes(np, link_up_p); in link_status_10g_serdes()
1074 spin_lock_irqsave(&np->lock, flags); in link_status_10g_serdes()
1088 spin_unlock_irqrestore(&np->lock, flags); in link_status_10g_serdes()
1093 static int link_status_mii(struct niu *np, int *link_up_p) in link_status_mii() argument
1095 struct niu_link_config *lp = &np->link_config; in link_status_mii()
1100 err = mii_read(np, np->phy_addr, MII_BMCR); in link_status_mii()
1105 err = mii_read(np, np->phy_addr, MII_BMSR); in link_status_mii()
1110 err = mii_read(np, np->phy_addr, MII_ADVERTISE); in link_status_mii()
1115 err = mii_read(np, np->phy_addr, MII_LPA); in link_status_mii()
1121 err = mii_read(np, np->phy_addr, MII_ESTATUS); in link_status_mii()
1126 err = mii_read(np, np->phy_addr, MII_CTRL1000); in link_status_mii()
1131 err = mii_read(np, np->phy_addr, MII_STAT1000); in link_status_mii()
1206 static int link_status_1g_rgmii(struct niu *np, int *link_up_p) in link_status_1g_rgmii() argument
1208 struct niu_link_config *lp = &np->link_config; in link_status_1g_rgmii()
1218 spin_lock_irqsave(&np->lock, flags); in link_status_1g_rgmii()
1220 err = mii_read(np, np->phy_addr, MII_BMSR); in link_status_1g_rgmii()
1235 spin_unlock_irqrestore(&np->lock, flags); in link_status_1g_rgmii()
1241 static int link_status_1g(struct niu *np, int *link_up_p) in link_status_1g() argument
1243 struct niu_link_config *lp = &np->link_config; in link_status_1g()
1247 spin_lock_irqsave(&np->lock, flags); in link_status_1g()
1249 err = link_status_mii(np, link_up_p); in link_status_1g()
1253 spin_unlock_irqrestore(&np->lock, flags); in link_status_1g()
1257 static int bcm8704_reset(struct niu *np) in bcm8704_reset() argument
1261 err = mdio_read(np, np->phy_addr, in bcm8704_reset()
1266 err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in bcm8704_reset()
1273 err = mdio_read(np, np->phy_addr, in bcm8704_reset()
1281 netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n", in bcm8704_reset()
1282 np->port, (err & 0xffff)); in bcm8704_reset()
1291 static int bcm8704_user_dev3_readback(struct niu *np, int reg) in bcm8704_user_dev3_readback() argument
1293 int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); in bcm8704_user_dev3_readback()
1296 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); in bcm8704_user_dev3_readback()
1302 static int bcm8706_init_user_dev3(struct niu *np) in bcm8706_init_user_dev3() argument
1307 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8706_init_user_dev3()
1314 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8706_init_user_dev3()
1324 static int bcm8704_init_user_dev3(struct niu *np) in bcm8704_init_user_dev3() argument
1328 err = mdio_write(np, np->phy_addr, in bcm8704_init_user_dev3()
1342 err = mdio_write(np, np->phy_addr, in bcm8704_init_user_dev3()
1351 err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); in bcm8704_init_user_dev3()
1354 err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); in bcm8704_init_user_dev3()
1358 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8704_init_user_dev3()
1364 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8704_init_user_dev3()
1374 static int mrvl88x2011_act_led(struct niu *np, int val) in mrvl88x2011_act_led() argument
1378 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_act_led()
1386 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_act_led()
1390 static int mrvl88x2011_led_blink_rate(struct niu *np, int rate) in mrvl88x2011_led_blink_rate() argument
1394 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_led_blink_rate()
1400 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_led_blink_rate()
1407 static int xcvr_init_10g_mrvl88x2011(struct niu *np) in xcvr_init_10g_mrvl88x2011() argument
1412 err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS); in xcvr_init_10g_mrvl88x2011()
1417 err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF); in xcvr_init_10g_mrvl88x2011()
1421 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in xcvr_init_10g_mrvl88x2011()
1428 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in xcvr_init_10g_mrvl88x2011()
1433 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in xcvr_init_10g_mrvl88x2011()
1438 if (np->link_config.loopback_mode == LOOPBACK_MAC) in xcvr_init_10g_mrvl88x2011()
1443 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in xcvr_init_10g_mrvl88x2011()
1449 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in xcvr_init_10g_mrvl88x2011()
1454 static int xcvr_diag_bcm870x(struct niu *np) in xcvr_diag_bcm870x() argument
1460 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, in xcvr_diag_bcm870x()
1464 pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err); in xcvr_diag_bcm870x()
1466 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); in xcvr_diag_bcm870x()
1469 pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); in xcvr_diag_bcm870x()
1471 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in xcvr_diag_bcm870x()
1475 pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); in xcvr_diag_bcm870x()
1479 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1483 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1489 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1493 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1502 np->port); in xcvr_diag_bcm870x()
1505 np->port); in xcvr_diag_bcm870x()
1512 static int xcvr_10g_set_lb_bcm870x(struct niu *np) in xcvr_10g_set_lb_bcm870x() argument
1514 struct niu_link_config *lp = &np->link_config; in xcvr_10g_set_lb_bcm870x()
1517 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in xcvr_10g_set_lb_bcm870x()
1527 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in xcvr_10g_set_lb_bcm870x()
1535 static int xcvr_init_10g_bcm8706(struct niu *np) in xcvr_init_10g_bcm8706() argument
1540 if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && in xcvr_init_10g_bcm8706()
1541 (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) in xcvr_init_10g_bcm8706()
1553 err = bcm8704_reset(np); in xcvr_init_10g_bcm8706()
1557 err = xcvr_10g_set_lb_bcm870x(np); in xcvr_init_10g_bcm8706()
1561 err = bcm8706_init_user_dev3(np); in xcvr_init_10g_bcm8706()
1565 err = xcvr_diag_bcm870x(np); in xcvr_init_10g_bcm8706()
1572 static int xcvr_init_10g_bcm8704(struct niu *np) in xcvr_init_10g_bcm8704() argument
1576 err = bcm8704_reset(np); in xcvr_init_10g_bcm8704()
1580 err = bcm8704_init_user_dev3(np); in xcvr_init_10g_bcm8704()
1584 err = xcvr_10g_set_lb_bcm870x(np); in xcvr_init_10g_bcm8704()
1588 err = xcvr_diag_bcm870x(np); in xcvr_init_10g_bcm8704()
1595 static int xcvr_init_10g(struct niu *np) in xcvr_init_10g() argument
1610 phy_id = phy_decode(np->parent->port_phy, np->port); in xcvr_init_10g()
1611 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; in xcvr_init_10g()
1616 err = xcvr_init_10g_mrvl88x2011(np); in xcvr_init_10g()
1620 err = xcvr_init_10g_bcm8704(np); in xcvr_init_10g()
1627 static int mii_reset(struct niu *np) in mii_reset() argument
1631 err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); in mii_reset()
1638 err = mii_read(np, np->phy_addr, MII_BMCR); in mii_reset()
1645 netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", in mii_reset()
1646 np->port, err); in mii_reset()
1653 static int xcvr_init_1g_rgmii(struct niu *np) in xcvr_init_1g_rgmii() argument
1663 err = mii_reset(np); in xcvr_init_1g_rgmii()
1667 err = mii_read(np, np->phy_addr, MII_BMSR); in xcvr_init_1g_rgmii()
1674 err = mii_read(np, np->phy_addr, MII_ESTATUS); in xcvr_init_1g_rgmii()
1681 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in xcvr_init_1g_rgmii()
1690 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); in xcvr_init_1g_rgmii()
1697 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in xcvr_init_1g_rgmii()
1701 err = mii_read(np, np->phy_addr, MII_BMCR); in xcvr_init_1g_rgmii()
1704 bmcr = mii_read(np, np->phy_addr, MII_BMCR); in xcvr_init_1g_rgmii()
1706 err = mii_read(np, np->phy_addr, MII_BMSR); in xcvr_init_1g_rgmii()
1713 static int mii_init_common(struct niu *np) in mii_init_common() argument
1715 struct niu_link_config *lp = &np->link_config; in mii_init_common()
1719 err = mii_reset(np); in mii_init_common()
1723 err = mii_read(np, np->phy_addr, MII_BMSR); in mii_init_common()
1730 err = mii_read(np, np->phy_addr, MII_ESTATUS); in mii_init_common()
1737 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in mii_init_common()
1754 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); in mii_init_common()
1775 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); in mii_init_common()
1787 err = mii_write(np, np->phy_addr, in mii_init_common()
1826 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in mii_init_common()
1831 err = mii_read(np, np->phy_addr, MII_BMCR); in mii_init_common()
1836 err = mii_read(np, np->phy_addr, MII_BMSR); in mii_init_common()
1842 np->port, bmcr, bmsr); in mii_init_common()
1848 static int xcvr_init_1g(struct niu *np) in xcvr_init_1g() argument
1857 return mii_init_common(np); in xcvr_init_1g()
1860 static int niu_xcvr_init(struct niu *np) in niu_xcvr_init() argument
1862 const struct niu_phy_ops *ops = np->phy_ops; in niu_xcvr_init()
1867 err = ops->xcvr_init(np); in niu_xcvr_init()
1872 static int niu_serdes_init(struct niu *np) in niu_serdes_init() argument
1874 const struct niu_phy_ops *ops = np->phy_ops; in niu_serdes_init()
1879 err = ops->serdes_init(np); in niu_serdes_init()
1887 static int niu_link_status_common(struct niu *np, int link_up) in niu_link_status_common() argument
1889 struct niu_link_config *lp = &np->link_config; in niu_link_status_common()
1890 struct net_device *dev = np->dev; in niu_link_status_common()
1894 netif_info(np, link, dev, "Link is up at %s, %s duplex\n", in niu_link_status_common()
1901 spin_lock_irqsave(&np->lock, flags); in niu_link_status_common()
1902 niu_init_xif(np); in niu_link_status_common()
1903 niu_handle_led(np, 1); in niu_link_status_common()
1904 spin_unlock_irqrestore(&np->lock, flags); in niu_link_status_common()
1908 netif_warn(np, link, dev, "Link is down\n"); in niu_link_status_common()
1909 spin_lock_irqsave(&np->lock, flags); in niu_link_status_common()
1910 niu_handle_led(np, 0); in niu_link_status_common()
1911 spin_unlock_irqrestore(&np->lock, flags); in niu_link_status_common()
1918 static int link_status_10g_mrvl(struct niu *np, int *link_up_p) in link_status_10g_mrvl() argument
1924 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in link_status_10g_mrvl()
1930 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in link_status_10g_mrvl()
1938 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in link_status_10g_mrvl()
1943 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in link_status_10g_mrvl()
1951 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, in link_status_10g_mrvl()
1962 np->link_config.active_speed = SPEED_10000; in link_status_10g_mrvl()
1963 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_mrvl()
1966 mrvl88x2011_act_led(np, (link_up ? in link_status_10g_mrvl()
1974 static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) in link_status_10g_bcm8706() argument
1979 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, in link_status_10g_bcm8706()
1988 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in link_status_10g_bcm8706()
1998 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in link_status_10g_bcm8706()
2010 np->link_config.active_speed = SPEED_INVALID; in link_status_10g_bcm8706()
2011 np->link_config.active_duplex = DUPLEX_INVALID; in link_status_10g_bcm8706()
2016 np->link_config.active_speed = SPEED_10000; in link_status_10g_bcm8706()
2017 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_bcm8706()
2025 static int link_status_10g_bcom(struct niu *np, int *link_up_p) in link_status_10g_bcom() argument
2031 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, in link_status_10g_bcom()
2040 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in link_status_10g_bcom()
2049 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in link_status_10g_bcom()
2065 np->link_config.active_speed = SPEED_10000; in link_status_10g_bcom()
2066 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_bcom()
2074 static int link_status_10g(struct niu *np, int *link_up_p) in link_status_10g() argument
2079 spin_lock_irqsave(&np->lock, flags); in link_status_10g()
2081 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { in link_status_10g()
2084 phy_id = phy_decode(np->parent->port_phy, np->port); in link_status_10g()
2085 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; in link_status_10g()
2090 err = link_status_10g_mrvl(np, link_up_p); in link_status_10g()
2094 err = link_status_10g_bcom(np, link_up_p); in link_status_10g()
2099 spin_unlock_irqrestore(&np->lock, flags); in link_status_10g()
2104 static int niu_10g_phy_present(struct niu *np) in niu_10g_phy_present() argument
2109 switch (np->port) { in niu_10g_phy_present()
2141 static int link_status_10g_hotplug(struct niu *np, int *link_up_p) in link_status_10g_hotplug() argument
2148 spin_lock_irqsave(&np->lock, flags); in link_status_10g_hotplug()
2150 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { in link_status_10g_hotplug()
2151 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? in link_status_10g_hotplug()
2153 phy_present = niu_10g_phy_present(np); in link_status_10g_hotplug()
2158 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; in link_status_10g_hotplug()
2159 if (np->phy_ops->xcvr_init) in link_status_10g_hotplug()
2160 err = np->phy_ops->xcvr_init(np); in link_status_10g_hotplug()
2162 err = mdio_read(np, np->phy_addr, in link_status_10g_hotplug()
2169 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; in link_status_10g_hotplug()
2172 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; in link_status_10g_hotplug()
2174 netif_warn(np, link, np->dev, in link_status_10g_hotplug()
2179 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { in link_status_10g_hotplug()
2180 err = link_status_10g_bcm8706(np, link_up_p); in link_status_10g_hotplug()
2184 np->link_config.active_speed = SPEED_10000; in link_status_10g_hotplug()
2185 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_hotplug()
2190 spin_unlock_irqrestore(&np->lock, flags); in link_status_10g_hotplug()
2195 static int niu_link_status(struct niu *np, int *link_up_p) in niu_link_status() argument
2197 const struct niu_phy_ops *ops = np->phy_ops; in niu_link_status()
2202 err = ops->link_status(np, link_up_p); in niu_link_status()
2209 struct niu *np = from_timer(np, t, timer); in niu_timer() local
2213 err = niu_link_status(np, &link_up); in niu_timer()
2215 niu_link_status_common(np, link_up); in niu_timer()
2217 if (netif_carrier_ok(np->dev)) in niu_timer()
2221 np->timer.expires = jiffies + off; in niu_timer()
2223 add_timer(&np->timer); in niu_timer()
2350 static int serdes_init_10g_serdes(struct niu *np) in serdes_init_10g_serdes() argument
2352 struct niu_link_config *lp = &np->link_config; in serdes_init_10g_serdes()
2356 switch (np->port) { in serdes_init_10g_serdes()
2396 esr_reset(np); in serdes_init_10g_serdes()
2406 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); in serdes_init_10g_serdes()
2409 err = esr_read_glue0(np, i, &glue0); in serdes_init_10g_serdes()
2426 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); in serdes_init_10g_serdes()
2429 err = esr_write_glue0(np, i, glue0); in serdes_init_10g_serdes()
2436 switch (np->port) { in serdes_init_10g_serdes()
2465 err = serdes_init_1g_serdes(np); in serdes_init_10g_serdes()
2467 np->flags &= ~NIU_FLAGS_10G; in serdes_init_10g_serdes()
2468 np->mac_xcvr = MAC_XCVR_PCS; in serdes_init_10g_serdes()
2470 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", in serdes_init_10g_serdes()
2471 np->port); in serdes_init_10g_serdes()
2479 static int niu_determine_phy_disposition(struct niu *np) in niu_determine_phy_disposition() argument
2481 struct niu_parent *parent = np->parent; in niu_determine_phy_disposition()
2487 switch (np->flags & in niu_determine_phy_disposition()
2502 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { in niu_determine_phy_disposition()
2504 if (np->port == 0) in niu_determine_phy_disposition()
2506 if (np->port == 1) in niu_determine_phy_disposition()
2510 phy_addr_off += np->port; in niu_determine_phy_disposition()
2515 switch (np->flags & in niu_determine_phy_disposition()
2527 phy_addr_off += (np->port ^ 0x3); in niu_determine_phy_disposition()
2546 phy_addr_off += np->port; in niu_determine_phy_disposition()
2547 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { in niu_determine_phy_disposition()
2549 if (np->port == 0) in niu_determine_phy_disposition()
2551 if (np->port == 1) in niu_determine_phy_disposition()
2559 switch(np->port) { in niu_determine_phy_disposition()
2571 phy_addr_off = niu_atca_port_num[np->port]; in niu_determine_phy_disposition()
2579 np->phy_ops = tp->ops; in niu_determine_phy_disposition()
2580 np->phy_addr = tp->phy_addr_base + phy_addr_off; in niu_determine_phy_disposition()
2585 static int niu_init_link(struct niu *np) in niu_init_link() argument
2587 struct niu_parent *parent = np->parent; in niu_init_link()
2591 err = niu_xcvr_init(np); in niu_init_link()
2596 err = niu_serdes_init(np); in niu_init_link()
2597 if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) in niu_init_link()
2600 err = niu_xcvr_init(np); in niu_init_link()
2601 if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) in niu_init_link()
2602 niu_link_status(np, &ignore); in niu_init_link()
2606 static void niu_set_primary_mac(struct niu *np, unsigned char *addr) in niu_set_primary_mac() argument
2612 if (np->flags & NIU_FLAGS_XMAC) { in niu_set_primary_mac()
2623 static int niu_num_alt_addr(struct niu *np) in niu_num_alt_addr() argument
2625 if (np->flags & NIU_FLAGS_XMAC) in niu_num_alt_addr()
2631 static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) in niu_set_alt_mac() argument
2637 if (index >= niu_num_alt_addr(np)) in niu_set_alt_mac()
2640 if (np->flags & NIU_FLAGS_XMAC) { in niu_set_alt_mac()
2653 static int niu_enable_alt_mac(struct niu *np, int index, int on) in niu_enable_alt_mac() argument
2658 if (index >= niu_num_alt_addr(np)) in niu_enable_alt_mac()
2661 if (np->flags & NIU_FLAGS_XMAC) { in niu_enable_alt_mac()
2679 static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, in __set_rdc_table_num_hw() argument
2690 static int __set_rdc_table_num(struct niu *np, in __set_rdc_table_num() argument
2698 if (np->flags & NIU_FLAGS_XMAC) in __set_rdc_table_num()
2702 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); in __set_rdc_table_num()
2706 static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, in niu_set_primary_mac_rdc_table() argument
2709 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); in niu_set_primary_mac_rdc_table()
2712 static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, in niu_set_multicast_mac_rdc_table() argument
2715 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); in niu_set_multicast_mac_rdc_table()
2718 static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, in niu_set_alt_mac_rdc_table() argument
2721 if (idx >= niu_num_alt_addr(np)) in niu_set_alt_mac_rdc_table()
2723 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); in niu_set_alt_mac_rdc_table()
2747 static void vlan_tbl_write(struct niu *np, unsigned long index, in vlan_tbl_write() argument
2765 static void vlan_tbl_clear(struct niu *np) in vlan_tbl_clear() argument
2773 static int tcam_wait_bit(struct niu *np, u64 bit) in tcam_wait_bit() argument
2788 static int tcam_flush(struct niu *np, int index) in tcam_flush() argument
2794 return tcam_wait_bit(np, TCAM_CTL_STAT); in tcam_flush()
2798 static int tcam_read(struct niu *np, int index,
2804 err = tcam_wait_bit(np, TCAM_CTL_STAT);
2819 static int tcam_write(struct niu *np, int index, in tcam_write() argument
2832 return tcam_wait_bit(np, TCAM_CTL_STAT); in tcam_write()
2836 static int tcam_assoc_read(struct niu *np, int index, u64 *data)
2841 err = tcam_wait_bit(np, TCAM_CTL_STAT);
2849 static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) in tcam_assoc_write() argument
2854 return tcam_wait_bit(np, TCAM_CTL_STAT); in tcam_assoc_write()
2857 static void tcam_enable(struct niu *np, int on) in tcam_enable() argument
2868 static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) in tcam_set_lat_and_ratio() argument
2884 static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, in tcam_user_eth_class_enable() argument
2906 static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
2927 static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, in tcam_user_ip_class_enable() argument
2948 static int tcam_user_ip_class_set(struct niu *np, unsigned long class, in tcam_user_ip_class_set() argument
2976 static int tcam_early_init(struct niu *np) in tcam_early_init() argument
2981 tcam_enable(np, 0); in tcam_early_init()
2982 tcam_set_lat_and_ratio(np, in tcam_early_init()
2986 err = tcam_user_eth_class_enable(np, i, 0); in tcam_early_init()
2991 err = tcam_user_ip_class_enable(np, i, 0); in tcam_early_init()
2999 static int tcam_flush_all(struct niu *np) in tcam_flush_all() argument
3003 for (i = 0; i < np->parent->tcam_num_entries; i++) { in tcam_flush_all()
3004 int err = tcam_flush(np, i); in tcam_flush_all()
3017 static int hash_read(struct niu *np, unsigned long partition,
3036 static int hash_write(struct niu *np, unsigned long partition, in hash_write() argument
3054 static void fflp_reset(struct niu *np) in fflp_reset() argument
3066 static void fflp_set_timings(struct niu *np) in fflp_set_timings() argument
3085 static int fflp_set_partition(struct niu *np, u64 partition, in fflp_set_partition() argument
3109 static int fflp_disable_all_partitions(struct niu *np) in fflp_disable_all_partitions() argument
3114 int err = fflp_set_partition(np, 0, 0, 0, 0); in fflp_disable_all_partitions()
3121 static void fflp_llcsnap_enable(struct niu *np, int on) in fflp_llcsnap_enable() argument
3132 static void fflp_errors_enable(struct niu *np, int on) in fflp_errors_enable() argument
3143 static int fflp_hash_clear(struct niu *np) in fflp_hash_clear() argument
3153 int err = hash_write(np, 0, i, 1, (u64 *) &ent); in fflp_hash_clear()
3160 static int fflp_early_init(struct niu *np) in fflp_early_init() argument
3166 niu_lock_parent(np, flags); in fflp_early_init()
3168 parent = np->parent; in fflp_early_init()
3171 if (np->parent->plat_type != PLAT_TYPE_NIU) { in fflp_early_init()
3172 fflp_reset(np); in fflp_early_init()
3173 fflp_set_timings(np); in fflp_early_init()
3174 err = fflp_disable_all_partitions(np); in fflp_early_init()
3176 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3183 err = tcam_early_init(np); in fflp_early_init()
3185 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3189 fflp_llcsnap_enable(np, 1); in fflp_early_init()
3190 fflp_errors_enable(np, 0); in fflp_early_init()
3194 err = tcam_flush_all(np); in fflp_early_init()
3196 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3200 if (np->parent->plat_type != PLAT_TYPE_NIU) { in fflp_early_init()
3201 err = fflp_hash_clear(np); in fflp_early_init()
3203 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3210 vlan_tbl_clear(np); in fflp_early_init()
3215 niu_unlock_parent(np, flags); in fflp_early_init()
3219 static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) in niu_set_flow_key() argument
3229 static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) in niu_set_tcam_key() argument
3240 static u16 tcam_get_index(struct niu *np, u16 idx) in tcam_get_index() argument
3243 if (idx >= (np->clas.tcam_sz - 1)) in tcam_get_index()
3245 return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); in tcam_get_index()
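
As a worked example of the mapping in tcam_get_index(): with hypothetical values tcam_top = 256 and num_ports = 4, idx 0 lands on shared TCAM entry 256 + (0+1)*4 = 260 and idx 1 on 264, so per-port entries are interleaved num_ports apart; the bound check on the preceding line keeps idx below tcam_sz - 1, matching the -1 in tcam_get_size() and tcam_get_valid_entry_cnt() below.
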
3248 static u16 tcam_get_size(struct niu *np) in tcam_get_size() argument
3251 return np->clas.tcam_sz - 1; in tcam_get_size()
3254 static u16 tcam_get_valid_entry_cnt(struct niu *np) in tcam_get_valid_entry_cnt() argument
3257 return np->clas.tcam_valid_entries - 1; in tcam_get_valid_entry_cnt()
3307 static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, in niu_rbr_add_page() argument
3318 addr = np->ops->map_page(np->device, page, 0, in niu_rbr_add_page()
3339 static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) in niu_rbr_refill() argument
3345 int err = niu_rbr_add_page(np, rp, mask, index); in niu_rbr_refill()
3364 static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) in niu_rx_pkt_ignore() argument
3386 np->ops->unmap_page(np->device, page->index, in niu_rx_pkt_ignore()
3404 static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, in niu_process_rx_pkt() argument
3412 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); in niu_process_rx_pkt()
3414 return niu_rx_pkt_ignore(np, rp); in niu_process_rx_pkt()
3455 np->ops->unmap_page(np->device, page->index, in niu_process_rx_pkt()
3475 if (np->dev->features & NETIF_F_RXHASH) in niu_process_rx_pkt()
3487 skb->protocol = eth_type_trans(skb, np->dev); in niu_process_rx_pkt()
3494 static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) in niu_rbr_fill() argument
3501 err = niu_rbr_add_page(np, rp, mask, index); in niu_rbr_fill()
3512 static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) in niu_rbr_free() argument
3524 np->ops->unmap_page(np->device, base, PAGE_SIZE, in niu_rbr_free()
3540 static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) in release_tx_packet() argument
3556 np->ops->unmap_single(np->device, tb->mapping, in release_tx_packet()
3571 np->ops->unmap_page(np->device, tb->mapping, in release_tx_packet()
3584 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) in niu_tx_work() argument
3591 index = (rp - np->tx_rings); in niu_tx_work()
3592 txq = netdev_get_tx_queue(np->dev, index); in niu_tx_work()
3606 netif_printk(np, tx_done, KERN_DEBUG, np->dev, in niu_tx_work()
3610 cons = release_tx_packet(np, rp, cons); in niu_tx_work()
3626 static inline void niu_sync_rx_discard_stats(struct niu *np, in niu_sync_rx_discard_stats() argument
3655 dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", in niu_sync_rx_discard_stats()
3658 netif_printk(np, rx_err, KERN_DEBUG, np->dev, in niu_sync_rx_discard_stats()
3670 dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); in niu_sync_rx_discard_stats()
3672 netif_printk(np, rx_err, KERN_DEBUG, np->dev, in niu_sync_rx_discard_stats()
3678 static int niu_rx_work(struct napi_struct *napi, struct niu *np, in niu_rx_work() argument
3695 netif_printk(np, rx_status, KERN_DEBUG, np->dev, in niu_rx_work()
3702 rcr_done += niu_process_rx_pkt(napi, np, rp); in niu_rx_work()
3710 niu_rbr_refill(np, rp, GFP_ATOMIC); in niu_rx_work()
3722 niu_sync_rx_discard_stats(np, rp, 0x7FFF); in niu_rx_work()
3727 static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) in niu_poll_core() argument
3734 netif_printk(np, intr, KERN_DEBUG, np->dev, in niu_poll_core()
3737 for (i = 0; i < np->num_tx_rings; i++) { in niu_poll_core()
3738 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_poll_core()
3740 niu_tx_work(np, rp); in niu_poll_core()
3744 for (i = 0; i < np->num_rx_rings; i++) { in niu_poll_core()
3745 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_poll_core()
3750 this_work_done = niu_rx_work(&lp->napi, np, rp, in niu_poll_core()
3765 struct niu *np = lp->np; in niu_poll() local
3768 work_done = niu_poll_core(np, lp, budget); in niu_poll()
3772 niu_ldg_rearm(np, lp, 1); in niu_poll()
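
The niu_poll() lines above show the fastpath shape: run niu_poll_core() against the budget, then re-arm the logical device group via niu_ldg_rearm() once the poll is done. A hedged sketch of the standard NAPI completion sequence this implies; napi_complete_done() is assumed here and the exact completion call may differ by kernel version:

    /* Sketch of niu_poll()'s tail, not the verbatim body. */
    work_done = niu_poll_core(np, lp, budget);
    if (work_done < budget) {
            napi_complete_done(napi, work_done);
            niu_ldg_rearm(np, lp, 1);    /* unmask this LDG's interrupt again */
    }
    return work_done;
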
3777 static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, in niu_log_rxchan_errors() argument
3780 netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); in niu_log_rxchan_errors()
3814 static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) in niu_rx_error() argument
3825 netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", in niu_rx_error()
3829 niu_log_rxchan_errors(np, rp, stat); in niu_rx_error()
3838 static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, in niu_log_txchan_errors() argument
3841 netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); in niu_log_txchan_errors()
3863 static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) in niu_tx_error() argument
3871 netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", in niu_tx_error()
3877 niu_log_txchan_errors(np, rp, cs); in niu_tx_error()
3882 static int niu_mif_interrupt(struct niu *np) in niu_mif_interrupt() argument
3887 if (np->flags & NIU_FLAGS_XMAC) { in niu_mif_interrupt()
3894 netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", in niu_mif_interrupt()
3900 static void niu_xmac_interrupt(struct niu *np) in niu_xmac_interrupt() argument
3902 struct niu_xmac_stats *mp = &np->mac_stats.xmac; in niu_xmac_interrupt()
3970 static void niu_bmac_interrupt(struct niu *np) in niu_bmac_interrupt() argument
3972 struct niu_bmac_stats *mp = &np->mac_stats.bmac; in niu_bmac_interrupt()
4006 static int niu_mac_interrupt(struct niu *np) in niu_mac_interrupt() argument
4008 if (np->flags & NIU_FLAGS_XMAC) in niu_mac_interrupt()
4009 niu_xmac_interrupt(np); in niu_mac_interrupt()
4011 niu_bmac_interrupt(np); in niu_mac_interrupt()
4016 static void niu_log_device_error(struct niu *np, u64 stat) in niu_log_device_error() argument
4018 netdev_err(np->dev, "Core device errors ( "); in niu_log_device_error()
4046 static int niu_device_error(struct niu *np) in niu_device_error() argument
4050 netdev_err(np->dev, "Core device error, stat[%llx]\n", in niu_device_error()
4053 niu_log_device_error(np, stat); in niu_device_error()
4058 static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, in niu_slowpath_interrupt() argument
4071 for (i = 0; i < np->num_rx_rings; i++) { in niu_slowpath_interrupt()
4072 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_slowpath_interrupt()
4075 int r = niu_rx_error(np, rp); in niu_slowpath_interrupt()
4089 for (i = 0; i < np->num_tx_rings; i++) { in niu_slowpath_interrupt()
4090 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_slowpath_interrupt()
4093 int r = niu_tx_error(np, rp); in niu_slowpath_interrupt()
4100 int r = niu_mif_interrupt(np); in niu_slowpath_interrupt()
4106 int r = niu_mac_interrupt(np); in niu_slowpath_interrupt()
4111 int r = niu_device_error(np); in niu_slowpath_interrupt()
4118 niu_enable_interrupts(np, 0); in niu_slowpath_interrupt()
4123 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, in niu_rxchan_intr() argument
4133 netif_printk(np, intr, KERN_DEBUG, np->dev, in niu_rxchan_intr()
4137 static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, in niu_txchan_intr() argument
4142 netif_printk(np, intr, KERN_DEBUG, np->dev, in niu_txchan_intr()
4146 static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) in __niu_fastpath_interrupt() argument
4148 struct niu_parent *parent = np->parent; in __niu_fastpath_interrupt()
4155 for (i = 0; i < np->num_rx_rings; i++) { in __niu_fastpath_interrupt()
4156 struct rx_ring_info *rp = &np->rx_rings[i]; in __niu_fastpath_interrupt()
4164 niu_rxchan_intr(np, rp, ldn); in __niu_fastpath_interrupt()
4167 for (i = 0; i < np->num_tx_rings; i++) { in __niu_fastpath_interrupt()
4168 struct tx_ring_info *rp = &np->tx_rings[i]; in __niu_fastpath_interrupt()
4176 niu_txchan_intr(np, rp, ldn); in __niu_fastpath_interrupt()
4180 static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, in niu_schedule_napi() argument
4187 __niu_fastpath_interrupt(np, lp->ldg_num, v0); in niu_schedule_napi()
4195 struct niu *np = lp->np; in niu_interrupt() local
4200 if (netif_msg_intr(np)) in niu_interrupt()
4204 spin_lock_irqsave(&np->lock, flags); in niu_interrupt()
4210 if (netif_msg_intr(np)) in niu_interrupt()
4217 spin_unlock_irqrestore(&np->lock, flags); in niu_interrupt()
4222 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); in niu_interrupt()
4227 niu_schedule_napi(np, lp, v0, v1, v2); in niu_interrupt()
4229 niu_ldg_rearm(np, lp, 1); in niu_interrupt()
4231 spin_unlock_irqrestore(&np->lock, flags); in niu_interrupt()
4236 static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) in niu_free_rx_ring_info() argument
4239 np->ops->free_coherent(np->device, in niu_free_rx_ring_info()
4245 np->ops->free_coherent(np->device, in niu_free_rx_ring_info()
4253 niu_rbr_free(np, rp); in niu_free_rx_ring_info()
4255 np->ops->free_coherent(np->device, in niu_free_rx_ring_info()
4266 static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) in niu_free_tx_ring_info() argument
4269 np->ops->free_coherent(np->device, in niu_free_tx_ring_info()
4279 (void) release_tx_packet(np, rp, i); in niu_free_tx_ring_info()
4282 np->ops->free_coherent(np->device, in niu_free_tx_ring_info()
4293 static void niu_free_channels(struct niu *np) in niu_free_channels() argument
4297 if (np->rx_rings) { in niu_free_channels()
4298 for (i = 0; i < np->num_rx_rings; i++) { in niu_free_channels()
4299 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_free_channels()
4301 niu_free_rx_ring_info(np, rp); in niu_free_channels()
4303 kfree(np->rx_rings); in niu_free_channels()
4304 np->rx_rings = NULL; in niu_free_channels()
4305 np->num_rx_rings = 0; in niu_free_channels()
4308 if (np->tx_rings) { in niu_free_channels()
4309 for (i = 0; i < np->num_tx_rings; i++) { in niu_free_channels()
4310 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_free_channels()
4312 niu_free_tx_ring_info(np, rp); in niu_free_channels()
4314 kfree(np->tx_rings); in niu_free_channels()
4315 np->tx_rings = NULL; in niu_free_channels()
4316 np->num_tx_rings = 0; in niu_free_channels()
4320 static int niu_alloc_rx_ring_info(struct niu *np, in niu_alloc_rx_ring_info() argument
4330 rp->mbox = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4336 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", in niu_alloc_rx_ring_info()
4341 rp->rcr = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4347 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", in niu_alloc_rx_ring_info()
4354 rp->rbr = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4360 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", in niu_alloc_rx_ring_info()
4371 static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) in niu_set_max_burst() argument
4373 int mtu = np->dev->mtu; in niu_set_max_burst()
4383 static int niu_alloc_tx_ring_info(struct niu *np, in niu_alloc_tx_ring_info() argument
4388 rp->mbox = np->ops->alloc_coherent(np->device, in niu_alloc_tx_ring_info()
4394 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", in niu_alloc_tx_ring_info()
4399 rp->descr = np->ops->alloc_coherent(np->device, in niu_alloc_tx_ring_info()
4405 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", in niu_alloc_tx_ring_info()
4418 niu_set_max_burst(np, rp); in niu_alloc_tx_ring_info()
4423 static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) in niu_size_rbr() argument
4434 if (np->dev->mtu > ETH_DATA_LEN) { in niu_size_rbr()
4450 static int niu_alloc_channels(struct niu *np) in niu_alloc_channels() argument
4452 struct niu_parent *parent = np->parent; in niu_alloc_channels()
4459 port = np->port; in niu_alloc_channels()
4475 np->num_rx_rings = num_rx_rings; in niu_alloc_channels()
4477 np->rx_rings = rx_rings; in niu_alloc_channels()
4479 netif_set_real_num_rx_queues(np->dev, num_rx_rings); in niu_alloc_channels()
4481 for (i = 0; i < np->num_rx_rings; i++) { in niu_alloc_channels()
4482 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_alloc_channels()
4484 rp->np = np; in niu_alloc_channels()
4487 err = niu_alloc_rx_ring_info(np, rp); in niu_alloc_channels()
4491 niu_size_rbr(np, rp); in niu_alloc_channels()
4504 err = niu_rbr_fill(np, rp, GFP_KERNEL); in niu_alloc_channels()
4515 np->num_tx_rings = num_tx_rings; in niu_alloc_channels()
4517 np->tx_rings = tx_rings; in niu_alloc_channels()
4519 netif_set_real_num_tx_queues(np->dev, num_tx_rings); in niu_alloc_channels()
4521 for (i = 0; i < np->num_tx_rings; i++) { in niu_alloc_channels()
4522 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_alloc_channels()
4524 rp->np = np; in niu_alloc_channels()
4527 err = niu_alloc_tx_ring_info(np, rp); in niu_alloc_channels()
4535 niu_free_channels(np); in niu_alloc_channels()
4539 static int niu_tx_cs_sng_poll(struct niu *np, int channel) in niu_tx_cs_sng_poll() argument
4551 static int niu_tx_channel_stop(struct niu *np, int channel) in niu_tx_channel_stop() argument
4558 return niu_tx_cs_sng_poll(np, channel); in niu_tx_channel_stop()
4561 static int niu_tx_cs_reset_poll(struct niu *np, int channel) in niu_tx_cs_reset_poll() argument
4573 static int niu_tx_channel_reset(struct niu *np, int channel) in niu_tx_channel_reset() argument
4581 err = niu_tx_cs_reset_poll(np, channel); in niu_tx_channel_reset()
4588 static int niu_tx_channel_lpage_init(struct niu *np, int channel) in niu_tx_channel_lpage_init() argument
4600 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; in niu_tx_channel_lpage_init()
4609 static void niu_txc_enable_port(struct niu *np, int on) in niu_txc_enable_port() argument
4614 niu_lock_parent(np, flags); in niu_txc_enable_port()
4616 mask = (u64)1 << np->port; in niu_txc_enable_port()
4625 niu_unlock_parent(np, flags); in niu_txc_enable_port()
4628 static void niu_txc_set_imask(struct niu *np, u64 imask) in niu_txc_set_imask() argument
4633 niu_lock_parent(np, flags); in niu_txc_set_imask()
4635 val &= ~TXC_INT_MASK_VAL(np->port); in niu_txc_set_imask()
4636 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); in niu_txc_set_imask()
4637 niu_unlock_parent(np, flags); in niu_txc_set_imask()
4640 static void niu_txc_port_dma_enable(struct niu *np, int on) in niu_txc_port_dma_enable() argument
4647 for (i = 0; i < np->num_tx_rings; i++) in niu_txc_port_dma_enable()
4648 val |= (1 << np->tx_rings[i].tx_channel); in niu_txc_port_dma_enable()
4650 nw64(TXC_PORT_DMA(np->port), val); in niu_txc_port_dma_enable()
4653 static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) in niu_init_one_tx_channel() argument
4658 err = niu_tx_channel_stop(np, channel); in niu_init_one_tx_channel()
4662 err = niu_tx_channel_reset(np, channel); in niu_init_one_tx_channel()
4666 err = niu_tx_channel_lpage_init(np, channel); in niu_init_one_tx_channel()
4675 netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n", in niu_init_one_tx_channel()
4693 netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", in niu_init_one_tx_channel()
4707 static void niu_init_rdc_groups(struct niu *np) in niu_init_rdc_groups() argument
4709 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; in niu_init_rdc_groups()
4722 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); in niu_init_rdc_groups()
4725 static void niu_init_drr_weight(struct niu *np) in niu_init_drr_weight() argument
4727 int type = phy_decode(np->parent->port_phy, np->port); in niu_init_drr_weight()
4740 nw64(PT_DRR_WT(np->port), val); in niu_init_drr_weight()
4743 static int niu_init_hostinfo(struct niu *np) in niu_init_hostinfo() argument
4745 struct niu_parent *parent = np->parent; in niu_init_hostinfo()
4746 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; in niu_init_hostinfo()
4747 int i, err, num_alt = niu_num_alt_addr(np); in niu_init_hostinfo()
4750 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); in niu_init_hostinfo()
4754 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); in niu_init_hostinfo()
4759 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); in niu_init_hostinfo()
4767 static int niu_rx_channel_reset(struct niu *np, int channel) in niu_rx_channel_reset() argument
4769 return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), in niu_rx_channel_reset()
4774 static int niu_rx_channel_lpage_init(struct niu *np, int channel) in niu_rx_channel_lpage_init() argument
4786 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; in niu_rx_channel_lpage_init()
4793 static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) in niu_rx_channel_wred_init() argument
4884 static int niu_enable_rx_channel(struct niu *np, int channel, int on) in niu_enable_rx_channel() argument
4906 static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) in niu_init_one_rx_channel() argument
4911 err = niu_rx_channel_reset(np, channel); in niu_init_one_rx_channel()
4915 err = niu_rx_channel_lpage_init(np, channel); in niu_init_one_rx_channel()
4919 niu_rx_channel_wred_init(np, rp); in niu_init_one_rx_channel()
4946 err = niu_enable_rx_channel(np, channel, 1); in niu_init_one_rx_channel()
4959 static int niu_init_rx_channels(struct niu *np) in niu_init_rx_channels() argument
4965 niu_lock_parent(np, flags); in niu_init_rx_channels()
4966 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); in niu_init_rx_channels()
4968 niu_unlock_parent(np, flags); in niu_init_rx_channels()
4972 niu_init_rdc_groups(np); in niu_init_rx_channels()
4973 niu_init_drr_weight(np); in niu_init_rx_channels()
4975 err = niu_init_hostinfo(np); in niu_init_rx_channels()
4979 for (i = 0; i < np->num_rx_rings; i++) { in niu_init_rx_channels()
4980 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_init_rx_channels()
4982 err = niu_init_one_rx_channel(np, rp); in niu_init_rx_channels()
4990 static int niu_set_ip_frag_rule(struct niu *np) in niu_set_ip_frag_rule() argument
4992 struct niu_parent *parent = np->parent; in niu_set_ip_frag_rule()
4993 struct niu_classifier *cp = &np->clas; in niu_set_ip_frag_rule()
5008 err = tcam_write(np, index, tp->key, tp->key_mask); in niu_set_ip_frag_rule()
5011 err = tcam_assoc_write(np, index, tp->assoc_data); in niu_set_ip_frag_rule()
5020 static int niu_init_classifier_hw(struct niu *np) in niu_init_classifier_hw() argument
5022 struct niu_parent *parent = np->parent; in niu_init_classifier_hw()
5023 struct niu_classifier *cp = &np->clas; in niu_init_classifier_hw()
5029 err = niu_init_hostinfo(np); in niu_init_classifier_hw()
5036 vlan_tbl_write(np, i, np->port, in niu_init_classifier_hw()
5043 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, in niu_init_classifier_hw()
5052 err = niu_set_tcam_key(np, i, parent->tcam_key[index]); in niu_init_classifier_hw()
5055 err = niu_set_flow_key(np, i, parent->flow_key[index]); in niu_init_classifier_hw()
5060 err = niu_set_ip_frag_rule(np); in niu_init_classifier_hw()
5064 tcam_enable(np, 1); in niu_init_classifier_hw()
5069 static int niu_zcp_write(struct niu *np, int index, u64 *data) in niu_zcp_write() argument
5080 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); in niu_zcp_write()
5082 return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, in niu_zcp_write()
5086 static int niu_zcp_read(struct niu *np, int index, u64 *data) in niu_zcp_read() argument
5090 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, in niu_zcp_read()
5093 netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n", in niu_zcp_read()
5101 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); in niu_zcp_read()
5103 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, in niu_zcp_read()
5106 netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", in niu_zcp_read()
5120 static void niu_zcp_cfifo_reset(struct niu *np) in niu_zcp_cfifo_reset() argument
5124 val |= RESET_CFIFO_RST(np->port); in niu_zcp_cfifo_reset()
5128 val &= ~RESET_CFIFO_RST(np->port); in niu_zcp_cfifo_reset()
5132 static int niu_init_zcp(struct niu *np) in niu_init_zcp() argument
5137 if (np->parent->plat_type != PLAT_TYPE_NIU) { in niu_init_zcp()
5138 if (np->port == 0 || np->port == 1) in niu_init_zcp()
5152 err = niu_zcp_write(np, i, data); in niu_init_zcp()
5155 err = niu_zcp_read(np, i, rbuf); in niu_init_zcp()
5160 niu_zcp_cfifo_reset(np); in niu_init_zcp()
5161 nw64(CFIFO_ECC(np->port), 0); in niu_init_zcp()
5169 static void niu_ipp_write(struct niu *np, int index, u64 *data) in niu_ipp_write() argument
5183 static void niu_ipp_read(struct niu *np, int index, u64 *data) in niu_ipp_read() argument
5193 static int niu_ipp_reset(struct niu *np) in niu_ipp_reset() argument
5195 return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, in niu_ipp_reset()
5199 static int niu_init_ipp(struct niu *np) in niu_init_ipp() argument
5204 if (np->parent->plat_type != PLAT_TYPE_NIU) { in niu_init_ipp()
5205 if (np->port == 0 || np->port == 1) in niu_init_ipp()
5219 niu_ipp_write(np, i, data); in niu_init_ipp()
5220 niu_ipp_read(np, i, rbuf); in niu_init_ipp()
5226 err = niu_ipp_reset(np); in niu_init_ipp()
5250 static void niu_handle_led(struct niu *np, int status) in niu_handle_led() argument
5255 if ((np->flags & NIU_FLAGS_10G) != 0 && in niu_handle_led()
5256 (np->flags & NIU_FLAGS_FIBER) != 0) { in niu_handle_led()
5269 static void niu_init_xif_xmac(struct niu *np) in niu_init_xif_xmac() argument
5271 struct niu_link_config *lp = &np->link_config; in niu_init_xif_xmac()
5274 if (np->flags & NIU_FLAGS_XCVR_SERDES) { in niu_init_xif_xmac()
5292 if (np->flags & NIU_FLAGS_10G) { in niu_init_xif_xmac()
5296 if (!(np->flags & NIU_FLAGS_FIBER) && in niu_init_xif_xmac()
5297 !(np->flags & NIU_FLAGS_XCVR_SERDES)) in niu_init_xif_xmac()
5314 if (np->flags & NIU_FLAGS_10G) { in niu_init_xif_xmac()
5326 static void niu_init_xif_bmac(struct niu *np) in niu_init_xif_bmac() argument
5328 struct niu_link_config *lp = &np->link_config; in niu_init_xif_bmac()
5346 if (!(np->flags & NIU_FLAGS_10G) && in niu_init_xif_bmac()
5347 !(np->flags & NIU_FLAGS_FIBER) && in niu_init_xif_bmac()
5356 static void niu_init_xif(struct niu *np) in niu_init_xif() argument
5358 if (np->flags & NIU_FLAGS_XMAC) in niu_init_xif()
5359 niu_init_xif_xmac(np); in niu_init_xif()
5361 niu_init_xif_bmac(np); in niu_init_xif()
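niu_init_xif is one of many helpers in this listing that branch on the XMAC flag and forward to an XMAC or BMAC variant. A compact model of that dispatch-on-flag idiom follows; the flag value, struct, and stub functions are invented for illustration.

/* Illustrative sketch, not part of niu.c: flag-based XMAC/BMAC dispatch. */
#include <stdio.h>

#define FLAG_XMAC 0x1   /* hypothetical stand-in for NIU_FLAGS_XMAC */

struct nic { unsigned int flags; };

static void init_xif_xmac(struct nic *n) { (void)n; puts("init XMAC XIF"); }
static void init_xif_bmac(struct nic *n) { (void)n; puts("init BMAC XIF"); }

static void init_xif(struct nic *n)
{
	if (n->flags & FLAG_XMAC)
		init_xif_xmac(n);
	else
		init_xif_bmac(n);
}

int main(void)
{
	struct nic port0 = { .flags = FLAG_XMAC };  /* XMAC-equipped port */
	struct nic port2 = { .flags = 0 };          /* BMAC-equipped port */

	init_xif(&port0);
	init_xif(&port2);
	return 0;
}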
5364 static void niu_pcs_mii_reset(struct niu *np) in niu_pcs_mii_reset() argument
5376 static void niu_xpcs_reset(struct niu *np) in niu_xpcs_reset() argument
5388 static int niu_init_pcs(struct niu *np) in niu_init_pcs() argument
5390 struct niu_link_config *lp = &np->link_config; in niu_init_pcs()
5393 switch (np->flags & (NIU_FLAGS_10G | in niu_init_pcs()
5400 niu_pcs_mii_reset(np); in niu_init_pcs()
5407 if (!(np->flags & NIU_FLAGS_XMAC)) in niu_init_pcs()
5415 niu_xpcs_reset(np); in niu_init_pcs()
5432 niu_pcs_mii_reset(np); in niu_init_pcs()
5442 niu_pcs_mii_reset(np); in niu_init_pcs()
5452 static int niu_reset_tx_xmac(struct niu *np) in niu_reset_tx_xmac() argument
5454 return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, in niu_reset_tx_xmac()
5460 static int niu_reset_tx_bmac(struct niu *np) in niu_reset_tx_bmac() argument
5472 dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", in niu_reset_tx_bmac()
5473 np->port, in niu_reset_tx_bmac()
5481 static int niu_reset_tx_mac(struct niu *np) in niu_reset_tx_mac() argument
5483 if (np->flags & NIU_FLAGS_XMAC) in niu_reset_tx_mac()
5484 return niu_reset_tx_xmac(np); in niu_reset_tx_mac()
5486 return niu_reset_tx_bmac(np); in niu_reset_tx_mac()
5489 static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) in niu_init_tx_xmac() argument
5505 if (np->flags & NIU_FLAGS_10G) { in niu_init_tx_xmac()
5525 static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) in niu_init_tx_bmac() argument
5542 static void niu_init_tx_mac(struct niu *np) in niu_init_tx_mac() argument
5547 if (np->dev->mtu > ETH_DATA_LEN) in niu_init_tx_mac()
5557 if (np->flags & NIU_FLAGS_XMAC) in niu_init_tx_mac()
5558 niu_init_tx_xmac(np, min, max); in niu_init_tx_mac()
5560 niu_init_tx_bmac(np, min, max); in niu_init_tx_mac()
5563 static int niu_reset_rx_xmac(struct niu *np) in niu_reset_rx_xmac() argument
5577 dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", in niu_reset_rx_xmac()
5578 np->port, in niu_reset_rx_xmac()
5586 static int niu_reset_rx_bmac(struct niu *np) in niu_reset_rx_bmac() argument
5598 dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", in niu_reset_rx_bmac()
5599 np->port, in niu_reset_rx_bmac()
5607 static int niu_reset_rx_mac(struct niu *np) in niu_reset_rx_mac() argument
5609 if (np->flags & NIU_FLAGS_XMAC) in niu_reset_rx_mac()
5610 return niu_reset_rx_xmac(np); in niu_reset_rx_mac()
5612 return niu_reset_rx_bmac(np); in niu_reset_rx_mac()
5615 static void niu_init_rx_xmac(struct niu *np) in niu_init_rx_xmac() argument
5617 struct niu_parent *parent = np->parent; in niu_init_rx_xmac()
5618 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; in niu_init_rx_xmac()
5631 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); in niu_init_rx_xmac()
5632 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); in niu_init_rx_xmac()
5667 static void niu_init_rx_bmac(struct niu *np) in niu_init_rx_bmac() argument
5669 struct niu_parent *parent = np->parent; in niu_init_rx_bmac()
5670 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; in niu_init_rx_bmac()
5682 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); in niu_init_rx_bmac()
5683 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); in niu_init_rx_bmac()
5702 static void niu_init_rx_mac(struct niu *np) in niu_init_rx_mac() argument
5704 niu_set_primary_mac(np, np->dev->dev_addr); in niu_init_rx_mac()
5706 if (np->flags & NIU_FLAGS_XMAC) in niu_init_rx_mac()
5707 niu_init_rx_xmac(np); in niu_init_rx_mac()
5709 niu_init_rx_bmac(np); in niu_init_rx_mac()
5712 static void niu_enable_tx_xmac(struct niu *np, int on) in niu_enable_tx_xmac() argument
5723 static void niu_enable_tx_bmac(struct niu *np, int on) in niu_enable_tx_bmac() argument
5734 static void niu_enable_tx_mac(struct niu *np, int on) in niu_enable_tx_mac() argument
5736 if (np->flags & NIU_FLAGS_XMAC) in niu_enable_tx_mac()
5737 niu_enable_tx_xmac(np, on); in niu_enable_tx_mac()
5739 niu_enable_tx_bmac(np, on); in niu_enable_tx_mac()
5742 static void niu_enable_rx_xmac(struct niu *np, int on) in niu_enable_rx_xmac() argument
5749 if (np->flags & NIU_FLAGS_MCAST) in niu_enable_rx_xmac()
5751 if (np->flags & NIU_FLAGS_PROMISC) in niu_enable_rx_xmac()
5761 static void niu_enable_rx_bmac(struct niu *np, int on) in niu_enable_rx_bmac() argument
5768 if (np->flags & NIU_FLAGS_MCAST) in niu_enable_rx_bmac()
5770 if (np->flags & NIU_FLAGS_PROMISC) in niu_enable_rx_bmac()
5780 static void niu_enable_rx_mac(struct niu *np, int on) in niu_enable_rx_mac() argument
5782 if (np->flags & NIU_FLAGS_XMAC) in niu_enable_rx_mac()
5783 niu_enable_rx_xmac(np, on); in niu_enable_rx_mac()
5785 niu_enable_rx_bmac(np, on); in niu_enable_rx_mac()
5788 static int niu_init_mac(struct niu *np) in niu_init_mac() argument
5792 niu_init_xif(np); in niu_init_mac()
5793 err = niu_init_pcs(np); in niu_init_mac()
5797 err = niu_reset_tx_mac(np); in niu_init_mac()
5800 niu_init_tx_mac(np); in niu_init_mac()
5801 err = niu_reset_rx_mac(np); in niu_init_mac()
5804 niu_init_rx_mac(np); in niu_init_mac()
5811 niu_init_tx_mac(np); in niu_init_mac()
5812 niu_enable_tx_mac(np, 1); in niu_init_mac()
5814 niu_enable_rx_mac(np, 1); in niu_init_mac()
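The niu_init_mac fragments show a strictly ordered bring-up (XIF, PCS, TX reset/init, RX reset/init, then the enables) with an early return on the first failing step. The sketch below models only that early-return sequencing with stub steps; the step names mirror the fragments, everything else is made up.

/* Illustrative sketch, not part of niu.c: ordered bring-up, early return. */
#include <stdio.h>

static int step(const char *name, int rc)
{
	printf("%-14s -> %d\n", name, rc);
	return rc;
}

static int init_mac(void)
{
	int err;

	step("init_xif", 0);               /* no error return in the fragments */
	err = step("init_pcs", 0);
	if (err)
		return err;
	err = step("reset_tx_mac", 0);
	if (err)
		return err;
	step("init_tx_mac", 0);
	err = step("reset_rx_mac", 0);
	if (err)
		return err;
	step("init_rx_mac", 0);
	step("enable_tx_mac", 0);
	step("enable_rx_mac", 0);
	return 0;
}

int main(void)
{
	return init_mac() ? 1 : 0;
}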
5819 static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) in niu_stop_one_tx_channel() argument
5821 (void) niu_tx_channel_stop(np, rp->tx_channel); in niu_stop_one_tx_channel()
5824 static void niu_stop_tx_channels(struct niu *np) in niu_stop_tx_channels() argument
5828 for (i = 0; i < np->num_tx_rings; i++) { in niu_stop_tx_channels()
5829 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_stop_tx_channels()
5831 niu_stop_one_tx_channel(np, rp); in niu_stop_tx_channels()
5835 static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) in niu_reset_one_tx_channel() argument
5837 (void) niu_tx_channel_reset(np, rp->tx_channel); in niu_reset_one_tx_channel()
5840 static void niu_reset_tx_channels(struct niu *np) in niu_reset_tx_channels() argument
5844 for (i = 0; i < np->num_tx_rings; i++) { in niu_reset_tx_channels()
5845 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_reset_tx_channels()
5847 niu_reset_one_tx_channel(np, rp); in niu_reset_tx_channels()
5851 static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) in niu_stop_one_rx_channel() argument
5853 (void) niu_enable_rx_channel(np, rp->rx_channel, 0); in niu_stop_one_rx_channel()
5856 static void niu_stop_rx_channels(struct niu *np) in niu_stop_rx_channels() argument
5860 for (i = 0; i < np->num_rx_rings; i++) { in niu_stop_rx_channels()
5861 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_stop_rx_channels()
5863 niu_stop_one_rx_channel(np, rp); in niu_stop_rx_channels()
5867 static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) in niu_reset_one_rx_channel() argument
5871 (void) niu_rx_channel_reset(np, channel); in niu_reset_one_rx_channel()
5874 (void) niu_enable_rx_channel(np, channel, 0); in niu_reset_one_rx_channel()
5877 static void niu_reset_rx_channels(struct niu *np) in niu_reset_rx_channels() argument
5881 for (i = 0; i < np->num_rx_rings; i++) { in niu_reset_rx_channels()
5882 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_reset_rx_channels()
5884 niu_reset_one_rx_channel(np, rp); in niu_reset_rx_channels()
5888 static void niu_disable_ipp(struct niu *np) in niu_disable_ipp() argument
5902 netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n", in niu_disable_ipp()
5914 (void) niu_ipp_reset(np); in niu_disable_ipp()
5917 static int niu_init_hw(struct niu *np) in niu_init_hw() argument
5921 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n"); in niu_init_hw()
5922 niu_txc_enable_port(np, 1); in niu_init_hw()
5923 niu_txc_port_dma_enable(np, 1); in niu_init_hw()
5924 niu_txc_set_imask(np, 0); in niu_init_hw()
5926 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n"); in niu_init_hw()
5927 for (i = 0; i < np->num_tx_rings; i++) { in niu_init_hw()
5928 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_init_hw()
5930 err = niu_init_one_tx_channel(np, rp); in niu_init_hw()
5935 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n"); in niu_init_hw()
5936 err = niu_init_rx_channels(np); in niu_init_hw()
5940 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n"); in niu_init_hw()
5941 err = niu_init_classifier_hw(np); in niu_init_hw()
5945 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n"); in niu_init_hw()
5946 err = niu_init_zcp(np); in niu_init_hw()
5950 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n"); in niu_init_hw()
5951 err = niu_init_ipp(np); in niu_init_hw()
5955 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n"); in niu_init_hw()
5956 err = niu_init_mac(np); in niu_init_hw()
5963 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n"); in niu_init_hw()
5964 niu_disable_ipp(np); in niu_init_hw()
5967 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n"); in niu_init_hw()
5968 niu_stop_rx_channels(np); in niu_init_hw()
5969 niu_reset_rx_channels(np); in niu_init_hw()
5972 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n"); in niu_init_hw()
5973 niu_stop_tx_channels(np); in niu_init_hw()
5974 niu_reset_tx_channels(np); in niu_init_hw()
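niu_init_hw's fragments show the classic init-then-unwind pattern: TXC, TX channels, RX channels, classifier, ZCP, IPP and MAC come up in order, and a failure partway through tears down IPP, then the RX channels, then the TX channels before returning. A condensed model of that goto-based unwinding, with hypothetical step names and a deliberately reduced number of stages:

/* Illustrative sketch, not part of niu.c: init with goto-based unwind. */
#include <stdio.h>

static int  bring_up(const char *what, int fail)
{
	printf("up:   %s\n", what);
	return fail ? -1 : 0;
}

static void tear_down(const char *what)
{
	printf("down: %s\n", what);
}

static int init_hw(int fail_at_mac)
{
	if (bring_up("tx channels", 0))
		goto out;
	if (bring_up("rx channels", 0))
		goto out_uninit_tx;
	if (bring_up("ipp", 0))
		goto out_uninit_rx;
	if (bring_up("mac", fail_at_mac))
		goto out_uninit_ipp;
	return 0;

out_uninit_ipp:
	tear_down("ipp");
out_uninit_rx:
	tear_down("rx channels");
out_uninit_tx:
	tear_down("tx channels");
out:
	return -1;
}

int main(void)
{
	init_hw(1);        /* demonstrate the unwind path */
	return 0;
}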
5979 static void niu_stop_hw(struct niu *np) in niu_stop_hw() argument
5981 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n"); in niu_stop_hw()
5982 niu_enable_interrupts(np, 0); in niu_stop_hw()
5984 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n"); in niu_stop_hw()
5985 niu_enable_rx_mac(np, 0); in niu_stop_hw()
5987 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n"); in niu_stop_hw()
5988 niu_disable_ipp(np); in niu_stop_hw()
5990 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n"); in niu_stop_hw()
5991 niu_stop_tx_channels(np); in niu_stop_hw()
5993 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n"); in niu_stop_hw()
5994 niu_stop_rx_channels(np); in niu_stop_hw()
5996 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n"); in niu_stop_hw()
5997 niu_reset_tx_channels(np); in niu_stop_hw()
5999 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n"); in niu_stop_hw()
6000 niu_reset_rx_channels(np); in niu_stop_hw()
6003 static void niu_set_irq_name(struct niu *np) in niu_set_irq_name() argument
6005 int port = np->port; in niu_set_irq_name()
6008 sprintf(np->irq_name[0], "%s:MAC", np->dev->name); in niu_set_irq_name()
6011 sprintf(np->irq_name[1], "%s:MIF", np->dev->name); in niu_set_irq_name()
6012 sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); in niu_set_irq_name()
6016 for (i = 0; i < np->num_ldg - j; i++) { in niu_set_irq_name()
6017 if (i < np->num_rx_rings) in niu_set_irq_name()
6018 sprintf(np->irq_name[i+j], "%s-rx-%d", in niu_set_irq_name()
6019 np->dev->name, i); in niu_set_irq_name()
6020 else if (i < np->num_tx_rings + np->num_rx_rings) in niu_set_irq_name()
6021 sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name, in niu_set_irq_name()
6022 i - np->num_rx_rings); in niu_set_irq_name()
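niu_set_irq_name builds per-vector names: a few leading slots get "<dev>:MAC", "<dev>:MIF" and "<dev>:SYSERR", and the remaining vectors are named "<dev>-rx-N" for RX rings and "<dev>-tx-N" for TX rings. The sketch below reproduces that naming scheme; the device name, ring counts, and the assumption that three slots are reserved (the `j` offset in the loop) are all illustrative.

/* Illustrative sketch, not part of niu.c: per-vector IRQ naming. */
#include <stdio.h>

int main(void)
{
	char name[16][32];
	const char *dev = "eth0";   /* hypothetical device name */
	int num_rx = 2, num_tx = 2;
	int j = 3, i;               /* assumed: three reserved leading slots */

	snprintf(name[0], sizeof(name[0]), "%s:MAC", dev);
	snprintf(name[1], sizeof(name[1]), "%s:MIF", dev);
	snprintf(name[2], sizeof(name[2]), "%s:SYSERR", dev);

	for (i = 0; i < num_rx + num_tx; i++) {
		if (i < num_rx)
			snprintf(name[i + j], sizeof(name[0]),
				 "%s-rx-%d", dev, i);
		else
			snprintf(name[i + j], sizeof(name[0]),
				 "%s-tx-%d", dev, i - num_rx);
	}

	for (i = 0; i < num_rx + num_tx + j; i++)
		printf("vector %d: %s\n", i, name[i]);
	return 0;
}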
6026 static int niu_request_irq(struct niu *np) in niu_request_irq() argument
6030 niu_set_irq_name(np); in niu_request_irq()
6033 for (i = 0; i < np->num_ldg; i++) { in niu_request_irq()
6034 struct niu_ldg *lp = &np->ldg[i]; in niu_request_irq()
6037 np->irq_name[i], lp); in niu_request_irq()
6047 struct niu_ldg *lp = &np->ldg[j]; in niu_request_irq()
6054 static void niu_free_irq(struct niu *np) in niu_free_irq() argument
6058 for (i = 0; i < np->num_ldg; i++) { in niu_free_irq()
6059 struct niu_ldg *lp = &np->ldg[i]; in niu_free_irq()
6065 static void niu_enable_napi(struct niu *np) in niu_enable_napi() argument
6069 for (i = 0; i < np->num_ldg; i++) in niu_enable_napi()
6070 napi_enable(&np->ldg[i].napi); in niu_enable_napi()
6073 static void niu_disable_napi(struct niu *np) in niu_disable_napi() argument
6077 for (i = 0; i < np->num_ldg; i++) in niu_disable_napi()
6078 napi_disable(&np->ldg[i].napi); in niu_disable_napi()
6083 struct niu *np = netdev_priv(dev); in niu_open() local
6088 err = niu_alloc_channels(np); in niu_open()
6092 err = niu_enable_interrupts(np, 0); in niu_open()
6096 err = niu_request_irq(np); in niu_open()
6100 niu_enable_napi(np); in niu_open()
6102 spin_lock_irq(&np->lock); in niu_open()
6104 err = niu_init_hw(np); in niu_open()
6106 timer_setup(&np->timer, niu_timer, 0); in niu_open()
6107 np->timer.expires = jiffies + HZ; in niu_open()
6109 err = niu_enable_interrupts(np, 1); in niu_open()
6111 niu_stop_hw(np); in niu_open()
6114 spin_unlock_irq(&np->lock); in niu_open()
6117 niu_disable_napi(np); in niu_open()
6123 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) in niu_open()
6126 add_timer(&np->timer); in niu_open()
6131 niu_free_irq(np); in niu_open()
6134 niu_free_channels(np); in niu_open()
6140 static void niu_full_shutdown(struct niu *np, struct net_device *dev) in niu_full_shutdown() argument
6142 cancel_work_sync(&np->reset_task); in niu_full_shutdown()
6144 niu_disable_napi(np); in niu_full_shutdown()
6147 del_timer_sync(&np->timer); in niu_full_shutdown()
6149 spin_lock_irq(&np->lock); in niu_full_shutdown()
6151 niu_stop_hw(np); in niu_full_shutdown()
6153 spin_unlock_irq(&np->lock); in niu_full_shutdown()
6158 struct niu *np = netdev_priv(dev); in niu_close() local
6160 niu_full_shutdown(np, dev); in niu_close()
6162 niu_free_irq(np); in niu_close()
6164 niu_free_channels(np); in niu_close()
6166 niu_handle_led(np, 0); in niu_close()
6171 static void niu_sync_xmac_stats(struct niu *np) in niu_sync_xmac_stats() argument
6173 struct niu_xmac_stats *mp = &np->mac_stats.xmac; in niu_sync_xmac_stats()
6196 static void niu_sync_bmac_stats(struct niu *np) in niu_sync_bmac_stats() argument
6198 struct niu_bmac_stats *mp = &np->mac_stats.bmac; in niu_sync_bmac_stats()
6209 static void niu_sync_mac_stats(struct niu *np) in niu_sync_mac_stats() argument
6211 if (np->flags & NIU_FLAGS_XMAC) in niu_sync_mac_stats()
6212 niu_sync_xmac_stats(np); in niu_sync_mac_stats()
6214 niu_sync_bmac_stats(np); in niu_sync_mac_stats()
6217 static void niu_get_rx_stats(struct niu *np, in niu_get_rx_stats() argument
6226 rx_rings = READ_ONCE(np->rx_rings); in niu_get_rx_stats()
6230 for (i = 0; i < np->num_rx_rings; i++) { in niu_get_rx_stats()
6233 niu_sync_rx_discard_stats(np, rp, 0); in niu_get_rx_stats()
6248 static void niu_get_tx_stats(struct niu *np, in niu_get_tx_stats() argument
6257 tx_rings = READ_ONCE(np->tx_rings); in niu_get_tx_stats()
6261 for (i = 0; i < np->num_tx_rings; i++) { in niu_get_tx_stats()
6278 struct niu *np = netdev_priv(dev); in niu_get_stats() local
6281 niu_get_rx_stats(np, stats); in niu_get_stats()
6282 niu_get_tx_stats(np, stats); in niu_get_stats()
6286 static void niu_load_hash_xmac(struct niu *np, u16 *hash) in niu_load_hash_xmac() argument
6294 static void niu_load_hash_bmac(struct niu *np, u16 *hash) in niu_load_hash_bmac() argument
6302 static void niu_load_hash(struct niu *np, u16 *hash) in niu_load_hash() argument
6304 if (np->flags & NIU_FLAGS_XMAC) in niu_load_hash()
6305 niu_load_hash_xmac(np, hash); in niu_load_hash()
6307 niu_load_hash_bmac(np, hash); in niu_load_hash()
6312 struct niu *np = netdev_priv(dev); in niu_set_rx_mode() local
6318 spin_lock_irqsave(&np->lock, flags); in niu_set_rx_mode()
6319 niu_enable_rx_mac(np, 0); in niu_set_rx_mode()
6321 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); in niu_set_rx_mode()
6323 np->flags |= NIU_FLAGS_PROMISC; in niu_set_rx_mode()
6325 np->flags |= NIU_FLAGS_MCAST; in niu_set_rx_mode()
6328 if (alt_cnt > niu_num_alt_addr(np)) { in niu_set_rx_mode()
6330 np->flags |= NIU_FLAGS_PROMISC; in niu_set_rx_mode()
6337 err = niu_set_alt_mac(np, index, ha->addr); in niu_set_rx_mode()
6341 err = niu_enable_alt_mac(np, index, 1); in niu_set_rx_mode()
6350 if (np->flags & NIU_FLAGS_XMAC) in niu_set_rx_mode()
6354 for (i = alt_start; i < niu_num_alt_addr(np); i++) { in niu_set_rx_mode()
6355 err = niu_enable_alt_mac(np, i, 0); in niu_set_rx_mode()
6373 if (np->flags & NIU_FLAGS_MCAST) in niu_set_rx_mode()
6374 niu_load_hash(np, hash); in niu_set_rx_mode()
6376 niu_enable_rx_mac(np, 1); in niu_set_rx_mode()
6377 spin_unlock_irqrestore(&np->lock, flags); in niu_set_rx_mode()
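niu_set_rx_mode reprograms filtering with the RX MAC disabled and under the device lock: the MCAST/PROMISC flags are recomputed, promiscuous mode is forced when there are more requested unicast addresses than alternate MAC slots, unused alternate slots are disabled, and the multicast hash is loaded only when MCAST is set before the RX MAC is re-enabled. The sketch below models just the flag/fallback decision; the flag values, parameters, and counts are invented.

/* Illustrative sketch, not part of niu.c: promiscuous fallback decision. */
#include <stdio.h>

#define F_MCAST   0x1
#define F_PROMISC 0x2

/* Decide RX filtering flags from interface state and alt-MAC capacity. */
static unsigned int rx_mode_flags(int iff_promisc, int iff_allmulti,
				  int n_mcast, int n_uc, int alt_slots)
{
	unsigned int flags = 0;

	if (iff_promisc)
		flags |= F_PROMISC;
	if (iff_allmulti || n_mcast > 0)
		flags |= F_MCAST;
	if (n_uc > alt_slots)       /* too many unicast addrs: go promiscuous */
		flags |= F_PROMISC;
	return flags;
}

int main(void)
{
	printf("flags=%#x\n", rx_mode_flags(0, 0, 4, 20, 16));
	printf("flags=%#x\n", rx_mode_flags(0, 1, 0, 1, 16));
	return 0;
}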
6382 struct niu *np = netdev_priv(dev); in niu_set_mac_addr() local
6394 spin_lock_irqsave(&np->lock, flags); in niu_set_mac_addr()
6395 niu_enable_rx_mac(np, 0); in niu_set_mac_addr()
6396 niu_set_primary_mac(np, dev->dev_addr); in niu_set_mac_addr()
6397 niu_enable_rx_mac(np, 1); in niu_set_mac_addr()
6398 spin_unlock_irqrestore(&np->lock, flags); in niu_set_mac_addr()
6408 static void niu_netif_stop(struct niu *np) in niu_netif_stop() argument
6410 netif_trans_update(np->dev); /* prevent tx timeout */ in niu_netif_stop()
6412 niu_disable_napi(np); in niu_netif_stop()
6414 netif_tx_disable(np->dev); in niu_netif_stop()
6417 static void niu_netif_start(struct niu *np) in niu_netif_start() argument
6423 netif_tx_wake_all_queues(np->dev); in niu_netif_start()
6425 niu_enable_napi(np); in niu_netif_start()
6427 niu_enable_interrupts(np, 1); in niu_netif_start()
6430 static void niu_reset_buffers(struct niu *np) in niu_reset_buffers() argument
6434 if (np->rx_rings) { in niu_reset_buffers()
6435 for (i = 0; i < np->num_rx_rings; i++) { in niu_reset_buffers()
6436 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_reset_buffers()
6452 err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k); in niu_reset_buffers()
6463 if (np->tx_rings) { in niu_reset_buffers()
6464 for (i = 0; i < np->num_tx_rings; i++) { in niu_reset_buffers()
6465 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_reset_buffers()
6469 (void) release_tx_packet(np, rp, j); in niu_reset_buffers()
6482 struct niu *np = container_of(work, struct niu, reset_task); in niu_reset_task() local
6486 spin_lock_irqsave(&np->lock, flags); in niu_reset_task()
6487 if (!netif_running(np->dev)) { in niu_reset_task()
6488 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6492 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6494 del_timer_sync(&np->timer); in niu_reset_task()
6496 niu_netif_stop(np); in niu_reset_task()
6498 spin_lock_irqsave(&np->lock, flags); in niu_reset_task()
6500 niu_stop_hw(np); in niu_reset_task()
6502 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6504 niu_reset_buffers(np); in niu_reset_task()
6506 spin_lock_irqsave(&np->lock, flags); in niu_reset_task()
6508 err = niu_init_hw(np); in niu_reset_task()
6510 np->timer.expires = jiffies + HZ; in niu_reset_task()
6511 add_timer(&np->timer); in niu_reset_task()
6512 niu_netif_start(np); in niu_reset_task()
6515 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6520 struct niu *np = netdev_priv(dev); in niu_tx_timeout() local
6522 dev_err(np->device, "%s: Transmit timed out, resetting\n", in niu_tx_timeout()
6525 schedule_work(&np->reset_task); in niu_tx_timeout()
6608 struct niu *np = netdev_priv(dev); in niu_start_xmit() local
6619 rp = &np->tx_rings[i]; in niu_start_xmit()
6624 dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name); in niu_start_xmit()
6655 mapping = np->ops->map_single(np->device, skb->data, in niu_start_xmit()
6695 mapping = np->ops->map_page(np->device, skb_frag_page(frag), in niu_start_xmit()
6730 struct niu *np = netdev_priv(dev); in niu_change_mtu() local
6742 niu_full_shutdown(np, dev); in niu_change_mtu()
6744 niu_free_channels(np); in niu_change_mtu()
6746 niu_enable_napi(np); in niu_change_mtu()
6748 err = niu_alloc_channels(np); in niu_change_mtu()
6752 spin_lock_irq(&np->lock); in niu_change_mtu()
6754 err = niu_init_hw(np); in niu_change_mtu()
6756 timer_setup(&np->timer, niu_timer, 0); in niu_change_mtu()
6757 np->timer.expires = jiffies + HZ; in niu_change_mtu()
6759 err = niu_enable_interrupts(np, 1); in niu_change_mtu()
6761 niu_stop_hw(np); in niu_change_mtu()
6764 spin_unlock_irq(&np->lock); in niu_change_mtu()
6768 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) in niu_change_mtu()
6771 add_timer(&np->timer); in niu_change_mtu()
6780 struct niu *np = netdev_priv(dev); in niu_get_drvinfo() local
6781 struct niu_vpd *vpd = &np->vpd; in niu_get_drvinfo()
6787 if (np->parent->plat_type != PLAT_TYPE_NIU) in niu_get_drvinfo()
6788 strlcpy(info->bus_info, pci_name(np->pdev), in niu_get_drvinfo()
6795 struct niu *np = netdev_priv(dev); in niu_get_link_ksettings() local
6798 lp = &np->link_config; in niu_get_link_ksettings()
6801 cmd->base.phy_address = np->phy_addr; in niu_get_link_ksettings()
6809 cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; in niu_get_link_ksettings()
6817 struct niu *np = netdev_priv(dev); in niu_set_link_ksettings() local
6818 struct niu_link_config *lp = &np->link_config; in niu_set_link_ksettings()
6825 return niu_init_link(np); in niu_set_link_ksettings()
6830 struct niu *np = netdev_priv(dev); in niu_get_msglevel() local
6831 return np->msg_enable; in niu_get_msglevel()
6836 struct niu *np = netdev_priv(dev); in niu_set_msglevel() local
6837 np->msg_enable = value; in niu_set_msglevel()
6842 struct niu *np = netdev_priv(dev); in niu_nway_reset() local
6844 if (np->link_config.autoneg) in niu_nway_reset()
6845 return niu_init_link(np); in niu_nway_reset()
6852 struct niu *np = netdev_priv(dev); in niu_get_eeprom_len() local
6854 return np->eeprom_len; in niu_get_eeprom_len()
6860 struct niu *np = netdev_priv(dev); in niu_get_eeprom() local
6868 if (offset >= np->eeprom_len) in niu_get_eeprom()
6870 if (offset + len > np->eeprom_len) in niu_get_eeprom()
6871 len = eeprom->len = np->eeprom_len - offset; in niu_get_eeprom()
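niu_get_eeprom bounds the request against the device's EEPROM size: an offset at or past eeprom_len is rejected, and a read that would run off the end is clamped to eeprom_len - offset. The same clamp, standalone (function and parameter names are illustrative):

/* Illustrative sketch, not part of niu.c: clamp an EEPROM read request. */
#include <stdio.h>

/* Returns the usable length for the request, or -1 if out of range. */
static int clamp_eeprom(unsigned int offset, unsigned int len,
			unsigned int eeprom_len)
{
	if (offset >= eeprom_len)
		return -1;
	if (offset + len > eeprom_len)
		len = eeprom_len - offset;
	return (int)len;
}

int main(void)
{
	printf("%d\n", clamp_eeprom(100, 64, 128));  /* clamped to 28 */
	printf("%d\n", clamp_eeprom(200, 64, 128));  /* -1: past the end */
	return 0;
}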
7056 static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) in niu_get_hash_opts() argument
7065 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & in niu_get_hash_opts()
7069 nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - in niu_get_hash_opts()
7150 static int niu_get_ethtool_tcam_entry(struct niu *np, in niu_get_ethtool_tcam_entry() argument
7153 struct niu_parent *parent = np->parent; in niu_get_ethtool_tcam_entry()
7160 idx = tcam_get_index(np, (u16)nfc->fs.location); in niu_get_ethtool_tcam_entry()
7164 netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", in niu_get_ethtool_tcam_entry()
7174 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", in niu_get_ethtool_tcam_entry()
7224 nfc->data = tcam_get_size(np); in niu_get_ethtool_tcam_entry()
7229 static int niu_get_ethtool_tcam_all(struct niu *np, in niu_get_ethtool_tcam_all() argument
7233 struct niu_parent *parent = np->parent; in niu_get_ethtool_tcam_all()
7240 nfc->data = tcam_get_size(np); in niu_get_ethtool_tcam_all()
7242 niu_lock_parent(np, flags); in niu_get_ethtool_tcam_all()
7244 idx = tcam_get_index(np, i); in niu_get_ethtool_tcam_all()
7255 niu_unlock_parent(np, flags); in niu_get_ethtool_tcam_all()
7265 struct niu *np = netdev_priv(dev); in niu_get_nfc() local
7270 ret = niu_get_hash_opts(np, cmd); in niu_get_nfc()
7273 cmd->data = np->num_rx_rings; in niu_get_nfc()
7276 cmd->rule_cnt = tcam_get_valid_entry_cnt(np); in niu_get_nfc()
7279 ret = niu_get_ethtool_tcam_entry(np, cmd); in niu_get_nfc()
7282 ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs); in niu_get_nfc()
7292 static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) in niu_set_hash_opts() argument
7306 niu_lock_parent(np, flags); in niu_set_hash_opts()
7307 flow_key = np->parent->tcam_key[class - in niu_set_hash_opts()
7311 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; in niu_set_hash_opts()
7312 niu_unlock_parent(np, flags); in niu_set_hash_opts()
7316 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & in niu_set_hash_opts()
7318 niu_lock_parent(np, flags); in niu_set_hash_opts()
7319 flow_key = np->parent->tcam_key[class - in niu_set_hash_opts()
7324 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = in niu_set_hash_opts()
7326 niu_unlock_parent(np, flags); in niu_set_hash_opts()
7333 niu_lock_parent(np, flags); in niu_set_hash_opts()
7335 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; in niu_set_hash_opts()
7336 niu_unlock_parent(np, flags); in niu_set_hash_opts()
7409 static int niu_add_ethtool_tcam_entry(struct niu *np, in niu_add_ethtool_tcam_entry() argument
7412 struct niu_parent *parent = np->parent; in niu_add_ethtool_tcam_entry()
7415 struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; in niu_add_ethtool_tcam_entry()
7425 if (idx >= tcam_get_size(np)) in niu_add_ethtool_tcam_entry()
7437 niu_lock_parent(np, flags); in niu_add_ethtool_tcam_entry()
7466 ret = tcam_user_ip_class_set(np, class, 0, in niu_add_ethtool_tcam_entry()
7473 ret = tcam_user_ip_class_enable(np, class, 1); in niu_add_ethtool_tcam_entry()
7484 netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", in niu_add_ethtool_tcam_entry()
7489 niu_unlock_parent(np, flags); in niu_add_ethtool_tcam_entry()
7496 niu_lock_parent(np, flags); in niu_add_ethtool_tcam_entry()
7498 idx = tcam_get_index(np, idx); in niu_add_ethtool_tcam_entry()
7518 netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", in niu_add_ethtool_tcam_entry()
7526 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", in niu_add_ethtool_tcam_entry()
7536 if (fsp->ring_cookie >= np->num_rx_rings) { in niu_add_ethtool_tcam_entry()
7537 netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", in niu_add_ethtool_tcam_entry()
7548 err = tcam_write(np, idx, tp->key, tp->key_mask); in niu_add_ethtool_tcam_entry()
7553 err = tcam_assoc_write(np, idx, tp->assoc_data); in niu_add_ethtool_tcam_entry()
7561 np->clas.tcam_valid_entries++; in niu_add_ethtool_tcam_entry()
7563 niu_unlock_parent(np, flags); in niu_add_ethtool_tcam_entry()
7568 static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc) in niu_del_ethtool_tcam_entry() argument
7570 struct niu_parent *parent = np->parent; in niu_del_ethtool_tcam_entry()
7577 if (loc >= tcam_get_size(np)) in niu_del_ethtool_tcam_entry()
7580 niu_lock_parent(np, flags); in niu_del_ethtool_tcam_entry()
7582 idx = tcam_get_index(np, loc); in niu_del_ethtool_tcam_entry()
7596 ret = tcam_user_ip_class_enable(np, in niu_del_ethtool_tcam_entry()
7608 netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n", in niu_del_ethtool_tcam_entry()
7616 ret = tcam_flush(np, idx); in niu_del_ethtool_tcam_entry()
7622 np->clas.tcam_valid_entries--; in niu_del_ethtool_tcam_entry()
7624 niu_unlock_parent(np, flags); in niu_del_ethtool_tcam_entry()
7631 struct niu *np = netdev_priv(dev); in niu_set_nfc() local
7636 ret = niu_set_hash_opts(np, cmd); in niu_set_nfc()
7639 ret = niu_add_ethtool_tcam_entry(np, cmd); in niu_set_nfc()
7642 ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); in niu_set_nfc()
7732 struct niu *np = netdev_priv(dev); in niu_get_strings() local
7738 if (np->flags & NIU_FLAGS_XMAC) { in niu_get_strings()
7747 for (i = 0; i < np->num_rx_rings; i++) { in niu_get_strings()
7752 for (i = 0; i < np->num_tx_rings; i++) { in niu_get_strings()
7761 struct niu *np = netdev_priv(dev); in niu_get_sset_count() local
7766 return (np->flags & NIU_FLAGS_XMAC ? in niu_get_sset_count()
7769 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + in niu_get_sset_count()
7770 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS); in niu_get_sset_count()
7776 struct niu *np = netdev_priv(dev); in niu_get_ethtool_stats() local
7779 niu_sync_mac_stats(np); in niu_get_ethtool_stats()
7780 if (np->flags & NIU_FLAGS_XMAC) { in niu_get_ethtool_stats()
7781 memcpy(data, &np->mac_stats.xmac, in niu_get_ethtool_stats()
7785 memcpy(data, &np->mac_stats.bmac, in niu_get_ethtool_stats()
7789 for (i = 0; i < np->num_rx_rings; i++) { in niu_get_ethtool_stats()
7790 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_get_ethtool_stats()
7792 niu_sync_rx_discard_stats(np, rp, 0); in niu_get_ethtool_stats()
7801 for (i = 0; i < np->num_tx_rings; i++) { in niu_get_ethtool_stats()
7802 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_get_ethtool_stats()
7812 static u64 niu_led_state_save(struct niu *np) in niu_led_state_save() argument
7814 if (np->flags & NIU_FLAGS_XMAC) in niu_led_state_save()
7820 static void niu_led_state_restore(struct niu *np, u64 val) in niu_led_state_restore() argument
7822 if (np->flags & NIU_FLAGS_XMAC) in niu_led_state_restore()
7828 static void niu_force_led(struct niu *np, int on) in niu_force_led() argument
7832 if (np->flags & NIU_FLAGS_XMAC) { in niu_force_led()
7852 struct niu *np = netdev_priv(dev); in niu_set_phys_id() local
7859 np->orig_led_state = niu_led_state_save(np); in niu_set_phys_id()
7863 niu_force_led(np, 1); in niu_set_phys_id()
7867 niu_force_led(np, 0); in niu_set_phys_id()
7871 niu_led_state_restore(np, np->orig_led_state); in niu_set_phys_id()
7895 static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, in niu_ldg_assign_ldn() argument
7905 if (np->parent->plat_type == PLAT_TYPE_NIU) { in niu_ldg_assign_ldn()
7912 dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n", in niu_ldg_assign_ldn()
7913 np->port, ldn, ldg, in niu_ldg_assign_ldn()
7923 static int niu_set_ldg_timer_res(struct niu *np, int res) in niu_set_ldg_timer_res() argument
7934 static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) in niu_set_ldg_sid() argument
7946 static int niu_pci_eeprom_read(struct niu *np, u32 addr) in niu_pci_eeprom_read() argument
7965 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", in niu_pci_eeprom_read()
7980 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", in niu_pci_eeprom_read()
7989 static int niu_pci_eeprom_read16(struct niu *np, u32 off) in niu_pci_eeprom_read16() argument
7991 int err = niu_pci_eeprom_read(np, off); in niu_pci_eeprom_read16()
7997 err = niu_pci_eeprom_read(np, off + 1); in niu_pci_eeprom_read16()
8005 static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off) in niu_pci_eeprom_read16_swp() argument
8007 int err = niu_pci_eeprom_read(np, off); in niu_pci_eeprom_read16_swp()
8014 err = niu_pci_eeprom_read(np, off + 1); in niu_pci_eeprom_read16_swp()
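niu_pci_eeprom_read16 and niu_pci_eeprom_read16_swp both fetch the bytes at off and off + 1 and combine them into a 16-bit value, in opposite byte orders. The sketch below shows the two compositions, under the assumption (not confirmed by the fragments) that the non-swapped helper puts the first byte in the high half and the _swp variant does the reverse; the fake EEPROM contents are illustrative.

/* Illustrative sketch, not part of niu.c: two 16-bit byte-order compositions. */
#include <stdint.h>
#include <stdio.h>

static const uint8_t eeprom[] = { 0x12, 0x34 };   /* fake EEPROM contents */

static int read_byte(unsigned int off)
{
	return off < sizeof(eeprom) ? eeprom[off] : -1;
}

/* Assumed layout: first byte in the high half. */
static int read16(unsigned int off)
{
	int hi = read_byte(off), lo = read_byte(off + 1);

	return (hi < 0 || lo < 0) ? -1 : (hi << 8) | lo;
}

/* Swapped variant: first byte ends up in the low half. */
static int read16_swp(unsigned int off)
{
	int lo = read_byte(off), hi = read_byte(off + 1);

	return (hi < 0 || lo < 0) ? -1 : (hi << 8) | lo;
}

int main(void)
{
	printf("read16     -> 0x%04x\n", read16(0));      /* 0x1234 */
	printf("read16_swp -> 0x%04x\n", read16_swp(0));  /* 0x3412 */
	return 0;
}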
8023 static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf, in niu_pci_vpd_get_propname() argument
8029 int err = niu_pci_eeprom_read(np, off + i); in niu_pci_vpd_get_propname()
8042 static void niu_vpd_parse_version(struct niu *np) in niu_vpd_parse_version() argument
8044 struct niu_vpd *vpd = &np->vpd; in niu_vpd_parse_version()
8059 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_vpd_parse_version()
8065 np->flags |= NIU_FLAGS_VPD_VALID; in niu_vpd_parse_version()
8069 static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) in niu_pci_vpd_scan_props() argument
8080 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_vpd_scan_props()
8089 niu_vpd_parse_version(np); in niu_pci_vpd_scan_props()
8093 err = niu_pci_eeprom_read(np, start + 2); in niu_pci_vpd_scan_props()
8099 prop_len = niu_pci_eeprom_read(np, start + 4); in niu_pci_vpd_scan_props()
8102 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); in niu_pci_vpd_scan_props()
8109 prop_buf = np->vpd.model; in niu_pci_vpd_scan_props()
8113 prop_buf = np->vpd.board_model; in niu_pci_vpd_scan_props()
8117 prop_buf = np->vpd.version; in niu_pci_vpd_scan_props()
8121 prop_buf = np->vpd.local_mac; in niu_pci_vpd_scan_props()
8125 prop_buf = &np->vpd.mac_num; in niu_pci_vpd_scan_props()
8129 prop_buf = np->vpd.phy_type; in niu_pci_vpd_scan_props()
8135 dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len); in niu_pci_vpd_scan_props()
8143 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_vpd_scan_props()
8147 err = niu_pci_eeprom_read(np, off + i); in niu_pci_vpd_scan_props()
8161 static int niu_pci_vpd_fetch(struct niu *np, u32 start) in niu_pci_vpd_fetch() argument
8166 err = niu_pci_eeprom_read16_swp(np, start + 1); in niu_pci_vpd_fetch()
8176 err = niu_pci_eeprom_read(np, here); in niu_pci_vpd_fetch()
8182 err = niu_pci_eeprom_read16_swp(np, here + 1); in niu_pci_vpd_fetch()
8191 err = niu_pci_vpd_scan_props(np, here, end); in niu_pci_vpd_fetch()
8202 static u32 niu_pci_vpd_offset(struct niu *np) in niu_pci_vpd_offset() argument
8211 err = niu_pci_eeprom_read16(np, start + 0); in niu_pci_vpd_offset()
8216 err = niu_pci_eeprom_read16(np, start + 23); in niu_pci_vpd_offset()
8222 err = niu_pci_eeprom_read16(np, start + 0); in niu_pci_vpd_offset()
8225 err = niu_pci_eeprom_read16(np, start + 2); in niu_pci_vpd_offset()
8230 err = niu_pci_eeprom_read(np, start + 20); in niu_pci_vpd_offset()
8234 err = niu_pci_eeprom_read(np, ret + 2); in niu_pci_vpd_offset()
8242 err = niu_pci_eeprom_read16_swp(np, start + 8); in niu_pci_vpd_offset()
8247 err = niu_pci_eeprom_read(np, ret + 0); in niu_pci_vpd_offset()
8257 static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop) in niu_phy_type_prop_decode() argument
8261 np->flags &= ~(NIU_FLAGS_FIBER | in niu_phy_type_prop_decode()
8263 np->mac_xcvr = MAC_XCVR_MII; in niu_phy_type_prop_decode()
8266 np->flags |= (NIU_FLAGS_10G | in niu_phy_type_prop_decode()
8268 np->mac_xcvr = MAC_XCVR_XPCS; in niu_phy_type_prop_decode()
8271 np->flags &= ~NIU_FLAGS_10G; in niu_phy_type_prop_decode()
8272 np->flags |= NIU_FLAGS_FIBER; in niu_phy_type_prop_decode()
8273 np->mac_xcvr = MAC_XCVR_PCS; in niu_phy_type_prop_decode()
8276 np->flags |= NIU_FLAGS_10G; in niu_phy_type_prop_decode()
8277 np->flags &= ~NIU_FLAGS_FIBER; in niu_phy_type_prop_decode()
8278 np->mac_xcvr = MAC_XCVR_XPCS; in niu_phy_type_prop_decode()
8281 np->flags |= NIU_FLAGS_10G; in niu_phy_type_prop_decode()
8282 np->flags &= ~NIU_FLAGS_FIBER; in niu_phy_type_prop_decode()
8283 np->flags |= NIU_FLAGS_XCVR_SERDES; in niu_phy_type_prop_decode()
8284 np->mac_xcvr = MAC_XCVR_XPCS; in niu_phy_type_prop_decode()
8291 static int niu_pci_vpd_get_nports(struct niu *np) in niu_pci_vpd_get_nports() argument
8295 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || in niu_pci_vpd_get_nports()
8296 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || in niu_pci_vpd_get_nports()
8297 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || in niu_pci_vpd_get_nports()
8298 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || in niu_pci_vpd_get_nports()
8299 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { in niu_pci_vpd_get_nports()
8301 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || in niu_pci_vpd_get_nports()
8302 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || in niu_pci_vpd_get_nports()
8303 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || in niu_pci_vpd_get_nports()
8304 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { in niu_pci_vpd_get_nports()
8311 static void niu_pci_vpd_validate(struct niu *np) in niu_pci_vpd_validate() argument
8313 struct net_device *dev = np->dev; in niu_pci_vpd_validate()
8314 struct niu_vpd *vpd = &np->vpd; in niu_pci_vpd_validate()
8318 dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); in niu_pci_vpd_validate()
8320 np->flags &= ~NIU_FLAGS_VPD_VALID; in niu_pci_vpd_validate()
8324 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || in niu_pci_vpd_validate()
8325 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { in niu_pci_vpd_validate()
8326 np->flags |= NIU_FLAGS_10G; in niu_pci_vpd_validate()
8327 np->flags &= ~NIU_FLAGS_FIBER; in niu_pci_vpd_validate()
8328 np->flags |= NIU_FLAGS_XCVR_SERDES; in niu_pci_vpd_validate()
8329 np->mac_xcvr = MAC_XCVR_PCS; in niu_pci_vpd_validate()
8330 if (np->port > 1) { in niu_pci_vpd_validate()
8331 np->flags |= NIU_FLAGS_FIBER; in niu_pci_vpd_validate()
8332 np->flags &= ~NIU_FLAGS_10G; in niu_pci_vpd_validate()
8334 if (np->flags & NIU_FLAGS_10G) in niu_pci_vpd_validate()
8335 np->mac_xcvr = MAC_XCVR_XPCS; in niu_pci_vpd_validate()
8336 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { in niu_pci_vpd_validate()
8337 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | in niu_pci_vpd_validate()
8339 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { in niu_pci_vpd_validate()
8340 dev_err(np->device, "Illegal phy string [%s]\n", in niu_pci_vpd_validate()
8341 np->vpd.phy_type); in niu_pci_vpd_validate()
8342 dev_err(np->device, "Falling back to SPROM\n"); in niu_pci_vpd_validate()
8343 np->flags &= ~NIU_FLAGS_VPD_VALID; in niu_pci_vpd_validate()
8350 dev->dev_addr[5] += np->port; in niu_pci_vpd_validate()
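Both the VPD path above and the SPROM path below end by deriving the port's MAC address from the board's base address, adding the port index to the last octet (dev_addr[5] += np->port). A tiny standalone version of that derivation; the base address used here is just example data.

/* Illustrative sketch, not part of niu.c: derive a per-port MAC address. */
#include <stdio.h>

int main(void)
{
	unsigned char base[6] = { 0x00, 0x14, 0x4f, 0x00, 0x00, 0x10 };
	int port;

	for (port = 0; port < 4; port++) {
		unsigned char mac[6];
		int i;

		for (i = 0; i < 6; i++)
			mac[i] = base[i];
		mac[5] += (unsigned char)port;  /* last octet offset by port */
		printf("port %d: %02x:%02x:%02x:%02x:%02x:%02x\n", port,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	}
	return 0;
}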
8355 static int niu_pci_probe_sprom(struct niu *np) in niu_pci_probe_sprom() argument
8357 struct net_device *dev = np->dev; in niu_pci_probe_sprom()
8366 np->eeprom_len = len; in niu_pci_probe_sprom()
8368 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8379 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8382 dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); in niu_pci_probe_sprom()
8387 switch (np->port) { in niu_pci_probe_sprom()
8405 dev_err(np->device, "Bogus port number %u\n", in niu_pci_probe_sprom()
8406 np->port); in niu_pci_probe_sprom()
8409 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8415 np->flags &= ~(NIU_FLAGS_FIBER | in niu_pci_probe_sprom()
8417 np->mac_xcvr = MAC_XCVR_MII; in niu_pci_probe_sprom()
8422 np->flags &= ~NIU_FLAGS_10G; in niu_pci_probe_sprom()
8423 np->flags |= NIU_FLAGS_FIBER; in niu_pci_probe_sprom()
8424 np->mac_xcvr = MAC_XCVR_PCS; in niu_pci_probe_sprom()
8429 np->flags |= NIU_FLAGS_10G; in niu_pci_probe_sprom()
8430 np->flags &= ~NIU_FLAGS_FIBER; in niu_pci_probe_sprom()
8431 np->mac_xcvr = MAC_XCVR_XPCS; in niu_pci_probe_sprom()
8436 np->flags |= (NIU_FLAGS_10G | in niu_pci_probe_sprom()
8438 np->mac_xcvr = MAC_XCVR_XPCS; in niu_pci_probe_sprom()
8442 dev_err(np->device, "Bogus SPROM phy type %u\n", val8); in niu_pci_probe_sprom()
8447 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8455 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8461 dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", in niu_pci_probe_sprom()
8467 dev->dev_addr[5] += np->port; in niu_pci_probe_sprom()
8472 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8480 np->vpd.model[i + 3] = (tmp >> 0) & 0xff; in niu_pci_probe_sprom()
8481 np->vpd.model[i + 2] = (tmp >> 8) & 0xff; in niu_pci_probe_sprom()
8482 np->vpd.model[i + 1] = (tmp >> 16) & 0xff; in niu_pci_probe_sprom()
8483 np->vpd.model[i + 0] = (tmp >> 24) & 0xff; in niu_pci_probe_sprom()
8485 np->vpd.model[val] = '\0'; in niu_pci_probe_sprom()
8488 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8496 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; in niu_pci_probe_sprom()
8497 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; in niu_pci_probe_sprom()
8498 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; in niu_pci_probe_sprom()
8499 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; in niu_pci_probe_sprom()
8501 np->vpd.board_model[val] = '\0'; in niu_pci_probe_sprom()
8503 np->vpd.mac_num = in niu_pci_probe_sprom()
8505 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8506 "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); in niu_pci_probe_sprom()
8511 static int niu_get_and_validate_port(struct niu *np) in niu_get_and_validate_port() argument
8513 struct niu_parent *parent = np->parent; in niu_get_and_validate_port()
8515 if (np->port <= 1) in niu_get_and_validate_port()
8516 np->flags |= NIU_FLAGS_XMAC; in niu_get_and_validate_port()
8522 parent->num_ports = niu_pci_vpd_get_nports(np); in niu_get_and_validate_port()
8539 if (np->port >= parent->num_ports) in niu_get_and_validate_port()
8747 static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent, in fill_phy_probe_info() argument
8756 niu_lock_parent(np, flags); in fill_phy_probe_info()
8761 dev_id_1 = mdio_read(np, port, in fill_phy_probe_info()
8763 dev_id_2 = mdio_read(np, port, in fill_phy_probe_info()
8769 dev_id_1 = mdio_read(np, port, in fill_phy_probe_info()
8771 dev_id_2 = mdio_read(np, port, in fill_phy_probe_info()
8777 dev_id_1 = mii_read(np, port, MII_PHYSID1); in fill_phy_probe_info()
8778 dev_id_2 = mii_read(np, port, MII_PHYSID2); in fill_phy_probe_info()
8784 niu_unlock_parent(np, flags); in fill_phy_probe_info()
8789 static int walk_phys(struct niu *np, struct niu_parent *parent) in walk_phys() argument
8799 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || in walk_phys()
8800 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { in walk_phys()
8809 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { in walk_phys()
8815 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && in walk_phys()
8818 if (np->flags & NIU_FLAGS_10G) { in walk_phys()
8826 err = fill_phy_probe_info(np, parent, info); in walk_phys()
8856 val = phy_encode(PORT_TYPE_10G, np->port); in walk_phys()
8918 static int niu_probe_ports(struct niu *np) in niu_probe_ports() argument
8920 struct niu_parent *parent = np->parent; in niu_probe_ports()
8924 err = walk_phys(np, parent); in niu_probe_ports()
8928 niu_set_ldg_timer_res(np, 2); in niu_probe_ports()
8930 niu_ldn_irq_enable(np, i, 0); in niu_probe_ports()
8939 static int niu_classifier_swstate_init(struct niu *np) in niu_classifier_swstate_init() argument
8941 struct niu_classifier *cp = &np->clas; in niu_classifier_swstate_init()
8943 cp->tcam_top = (u16) np->port; in niu_classifier_swstate_init()
8944 cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; in niu_classifier_swstate_init()
8948 return fflp_early_init(np); in niu_classifier_swstate_init()
8951 static void niu_link_config_init(struct niu *np) in niu_link_config_init() argument
8953 struct niu_link_config *lp = &np->link_config; in niu_link_config_init()
8976 static int niu_init_mac_ipp_pcs_base(struct niu *np) in niu_init_mac_ipp_pcs_base() argument
8978 switch (np->port) { in niu_init_mac_ipp_pcs_base()
8980 np->mac_regs = np->regs + XMAC_PORT0_OFF; in niu_init_mac_ipp_pcs_base()
8981 np->ipp_off = 0x00000; in niu_init_mac_ipp_pcs_base()
8982 np->pcs_off = 0x04000; in niu_init_mac_ipp_pcs_base()
8983 np->xpcs_off = 0x02000; in niu_init_mac_ipp_pcs_base()
8987 np->mac_regs = np->regs + XMAC_PORT1_OFF; in niu_init_mac_ipp_pcs_base()
8988 np->ipp_off = 0x08000; in niu_init_mac_ipp_pcs_base()
8989 np->pcs_off = 0x0a000; in niu_init_mac_ipp_pcs_base()
8990 np->xpcs_off = 0x08000; in niu_init_mac_ipp_pcs_base()
8994 np->mac_regs = np->regs + BMAC_PORT2_OFF; in niu_init_mac_ipp_pcs_base()
8995 np->ipp_off = 0x04000; in niu_init_mac_ipp_pcs_base()
8996 np->pcs_off = 0x0e000; in niu_init_mac_ipp_pcs_base()
8997 np->xpcs_off = ~0UL; in niu_init_mac_ipp_pcs_base()
9001 np->mac_regs = np->regs + BMAC_PORT3_OFF; in niu_init_mac_ipp_pcs_base()
9002 np->ipp_off = 0x0c000; in niu_init_mac_ipp_pcs_base()
9003 np->pcs_off = 0x12000; in niu_init_mac_ipp_pcs_base()
9004 np->xpcs_off = ~0UL; in niu_init_mac_ipp_pcs_base()
9008 dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port); in niu_init_mac_ipp_pcs_base()
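niu_init_mac_ipp_pcs_base selects the MAC/IPP/PCS/XPCS base offsets by port: ports 0 and 1 get the XMAC blocks (XPCS at 0x02000/0x08000), ports 2 and 3 get the BMAC blocks and no XPCS (~0UL). The sketch below lays the same selection out as a lookup table instead of the driver's switch; the offsets are copied from the fragments, while the struct and table names are placeholders.

/* Illustrative sketch, not part of niu.c: per-port register block offsets. */
#include <stdio.h>

struct port_bases {
	unsigned long ipp_off;
	unsigned long pcs_off;
	unsigned long xpcs_off;   /* ~0UL means "no XPCS on this port" */
};

static const struct port_bases bases[4] = {
	{ 0x00000, 0x04000, 0x02000 },   /* port 0, XMAC */
	{ 0x08000, 0x0a000, 0x08000 },   /* port 1, XMAC */
	{ 0x04000, 0x0e000, ~0UL    },   /* port 2, BMAC */
	{ 0x0c000, 0x12000, ~0UL    },   /* port 3, BMAC */
};

int main(void)
{
	int port;

	for (port = 0; port < 4; port++)
		printf("port %d: ipp %#lx pcs %#lx xpcs %#lx\n", port,
		       bases[port].ipp_off, bases[port].pcs_off,
		       bases[port].xpcs_off);
	return 0;
}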
9015 static void niu_try_msix(struct niu *np, u8 *ldg_num_map) in niu_try_msix() argument
9018 struct niu_parent *parent = np->parent; in niu_try_msix()
9019 struct pci_dev *pdev = np->pdev; in niu_try_msix()
9023 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; in niu_try_msix()
9027 num_irqs = (parent->rxchan_per_port[np->port] + in niu_try_msix()
9028 parent->txchan_per_port[np->port] + in niu_try_msix()
9029 (np->port == 0 ? 3 : 1)); in niu_try_msix()
9039 np->flags &= ~NIU_FLAGS_MSIX; in niu_try_msix()
9043 np->flags |= NIU_FLAGS_MSIX; in niu_try_msix()
9045 np->ldg[i].irq = msi_vec[i].vector; in niu_try_msix()
9046 np->num_ldg = num_irqs; in niu_try_msix()
9049 static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) in niu_n2_irq_init() argument
9052 struct platform_device *op = np->op; in niu_n2_irq_init()
9062 np->ldg[i].irq = op->archdata.irqs[i]; in niu_n2_irq_init()
9065 np->num_ldg = op->archdata.num_irqs; in niu_n2_irq_init()
9073 static int niu_ldg_init(struct niu *np) in niu_ldg_init() argument
9075 struct niu_parent *parent = np->parent; in niu_ldg_init()
9081 np->num_ldg = 1; in niu_ldg_init()
9082 np->ldg[0].irq = np->dev->irq; in niu_ldg_init()
9084 err = niu_n2_irq_init(np, ldg_num_map); in niu_ldg_init()
9088 niu_try_msix(np, ldg_num_map); in niu_ldg_init()
9090 port = np->port; in niu_ldg_init()
9091 for (i = 0; i < np->num_ldg; i++) { in niu_ldg_init()
9092 struct niu_ldg *lp = &np->ldg[i]; in niu_ldg_init()
9094 netif_napi_add(np->dev, &lp->napi, niu_poll, 64); in niu_ldg_init()
9096 lp->np = np; in niu_ldg_init()
9104 if (np->parent->plat_type != PLAT_TYPE_NIU) { in niu_ldg_init()
9105 err = niu_set_ldg_sid(np, lp->ldg_num, port, i); in niu_ldg_init()
9124 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], in niu_ldg_init()
9130 if (ldg_rotor == np->num_ldg) in niu_ldg_init()
9134 err = niu_ldg_assign_ldn(np, parent, in niu_ldg_init()
9141 if (ldg_rotor == np->num_ldg) in niu_ldg_init()
9144 err = niu_ldg_assign_ldn(np, parent, in niu_ldg_init()
9151 if (ldg_rotor == np->num_ldg) in niu_ldg_init()
9162 err = niu_ldg_assign_ldn(np, parent, in niu_ldg_init()
9168 if (ldg_rotor == np->num_ldg) in niu_ldg_init()
9177 err = niu_ldg_assign_ldn(np, parent, in niu_ldg_init()
9183 if (ldg_rotor == np->num_ldg) in niu_ldg_init()
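niu_ldg_init spreads the logical device numbers across the available LDGs with a rotor that advances after each assignment and wraps back to zero once it reaches num_ldg. A minimal model of that round-robin mapping (the counts are hypothetical):

/* Illustrative sketch, not part of niu.c: round-robin LDN -> LDG mapping. */
#include <stdio.h>

int main(void)
{
	int num_ldg = 3, num_ldn = 8;   /* hypothetical counts */
	int ldg_rotor = 0, ldn;

	for (ldn = 0; ldn < num_ldn; ldn++) {
		printf("ldn %d -> ldg %d\n", ldn, ldg_rotor);
		ldg_rotor++;
		if (ldg_rotor == num_ldg)
			ldg_rotor = 0;  /* wrap, as in the fragments above */
	}
	return 0;
}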
9190 static void niu_ldg_free(struct niu *np) in niu_ldg_free() argument
9192 if (np->flags & NIU_FLAGS_MSIX) in niu_ldg_free()
9193 pci_disable_msix(np->pdev); in niu_ldg_free()
9196 static int niu_get_of_props(struct niu *np) in niu_get_of_props() argument
9199 struct net_device *dev = np->dev; in niu_get_of_props()
9206 if (np->parent->plat_type == PLAT_TYPE_NIU) in niu_get_of_props()
9207 dp = np->op->dev.of_node; in niu_get_of_props()
9209 dp = pci_device_to_OF_node(np->pdev); in niu_get_of_props()
9220 strcpy(np->vpd.phy_type, phy_type); in niu_get_of_props()
9222 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { in niu_get_of_props()
9224 dp, np->vpd.phy_type); in niu_get_of_props()
9248 strcpy(np->vpd.model, model); in niu_get_of_props()
9251 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | in niu_get_of_props()
9261 static int niu_get_invariants(struct niu *np) in niu_get_invariants() argument
9266 err = niu_get_of_props(np); in niu_get_invariants()
9272 err = niu_init_mac_ipp_pcs_base(np); in niu_get_invariants()
9277 err = niu_get_and_validate_port(np); in niu_get_invariants()
9282 if (np->parent->plat_type == PLAT_TYPE_NIU) in niu_get_invariants()
9286 offset = niu_pci_vpd_offset(np); in niu_get_invariants()
9287 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_get_invariants()
9290 err = niu_pci_vpd_fetch(np, offset); in niu_get_invariants()
9296 if (np->flags & NIU_FLAGS_VPD_VALID) { in niu_get_invariants()
9297 niu_pci_vpd_validate(np); in niu_get_invariants()
9298 err = niu_get_and_validate_port(np); in niu_get_invariants()
9303 if (!(np->flags & NIU_FLAGS_VPD_VALID)) { in niu_get_invariants()
9304 err = niu_get_and_validate_port(np); in niu_get_invariants()
9307 err = niu_pci_probe_sprom(np); in niu_get_invariants()
9313 err = niu_probe_ports(np); in niu_get_invariants()
9317 niu_ldg_init(np); in niu_get_invariants()
9319 niu_classifier_swstate_init(np); in niu_get_invariants()
9320 niu_link_config_init(np); in niu_get_invariants()
9322 err = niu_determine_phy_disposition(np); in niu_get_invariants()
9324 err = niu_init_link(np); in niu_get_invariants()
9443 static struct niu_parent *niu_new_parent(struct niu *np, in niu_new_parent() argument
9507 static struct niu_parent *niu_get_parent(struct niu *np, in niu_get_parent() argument
9511 int port = np->port; in niu_get_parent()
9522 p = niu_new_parent(np, id, ptype); in niu_get_parent()
9530 &np->device->kobj, in niu_get_parent()
9533 p->ports[port] = np; in niu_get_parent()
9542 static void niu_put_parent(struct niu *np) in niu_put_parent() argument
9544 struct niu_parent *p = np->parent; in niu_put_parent()
9545 u8 port = np->port; in niu_put_parent()
9548 BUG_ON(!p || p->ports[port] != np); in niu_put_parent()
9550 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_put_parent()
9560 np->parent = NULL; in niu_put_parent()
9638 struct niu *np; in niu_alloc_and_init() local
9646 np = netdev_priv(dev); in niu_alloc_and_init()
9647 np->dev = dev; in niu_alloc_and_init()
9648 np->pdev = pdev; in niu_alloc_and_init()
9649 np->op = op; in niu_alloc_and_init()
9650 np->device = gen_dev; in niu_alloc_and_init()
9651 np->ops = ops; in niu_alloc_and_init()
9653 np->msg_enable = niu_debug; in niu_alloc_and_init()
9655 spin_lock_init(&np->lock); in niu_alloc_and_init()
9656 INIT_WORK(&np->reset_task, niu_reset_task); in niu_alloc_and_init()
9658 np->port = port; in niu_alloc_and_init()
9683 static void niu_device_announce(struct niu *np) in niu_device_announce() argument
9685 struct net_device *dev = np->dev; in niu_device_announce()
9689 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { in niu_device_announce()
9692 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), in niu_device_announce()
9693 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), in niu_device_announce()
9694 (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"), in niu_device_announce()
9695 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : in niu_device_announce()
9696 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), in niu_device_announce()
9697 np->vpd.phy_type); in niu_device_announce()
9701 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), in niu_device_announce()
9702 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), in niu_device_announce()
9703 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : in niu_device_announce()
9704 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : in niu_device_announce()
9706 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : in niu_device_announce()
9707 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), in niu_device_announce()
9708 np->vpd.phy_type); in niu_device_announce()
9723 struct niu *np; in niu_pci_init_one() local
9760 np = netdev_priv(dev); in niu_pci_init_one()
9767 np->parent = niu_get_parent(np, &parent_id, in niu_pci_init_one()
9769 if (!np->parent) { in niu_pci_init_one()
9802 np->regs = pci_ioremap_bar(pdev, 0); in niu_pci_init_one()
9803 if (!np->regs) { in niu_pci_init_one()
9820 err = niu_get_invariants(np); in niu_pci_init_one()
9835 niu_device_announce(np); in niu_pci_init_one()
9840 if (np->regs) { in niu_pci_init_one()
9841 iounmap(np->regs); in niu_pci_init_one()
9842 np->regs = NULL; in niu_pci_init_one()
9846 niu_put_parent(np); in niu_pci_init_one()
9865 struct niu *np = netdev_priv(dev); in niu_pci_remove_one() local
9868 if (np->regs) { in niu_pci_remove_one()
9869 iounmap(np->regs); in niu_pci_remove_one()
9870 np->regs = NULL; in niu_pci_remove_one()
9873 niu_ldg_free(np); in niu_pci_remove_one()
9875 niu_put_parent(np); in niu_pci_remove_one()
9886 struct niu *np = netdev_priv(dev); in niu_suspend() local
9892 flush_work(&np->reset_task); in niu_suspend()
9893 niu_netif_stop(np); in niu_suspend()
9895 del_timer_sync(&np->timer); in niu_suspend()
9897 spin_lock_irqsave(&np->lock, flags); in niu_suspend()
9898 niu_enable_interrupts(np, 0); in niu_suspend()
9899 spin_unlock_irqrestore(&np->lock, flags); in niu_suspend()
9903 spin_lock_irqsave(&np->lock, flags); in niu_suspend()
9904 niu_stop_hw(np); in niu_suspend()
9905 spin_unlock_irqrestore(&np->lock, flags); in niu_suspend()
9913 struct niu *np = netdev_priv(dev); in niu_resume() local
9922 spin_lock_irqsave(&np->lock, flags); in niu_resume()
9924 err = niu_init_hw(np); in niu_resume()
9926 np->timer.expires = jiffies + HZ; in niu_resume()
9927 add_timer(&np->timer); in niu_resume()
9928 niu_netif_start(np); in niu_resume()
9931 spin_unlock_irqrestore(&np->lock, flags); in niu_resume()
10009 struct niu *np; in niu_of_probe() local
10028 np = netdev_priv(dev); in niu_of_probe()
10033 np->parent = niu_get_parent(np, &parent_id, in niu_of_probe()
10035 if (!np->parent) { in niu_of_probe()
10042 np->regs = of_ioremap(&op->resource[1], 0, in niu_of_probe()
10045 if (!np->regs) { in niu_of_probe()
10051 np->vir_regs_1 = of_ioremap(&op->resource[2], 0, in niu_of_probe()
10054 if (!np->vir_regs_1) { in niu_of_probe()
10060 np->vir_regs_2 = of_ioremap(&op->resource[3], 0, in niu_of_probe()
10063 if (!np->vir_regs_2) { in niu_of_probe()
10071 err = niu_get_invariants(np); in niu_of_probe()
10086 niu_device_announce(np); in niu_of_probe()
10091 if (np->vir_regs_1) { in niu_of_probe()
10092 of_iounmap(&op->resource[2], np->vir_regs_1, in niu_of_probe()
10094 np->vir_regs_1 = NULL; in niu_of_probe()
10097 if (np->vir_regs_2) { in niu_of_probe()
10098 of_iounmap(&op->resource[3], np->vir_regs_2, in niu_of_probe()
10100 np->vir_regs_2 = NULL; in niu_of_probe()
10103 if (np->regs) { in niu_of_probe()
10104 of_iounmap(&op->resource[1], np->regs, in niu_of_probe()
10106 np->regs = NULL; in niu_of_probe()
10110 niu_put_parent(np); in niu_of_probe()
10124 struct niu *np = netdev_priv(dev); in niu_of_remove() local
10128 if (np->vir_regs_1) { in niu_of_remove()
10129 of_iounmap(&op->resource[2], np->vir_regs_1, in niu_of_remove()
10131 np->vir_regs_1 = NULL; in niu_of_remove()
10134 if (np->vir_regs_2) { in niu_of_remove()
10135 of_iounmap(&op->resource[3], np->vir_regs_2, in niu_of_remove()
10137 np->vir_regs_2 = NULL; in niu_of_remove()
10140 if (np->regs) { in niu_of_remove()
10141 of_iounmap(&op->resource[1], np->regs, in niu_of_remove()
10143 np->regs = NULL; in niu_of_remove()
10146 niu_ldg_free(np); in niu_of_remove()
10148 niu_put_parent(np); in niu_of_remove()