Lines Matching +full:mii +full:- +full:rt

1 // SPDX-License-Identifier: GPL-2.0
13 #include <linux/dma-mapping.h>
20 #include <linux/mii.h>
55 #define niu_next_page(p) container_of(p, union niu_page, page)->next
91 #define nr64(reg) readq(np->regs + (reg))
92 #define nw64(reg, val) writeq((val), np->regs + (reg))
94 #define nr64_mac(reg) readq(np->mac_regs + (reg))
95 #define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg))
97 #define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg))
98 #define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg))
100 #define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg))
101 #define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg))
103 #define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg))
104 #define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg))
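
The nr64/nw64 macro family above is the driver's whole register-access layer: 64-bit MMIO reads and writes against the base mapping, with per-block variants (MAC, IPP, PCS, XPCS) that add a fixed block offset. A minimal sketch of the same pattern, with a hypothetical demo_dev struct standing in for struct niu and assuming regs came from ioremap() on the device BAR:

    /* Illustrative only -- a cut-down version of the accessor pattern.
     * Assumes a 64-bit arch where readq()/writeq() are available. */
    #include <linux/io.h>

    struct demo_dev {
            void __iomem *regs;     /* base of the register BAR */
            unsigned long mac_off;  /* offset of the MAC register block */
    };

    static inline u64 demo_read_mac(struct demo_dev *d, unsigned long reg)
    {
            return readq(d->regs + d->mac_off + reg);
    }

    static inline void demo_write_mac(struct demo_dev *d, unsigned long reg,
                                      u64 val)
    {
            writeq(val, d->regs + d->mac_off + reg);
    }
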
109 static int debug = -1;
114 spin_lock_irqsave(&np->parent->lock, flags)
116 spin_unlock_irqrestore(&np->parent->lock, flags)
123 while (--limit >= 0) { in __niu_wait_bits_clear_mac()
131 return -ENODEV; in __niu_wait_bits_clear_mac()
144 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", in __niu_set_and_wait_clear_mac()
158 while (--limit >= 0) { in __niu_wait_bits_clear_ipp()
166 return -ENODEV; in __niu_wait_bits_clear_ipp()
183 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", in __niu_set_and_wait_clear_ipp()
197 while (--limit >= 0) { in __niu_wait_bits_clear()
205 return -ENODEV; in __niu_wait_bits_clear()
223 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", in __niu_set_and_wait_clear()
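
The __niu_wait_bits_clear* matches above all share one idiom: poll a register with a bounded retry count until the hardware clears the requested bits, and return -ENODEV on timeout. A hedged skeleton of that loop (the retry limit and delay below are illustrative, not the driver's exact values):

    /* Illustrative skeleton of the wait-for-bits-to-clear idiom. */
    #include <linux/io.h>
    #include <linux/delay.h>

    static int demo_wait_bits_clear(void __iomem *reg, u64 bits, int limit)
    {
            while (--limit >= 0) {
                    if (!(readq(reg) & bits))
                            return 0;       /* bits self-cleared: success */
                    udelay(100);            /* delay value is a guess */
            }
            return -ENODEV;                 /* timed out: device not responding */
    }
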
236 u64 val = (u64) lp->timer; in niu_ldg_rearm()
241 nw64(LDG_IMGMT(lp->ldg_num), val); in niu_ldg_rearm()
250 return -EINVAL; in niu_ldn_irq_enable()
256 mask_reg = LD_IM1(ldn - 64); in niu_ldn_irq_enable()
272 struct niu_parent *parent = np->parent; in niu_enable_ldn_in_ldg()
278 if (parent->ldg_map[i] != lp->ldg_num) in niu_enable_ldn_in_ldg()
292 for (i = 0; i < np->num_ldg; i++) { in niu_enable_interrupts()
293 struct niu_ldg *lp = &np->ldg[i]; in niu_enable_interrupts()
300 for (i = 0; i < np->num_ldg; i++) in niu_enable_interrupts()
301 niu_ldg_rearm(np, &np->ldg[i], on); in niu_enable_interrupts()
321 while (--limit > 0) { in mdio_wait()
329 return -ENODEV; in mdio_wait()
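
mdio_wait() (matches at lines 321-329) is the completion poll behind every mdio_read()/mdio_write() call in this listing. A speculative sketch of the shape such a poll takes; the frame register offset and done bit below are placeholders, not the NIU's real MIF layout:

    /* Hedged sketch: poll a (hypothetical) MIF frame register until the
     * controller marks the MDIO transaction complete. */
    #include <linux/io.h>
    #include <linux/delay.h>

    #define DEMO_MIF_FRAME          0x0             /* placeholder offset */
    #define DEMO_MIF_FRAME_DONE     (1ULL << 16)    /* placeholder bit */

    static int demo_mdio_wait(void __iomem *regs)
    {
            int limit = 1000;

            while (--limit > 0) {
                    u64 frame = readq(regs + DEMO_MIF_FRAME);

                    if (frame & DEMO_MIF_FRAME_DONE)
                            return frame & 0xffff;  /* read data in low bits */
                    udelay(10);
            }
            return -ENODEV;
    }
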
384 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_tx_cfg()
388 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_tx_cfg()
398 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_rx_cfg()
402 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_rx_cfg()
411 struct niu_link_config *lp = &np->link_config; in serdes_init_niu_10g_fiber()
420 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_niu_10g_fiber()
423 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_fiber()
448 struct niu_link_config *lp = &np->link_config; in serdes_init_niu_1g_serdes()
462 if (np->port == 0) in serdes_init_niu_1g_serdes()
465 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_niu_1g_serdes()
468 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_1g_serdes()
478 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_1g_serdes()
481 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", in serdes_init_niu_1g_serdes()
482 np->port, __func__); in serdes_init_niu_1g_serdes()
488 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_1g_serdes()
491 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", in serdes_init_niu_1g_serdes()
492 np->port, __func__); in serdes_init_niu_1g_serdes()
511 switch (np->port) { in serdes_init_niu_1g_serdes()
523 return -EINVAL; in serdes_init_niu_1g_serdes()
526 while (max_retry--) { in serdes_init_niu_1g_serdes()
535 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", in serdes_init_niu_1g_serdes()
536 np->port, (int)(sig & mask), (int)val); in serdes_init_niu_1g_serdes()
537 return -ENODEV; in serdes_init_niu_1g_serdes()
545 struct niu_link_config *lp = &np->link_config; in serdes_init_niu_10g_serdes()
557 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_niu_10g_serdes()
560 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_serdes()
570 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_serdes()
573 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", in serdes_init_niu_10g_serdes()
574 np->port, __func__); in serdes_init_niu_10g_serdes()
580 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_serdes()
583 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", in serdes_init_niu_10g_serdes()
584 np->port, __func__); in serdes_init_niu_10g_serdes()
605 switch (np->port) { in serdes_init_niu_10g_serdes()
629 return -EINVAL; in serdes_init_niu_10g_serdes()
632 while (max_retry--) { in serdes_init_niu_10g_serdes()
642 np->port, (int)(sig & mask), (int)val); in serdes_init_niu_10g_serdes()
647 np->flags &= ~NIU_FLAGS_10G; in serdes_init_niu_10g_serdes()
648 np->mac_xcvr = MAC_XCVR_PCS; in serdes_init_niu_10g_serdes()
650 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", in serdes_init_niu_10g_serdes()
651 np->port); in serdes_init_niu_10g_serdes()
652 return -ENODEV; in serdes_init_niu_10g_serdes()
662 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); in esr_read_rxtx_ctrl()
665 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_rxtx_ctrl()
678 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_glue0()
682 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_glue0()
696 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_reset()
700 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_reset()
714 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_rxtx_ctrl()
717 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_rxtx_ctrl()
726 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_glue0()
729 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_glue0()
739 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
743 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
749 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
755 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
765 netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", in esr_reset()
766 np->port, reset); in esr_reset()
767 return -ENODEV; in esr_reset()
775 struct niu_link_config *lp = &np->link_config; in serdes_init_10g()
780 switch (np->port) { in serdes_init_10g()
791 return -EINVAL; in serdes_init_10g()
807 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_10g()
858 switch (np->port) { in serdes_init_10g()
882 return -EINVAL; in serdes_init_10g()
886 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { in serdes_init_10g()
887 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; in serdes_init_10g()
890 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", in serdes_init_10g()
891 np->port, (int)(sig & mask), (int)val); in serdes_init_10g()
892 return -ENODEV; in serdes_init_10g()
894 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) in serdes_init_10g()
895 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; in serdes_init_10g()
905 switch (np->port) { in serdes_init_1g()
919 return -EINVAL; in serdes_init_1g()
928 struct niu_link_config *lp = &np->link_config; in serdes_init_1g_serdes()
937 switch (np->port) { in serdes_init_1g_serdes()
952 return -EINVAL; in serdes_init_1g_serdes()
968 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_1g_serdes()
1023 switch (np->port) { in serdes_init_1g_serdes()
1035 return -EINVAL; in serdes_init_1g_serdes()
1039 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", in serdes_init_1g_serdes()
1040 np->port, (int)(sig & mask), (int)val); in serdes_init_1g_serdes()
1041 return -ENODEV; in serdes_init_1g_serdes()
1049 struct niu_link_config *lp = &np->link_config; in link_status_1g_serdes()
1060 spin_lock_irqsave(&np->lock, flags); in link_status_1g_serdes()
1070 lp->active_speed = current_speed; in link_status_1g_serdes()
1071 lp->active_duplex = current_duplex; in link_status_1g_serdes()
1072 spin_unlock_irqrestore(&np->lock, flags); in link_status_1g_serdes()
1081 struct niu_link_config *lp = &np->link_config; in link_status_10g_serdes()
1088 if (!(np->flags & NIU_FLAGS_10G)) in link_status_10g_serdes()
1093 spin_lock_irqsave(&np->lock, flags); in link_status_10g_serdes()
1105 lp->active_speed = current_speed; in link_status_10g_serdes()
1106 lp->active_duplex = current_duplex; in link_status_10g_serdes()
1107 spin_unlock_irqrestore(&np->lock, flags); in link_status_10g_serdes()
1114 struct niu_link_config *lp = &np->link_config; in link_status_mii()
1119 err = mii_read(np, np->phy_addr, MII_BMCR); in link_status_mii()
1124 err = mii_read(np, np->phy_addr, MII_BMSR); in link_status_mii()
1129 err = mii_read(np, np->phy_addr, MII_ADVERTISE); in link_status_mii()
1134 err = mii_read(np, np->phy_addr, MII_LPA); in link_status_mii()
1140 err = mii_read(np, np->phy_addr, MII_ESTATUS); in link_status_mii()
1145 err = mii_read(np, np->phy_addr, MII_CTRL1000); in link_status_mii()
1150 err = mii_read(np, np->phy_addr, MII_STAT1000); in link_status_mii()
1172 lp->supported = supported; in link_status_mii()
1180 lp->active_autoneg = 1; in link_status_mii()
1202 lp->active_autoneg = 0; in link_status_mii()
1217 lp->active_advertising = advertising; in link_status_mii()
1218 lp->active_speed = active_speed; in link_status_mii()
1219 lp->active_duplex = active_duplex; in link_status_mii()
1227 struct niu_link_config *lp = &np->link_config; in link_status_1g_rgmii()
1237 spin_lock_irqsave(&np->lock, flags); in link_status_1g_rgmii()
1239 err = mii_read(np, np->phy_addr, MII_BMSR); in link_status_1g_rgmii()
1249 lp->active_speed = current_speed; in link_status_1g_rgmii()
1250 lp->active_duplex = current_duplex; in link_status_1g_rgmii()
1254 spin_unlock_irqrestore(&np->lock, flags); in link_status_1g_rgmii()
1262 struct niu_link_config *lp = &np->link_config; in link_status_1g()
1266 spin_lock_irqsave(&np->lock, flags); in link_status_1g()
1269 lp->supported |= SUPPORTED_TP; in link_status_1g()
1270 lp->active_advertising |= ADVERTISED_TP; in link_status_1g()
1272 spin_unlock_irqrestore(&np->lock, flags); in link_status_1g()
1280 err = mdio_read(np, np->phy_addr, in bcm8704_reset()
1285 err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in bcm8704_reset()
1291 while (--limit >= 0) { in bcm8704_reset()
1292 err = mdio_read(np, np->phy_addr, in bcm8704_reset()
1300 netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n", in bcm8704_reset()
1301 np->port, (err & 0xffff)); in bcm8704_reset()
1302 return -ENODEV; in bcm8704_reset()
1312 int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); in bcm8704_user_dev3_readback()
1315 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); in bcm8704_user_dev3_readback()
1326 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8706_init_user_dev3()
1333 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8706_init_user_dev3()
1347 err = mdio_write(np, np->phy_addr, in bcm8704_init_user_dev3()
1361 err = mdio_write(np, np->phy_addr, in bcm8704_init_user_dev3()
1377 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8704_init_user_dev3()
1383 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8704_init_user_dev3()
1397 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_act_led()
1405 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_act_led()
1413 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_led_blink_rate()
1419 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_led_blink_rate()
1440 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in xcvr_init_10g_mrvl88x2011()
1447 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in xcvr_init_10g_mrvl88x2011()
1452 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in xcvr_init_10g_mrvl88x2011()
1457 if (np->link_config.loopback_mode == LOOPBACK_MAC) in xcvr_init_10g_mrvl88x2011()
1462 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in xcvr_init_10g_mrvl88x2011()
1468 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in xcvr_init_10g_mrvl88x2011()
1479 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, in xcvr_diag_bcm870x()
1483 pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err); in xcvr_diag_bcm870x()
1485 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); in xcvr_diag_bcm870x()
1488 pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); in xcvr_diag_bcm870x()
1490 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in xcvr_diag_bcm870x()
1494 pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); in xcvr_diag_bcm870x()
1498 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1502 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1508 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1512 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1521 np->port); in xcvr_diag_bcm870x()
1524 np->port); in xcvr_diag_bcm870x()
1533 struct niu_link_config *lp = &np->link_config; in xcvr_10g_set_lb_bcm870x()
1536 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in xcvr_10g_set_lb_bcm870x()
1543 if (lp->loopback_mode == LOOPBACK_MAC) in xcvr_10g_set_lb_bcm870x()
1546 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in xcvr_10g_set_lb_bcm870x()
1559 if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && in xcvr_init_10g_bcm8706()
1560 (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) in xcvr_init_10g_bcm8706()
1629 phy_id = phy_decode(np->parent->port_phy, np->port); in xcvr_init_10g()
1630 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; in xcvr_init_10g()
1650 err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); in mii_reset()
1655 while (--limit >= 0) { in mii_reset()
1657 err = mii_read(np, np->phy_addr, MII_BMCR); in mii_reset()
1664 netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", in mii_reset()
1665 np->port, err); in mii_reset()
1666 return -ENODEV; in mii_reset()
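
mii_reset() above is the standard MII soft-reset dance: write BMCR_RESET, then poll MII_BMCR until the PHY clears the bit on its own. The same sequence expressed against phylib's generic mdiobus accessors for self-containment (the driver uses its private mii_read()/mii_write() wrappers instead):

    #include <linux/mii.h>
    #include <linux/phy.h>
    #include <linux/delay.h>

    static int demo_mii_soft_reset(struct mii_bus *bus, int phy_addr)
    {
            int limit = 500, err;

            err = mdiobus_write(bus, phy_addr, MII_BMCR, BMCR_RESET);
            if (err)
                    return err;

            while (--limit >= 0) {
                    err = mdiobus_read(bus, phy_addr, MII_BMCR);
                    if (err < 0)
                            return err;
                    if (!(err & BMCR_RESET))
                            return 0;       /* PHY finished its internal reset */
                    msleep(1);
            }
            return -ENODEV;                 /* PHY stuck in reset */
    }
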
1686 err = mii_read(np, np->phy_addr, MII_BMSR); in xcvr_init_1g_rgmii()
1693 err = mii_read(np, np->phy_addr, MII_ESTATUS); in xcvr_init_1g_rgmii()
1700 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in xcvr_init_1g_rgmii()
1709 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); in xcvr_init_1g_rgmii()
1716 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in xcvr_init_1g_rgmii()
1720 err = mii_read(np, np->phy_addr, MII_BMCR); in xcvr_init_1g_rgmii()
1723 bmcr = mii_read(np, np->phy_addr, MII_BMCR); in xcvr_init_1g_rgmii()
1725 err = mii_read(np, np->phy_addr, MII_BMSR); in xcvr_init_1g_rgmii()
1734 struct niu_link_config *lp = &np->link_config; in mii_init_common()
1742 err = mii_read(np, np->phy_addr, MII_BMSR); in mii_init_common()
1749 err = mii_read(np, np->phy_addr, MII_ESTATUS); in mii_init_common()
1756 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in mii_init_common()
1760 if (lp->loopback_mode == LOOPBACK_MAC) { in mii_init_common()
1762 if (lp->active_speed == SPEED_1000) in mii_init_common()
1764 if (lp->active_duplex == DUPLEX_FULL) in mii_init_common()
1768 if (lp->loopback_mode == LOOPBACK_PHY) { in mii_init_common()
1773 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); in mii_init_common()
1778 if (lp->autoneg) { in mii_init_common()
1783 (lp->advertising & ADVERTISED_10baseT_Half)) in mii_init_common()
1786 (lp->advertising & ADVERTISED_10baseT_Full)) in mii_init_common()
1789 (lp->advertising & ADVERTISED_100baseT_Half)) in mii_init_common()
1792 (lp->advertising & ADVERTISED_100baseT_Full)) in mii_init_common()
1794 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); in mii_init_common()
1801 (lp->advertising & ADVERTISED_1000baseT_Half)) in mii_init_common()
1804 (lp->advertising & ADVERTISED_1000baseT_Full)) in mii_init_common()
1806 err = mii_write(np, np->phy_addr, in mii_init_common()
1814 /* !lp->autoneg */ in mii_init_common()
1817 if (lp->duplex == DUPLEX_FULL) { in mii_init_common()
1820 } else if (lp->duplex == DUPLEX_HALF) in mii_init_common()
1823 return -EINVAL; in mii_init_common()
1825 if (lp->speed == SPEED_1000) { in mii_init_common()
1826 /* if X-full requested while not supported, or in mii_init_common()
1827 X-half requested while not supported... */ in mii_init_common()
1830 return -EINVAL; in mii_init_common()
1832 } else if (lp->speed == SPEED_100) { in mii_init_common()
1835 return -EINVAL; in mii_init_common()
1837 } else if (lp->speed == SPEED_10) { in mii_init_common()
1840 return -EINVAL; in mii_init_common()
1842 return -EINVAL; in mii_init_common()
1845 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in mii_init_common()
1850 err = mii_read(np, np->phy_addr, MII_BMCR); in mii_init_common()
1855 err = mii_read(np, np->phy_addr, MII_BMSR); in mii_init_common()
1860 pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n", in mii_init_common()
1861 np->port, bmcr, bmsr); in mii_init_common()
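
The advertisement writes in mii_init_common() translate the generic ADVERTISED_* link-mode mask into MII_ADVERTISE and MII_CTRL1000 register bits before restarting autonegotiation. A self-contained sketch of that translation using the standard <linux/mii.h> bit definitions:

    #include <linux/mii.h>
    #include <linux/ethtool.h>

    static u16 demo_build_adv(u32 advertising)
    {
            u16 adv = ADVERTISE_CSMA;       /* always advertise 802.3 CSMA */

            if (advertising & ADVERTISED_10baseT_Half)
                    adv |= ADVERTISE_10HALF;
            if (advertising & ADVERTISED_10baseT_Full)
                    adv |= ADVERTISE_10FULL;
            if (advertising & ADVERTISED_100baseT_Half)
                    adv |= ADVERTISE_100HALF;
            if (advertising & ADVERTISED_100baseT_Full)
                    adv |= ADVERTISE_100FULL;
            return adv;                     /* written to MII_ADVERTISE */
    }

    static u16 demo_build_ctrl1000(u32 advertising)
    {
            u16 ctrl1000 = 0;

            if (advertising & ADVERTISED_1000baseT_Half)
                    ctrl1000 |= ADVERTISE_1000HALF;
            if (advertising & ADVERTISED_1000baseT_Full)
                    ctrl1000 |= ADVERTISE_1000FULL;
            return ctrl1000;                /* written to MII_CTRL1000 */
    }
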
1881 const struct niu_phy_ops *ops = np->phy_ops; in niu_xcvr_init()
1885 if (ops->xcvr_init) in niu_xcvr_init()
1886 err = ops->xcvr_init(np); in niu_xcvr_init()
1893 const struct niu_phy_ops *ops = np->phy_ops; in niu_serdes_init()
1897 if (ops->serdes_init) in niu_serdes_init()
1898 err = ops->serdes_init(np); in niu_serdes_init()
1908 struct niu_link_config *lp = &np->link_config; in niu_link_status_common()
1909 struct net_device *dev = np->dev; in niu_link_status_common()
1914 lp->active_speed == SPEED_10000 ? "10Gb/sec" : in niu_link_status_common()
1915 lp->active_speed == SPEED_1000 ? "1Gb/sec" : in niu_link_status_common()
1916 lp->active_speed == SPEED_100 ? "100Mbit/sec" : in niu_link_status_common()
1918 lp->active_duplex == DUPLEX_FULL ? "full" : "half"); in niu_link_status_common()
1920 spin_lock_irqsave(&np->lock, flags); in niu_link_status_common()
1923 spin_unlock_irqrestore(&np->lock, flags); in niu_link_status_common()
1928 spin_lock_irqsave(&np->lock, flags); in niu_link_status_common()
1930 spin_unlock_irqrestore(&np->lock, flags); in niu_link_status_common()
1943 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in link_status_10g_mrvl()
1949 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in link_status_10g_mrvl()
1957 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in link_status_10g_mrvl()
1962 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in link_status_10g_mrvl()
1969 /* Check XGXS Register : 4.0018.[0-3,12] */ in link_status_10g_mrvl()
1970 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, in link_status_10g_mrvl()
1981 np->link_config.active_speed = SPEED_10000; in link_status_10g_mrvl()
1982 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_mrvl()
1998 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, in link_status_10g_bcm8706()
2007 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in link_status_10g_bcm8706()
2017 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in link_status_10g_bcm8706()
2029 np->link_config.active_speed = SPEED_INVALID; in link_status_10g_bcm8706()
2030 np->link_config.active_duplex = DUPLEX_INVALID; in link_status_10g_bcm8706()
2035 np->link_config.active_speed = SPEED_10000; in link_status_10g_bcm8706()
2036 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_bcm8706()
2050 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, in link_status_10g_bcom()
2059 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in link_status_10g_bcom()
2068 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in link_status_10g_bcom()
2084 np->link_config.active_speed = SPEED_10000; in link_status_10g_bcom()
2085 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_bcom()
2096 int err = -EINVAL; in link_status_10g()
2098 spin_lock_irqsave(&np->lock, flags); in link_status_10g()
2100 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { in link_status_10g()
2103 phy_id = phy_decode(np->parent->port_phy, np->port); in link_status_10g()
2104 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; in link_status_10g()
2118 spin_unlock_irqrestore(&np->lock, flags); in link_status_10g()
2128 switch (np->port) { in niu_10g_phy_present()
2167 spin_lock_irqsave(&np->lock, flags); in link_status_10g_hotplug()
2169 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { in link_status_10g_hotplug()
2170 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? in link_status_10g_hotplug()
2177 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; in link_status_10g_hotplug()
2178 if (np->phy_ops->xcvr_init) in link_status_10g_hotplug()
2179 err = np->phy_ops->xcvr_init(np); in link_status_10g_hotplug()
2181 err = mdio_read(np, np->phy_addr, in link_status_10g_hotplug()
2184 /* No mdio, back-to-back XAUI */ in link_status_10g_hotplug()
2188 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; in link_status_10g_hotplug()
2191 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; in link_status_10g_hotplug()
2193 netif_warn(np, link, np->dev, in link_status_10g_hotplug()
2198 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { in link_status_10g_hotplug()
2201 /* No mdio, back-to-back XAUI: it is C10NEM */ in link_status_10g_hotplug()
2203 np->link_config.active_speed = SPEED_10000; in link_status_10g_hotplug()
2204 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_hotplug()
2209 spin_unlock_irqrestore(&np->lock, flags); in link_status_10g_hotplug()
2216 const struct niu_phy_ops *ops = np->phy_ops; in niu_link_status()
2220 if (ops->link_status) in niu_link_status()
2221 err = ops->link_status(np, link_up_p); in niu_link_status()
2236 if (netif_carrier_ok(np->dev)) in niu_timer()
2240 np->timer.expires = jiffies + off; in niu_timer()
2242 add_timer(&np->timer); in niu_timer()
2371 struct niu_link_config *lp = &np->link_config; in serdes_init_10g_serdes()
2375 switch (np->port) { in serdes_init_10g_serdes()
2388 return -EINVAL; in serdes_init_10g_serdes()
2404 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_10g_serdes()
2455 switch (np->port) { in serdes_init_10g_serdes()
2479 return -EINVAL; in serdes_init_10g_serdes()
2486 np->flags &= ~NIU_FLAGS_10G; in serdes_init_10g_serdes()
2487 np->mac_xcvr = MAC_XCVR_PCS; in serdes_init_10g_serdes()
2489 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", in serdes_init_10g_serdes()
2490 np->port); in serdes_init_10g_serdes()
2491 return -ENODEV; in serdes_init_10g_serdes()
2500 struct niu_parent *parent = np->parent; in niu_determine_phy_disposition()
2501 u8 plat_type = parent->plat_type; in niu_determine_phy_disposition()
2506 switch (np->flags & in niu_determine_phy_disposition()
2521 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { in niu_determine_phy_disposition()
2523 if (np->port == 0) in niu_determine_phy_disposition()
2525 if (np->port == 1) in niu_determine_phy_disposition()
2529 phy_addr_off += np->port; in niu_determine_phy_disposition()
2534 switch (np->flags & in niu_determine_phy_disposition()
2546 phy_addr_off += (np->port ^ 0x3); in niu_determine_phy_disposition()
2565 phy_addr_off += np->port; in niu_determine_phy_disposition()
2566 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { in niu_determine_phy_disposition()
2568 if (np->port == 0) in niu_determine_phy_disposition()
2570 if (np->port == 1) in niu_determine_phy_disposition()
2578 switch(np->port) { in niu_determine_phy_disposition()
2588 return -EINVAL; in niu_determine_phy_disposition()
2590 phy_addr_off = niu_atca_port_num[np->port]; in niu_determine_phy_disposition()
2594 return -EINVAL; in niu_determine_phy_disposition()
2598 np->phy_ops = tp->ops; in niu_determine_phy_disposition()
2599 np->phy_addr = tp->phy_addr_base + phy_addr_off; in niu_determine_phy_disposition()
2606 struct niu_parent *parent = np->parent; in niu_init_link()
2609 if (parent->plat_type == PLAT_TYPE_NIU) { in niu_init_link()
2616 if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) in niu_init_link()
2620 if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) in niu_init_link()
2631 if (np->flags & NIU_FLAGS_XMAC) { in niu_set_primary_mac()
2644 if (np->flags & NIU_FLAGS_XMAC) in niu_num_alt_addr()
2657 return -EINVAL; in niu_set_alt_mac()
2659 if (np->flags & NIU_FLAGS_XMAC) { in niu_set_alt_mac()
2678 return -EINVAL; in niu_enable_alt_mac()
2680 if (np->flags & NIU_FLAGS_XMAC) { in niu_enable_alt_mac()
2716 return -EINVAL; in __set_rdc_table_num()
2717 if (np->flags & NIU_FLAGS_XMAC) in __set_rdc_table_num()
2741 return -EINVAL; in niu_set_alt_mac_rdc_table()
2796 while (--limit > 0) { in tcam_wait_bit()
2802 return -ENODEV; in tcam_wait_bit()
2911 return -EINVAL; in tcam_user_eth_class_enable()
2913 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); in tcam_user_eth_class_enable()
2934 return -EINVAL;
2936 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2954 return -EINVAL; in tcam_user_ip_class_enable()
2956 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); in tcam_user_ip_class_enable()
2979 return -EINVAL; in tcam_user_ip_class_set()
2981 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); in tcam_user_ip_class_set()
3022 for (i = 0; i < np->parent->tcam_num_entries; i++) { in tcam_flush_all()
3045 return -EINVAL;
3064 return -EINVAL; in hash_write()
3113 return -EINVAL; in fflp_set_partition()
3187 parent = np->parent; in fflp_early_init()
3189 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { in fflp_early_init()
3190 if (np->parent->plat_type != PLAT_TYPE_NIU) { in fflp_early_init()
3195 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3204 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3215 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3219 if (np->parent->plat_type != PLAT_TYPE_NIU) { in fflp_early_init()
3222 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3231 parent->flags |= PARENT_FLGS_CLS_HWINIT; in fflp_early_init()
3242 return -EINVAL; in niu_set_flow_key()
3244 nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); in niu_set_flow_key()
3252 return -EINVAL; in niu_set_tcam_key()
3254 nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); in niu_set_tcam_key()
3262 if (idx >= (np->clas.tcam_sz - 1)) in tcam_get_index()
3264 return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); in tcam_get_index()
3270 return np->clas.tcam_sz - 1; in tcam_get_size()
3276 return np->clas.tcam_valid_entries - 1; in tcam_get_valid_entry_cnt()
3282 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size); in niu_rx_skb_append()
3284 skb->len += size; in niu_rx_skb_append()
3285 skb->data_len += size; in niu_rx_skb_append()
3286 skb->truesize += truesize; in niu_rx_skb_append()
3294 return a & (MAX_RBR_RING_SIZE - 1); in niu_hash_rxaddr()
3304 pp = &rp->rxhash[h]; in niu_find_rxpage()
3306 if (p->index == addr) { in niu_find_rxpage()
3321 page->index = base; in niu_hash_page()
3322 niu_next_page(page) = rp->rxhash[h]; in niu_hash_page()
3323 rp->rxhash[h] = page; in niu_hash_page()
3335 return -ENOMEM; in niu_rbr_add_page()
3337 addr = np->ops->map_page(np->device, page, 0, in niu_rbr_add_page()
3339 if (np->ops->mapping_error(np->device, addr)) { in niu_rbr_add_page()
3341 return -ENOMEM; in niu_rbr_add_page()
3345 if (rp->rbr_blocks_per_page > 1) in niu_rbr_add_page()
3346 page_ref_add(page, rp->rbr_blocks_per_page - 1); in niu_rbr_add_page()
3348 for (i = 0; i < rp->rbr_blocks_per_page; i++) { in niu_rbr_add_page()
3349 __le32 *rbr = &rp->rbr[start_index + i]; in niu_rbr_add_page()
3352 addr += rp->rbr_block_size; in niu_rbr_add_page()
3360 int index = rp->rbr_index; in niu_rbr_refill()
3362 rp->rbr_pending++; in niu_rbr_refill()
3363 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { in niu_rbr_refill()
3367 rp->rbr_pending--; in niu_rbr_refill()
3371 rp->rbr_index += rp->rbr_blocks_per_page; in niu_rbr_refill()
3372 BUG_ON(rp->rbr_index > rp->rbr_table_size); in niu_rbr_refill()
3373 if (rp->rbr_index == rp->rbr_table_size) in niu_rbr_refill()
3374 rp->rbr_index = 0; in niu_rbr_refill()
3376 if (rp->rbr_pending >= rp->rbr_kick_thresh) { in niu_rbr_refill()
3377 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); in niu_rbr_refill()
3378 rp->rbr_pending = 0; in niu_rbr_refill()
3385 unsigned int index = rp->rcr_index; in niu_rx_pkt_ignore()
3388 rp->rx_dropped++; in niu_rx_pkt_ignore()
3396 val = le64_to_cpup(&rp->rcr[index]); in niu_rx_pkt_ignore()
3401 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> in niu_rx_pkt_ignore()
3403 if ((page->index + PAGE_SIZE) - rcr_size == addr) { in niu_rx_pkt_ignore()
3405 np->ops->unmap_page(np->device, page->index, in niu_rx_pkt_ignore()
3407 page->index = 0; in niu_rx_pkt_ignore()
3410 rp->rbr_refill_pending++; in niu_rx_pkt_ignore()
3418 rp->rcr_index = index; in niu_rx_pkt_ignore()
3426 unsigned int index = rp->rcr_index; in niu_process_rx_pkt()
3431 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); in niu_process_rx_pkt()
3443 val = le64_to_cpup(&rp->rcr[index]); in niu_process_rx_pkt()
3453 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> in niu_process_rx_pkt()
3465 skb->ip_summed = CHECKSUM_UNNECESSARY; in niu_process_rx_pkt()
3469 append_size = append_size - skb->len; in niu_process_rx_pkt()
3472 if ((page->index + rp->rbr_block_size) - rcr_size == addr) { in niu_process_rx_pkt()
3474 np->ops->unmap_page(np->device, page->index, in niu_process_rx_pkt()
3476 page->index = 0; in niu_process_rx_pkt()
3478 rp->rbr_refill_pending++; in niu_process_rx_pkt()
3487 rp->rcr_index = index; in niu_process_rx_pkt()
3493 rh = (struct rx_pkt_hdr1 *) skb->data; in niu_process_rx_pkt()
3494 if (np->dev->features & NETIF_F_RXHASH) in niu_process_rx_pkt()
3496 ((u32)rh->hashval2_0 << 24 | in niu_process_rx_pkt()
3497 (u32)rh->hashval2_1 << 16 | in niu_process_rx_pkt()
3498 (u32)rh->hashval1_1 << 8 | in niu_process_rx_pkt()
3499 (u32)rh->hashval1_2 << 0), in niu_process_rx_pkt()
3503 rp->rx_packets++; in niu_process_rx_pkt()
3504 rp->rx_bytes += skb->len; in niu_process_rx_pkt()
3506 skb->protocol = eth_type_trans(skb, np->dev); in niu_process_rx_pkt()
3507 skb_record_rx_queue(skb, rp->rx_channel); in niu_process_rx_pkt()
3515 int blocks_per_page = rp->rbr_blocks_per_page; in niu_rbr_fill()
3516 int err, index = rp->rbr_index; in niu_rbr_fill()
3519 while (index < (rp->rbr_table_size - blocks_per_page)) { in niu_rbr_fill()
3527 rp->rbr_index = index; in niu_rbr_fill()
3538 page = rp->rxhash[i]; in niu_rbr_free()
3541 u64 base = page->index; in niu_rbr_free()
3543 np->ops->unmap_page(np->device, base, PAGE_SIZE, in niu_rbr_free()
3545 page->index = 0; in niu_rbr_free()
3554 for (i = 0; i < rp->rbr_table_size; i++) in niu_rbr_free()
3555 rp->rbr[i] = cpu_to_le32(0); in niu_rbr_free()
3556 rp->rbr_index = 0; in niu_rbr_free()
3561 struct tx_buff_info *tb = &rp->tx_buffs[idx]; in release_tx_packet()
3562 struct sk_buff *skb = tb->skb; in release_tx_packet()
3567 tp = (struct tx_pkt_hdr *) skb->data; in release_tx_packet()
3568 tx_flags = le64_to_cpup(&tp->flags); in release_tx_packet()
3570 rp->tx_packets++; in release_tx_packet()
3571 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - in release_tx_packet()
3575 np->ops->unmap_single(np->device, tb->mapping, in release_tx_packet()
3578 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) in release_tx_packet()
3579 rp->mark_pending--; in release_tx_packet()
3581 tb->skb = NULL; in release_tx_packet()
3584 len -= MAX_TX_DESC_LEN; in release_tx_packet()
3587 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in release_tx_packet()
3588 tb = &rp->tx_buffs[idx]; in release_tx_packet()
3589 BUG_ON(tb->skb != NULL); in release_tx_packet()
3590 np->ops->unmap_page(np->device, tb->mapping, in release_tx_packet()
3591 skb_frag_size(&skb_shinfo(skb)->frags[i]), in release_tx_packet()
3601 #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
3610 index = (rp - np->tx_rings); in niu_tx_work()
3611 txq = netdev_get_tx_queue(np->dev, index); in niu_tx_work()
3613 cs = rp->tx_cs; in niu_tx_work()
3618 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & in niu_tx_work()
3621 rp->last_pkt_cnt = tmp; in niu_tx_work()
3623 cons = rp->cons; in niu_tx_work()
3625 netif_printk(np, tx_done, KERN_DEBUG, np->dev, in niu_tx_work()
3628 while (pkt_cnt--) in niu_tx_work()
3631 rp->cons = cons; in niu_tx_work()
3650 * counters, as they are only 16-bit and can overflow quickly, in niu_sync_rx_discard_stats()
3660 int rx_channel = rp->rx_channel; in niu_sync_rx_discard_stats()
3671 rp->rx_errors += misc & RXMISC_COUNT; in niu_sync_rx_discard_stats()
3674 dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", in niu_sync_rx_discard_stats()
3677 netif_printk(np, rx_err, KERN_DEBUG, np->dev, in niu_sync_rx_discard_stats()
3678 "rx-%d: MISC drop=%u over=%u\n", in niu_sync_rx_discard_stats()
3679 rx_channel, misc, misc-limit); in niu_sync_rx_discard_stats()
3686 rp->rx_dropped += wred & RED_DIS_CNT_COUNT; in niu_sync_rx_discard_stats()
3689 dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); in niu_sync_rx_discard_stats()
3691 netif_printk(np, rx_err, KERN_DEBUG, np->dev, in niu_sync_rx_discard_stats()
3692 "rx-%d: WRED drop=%u over=%u\n", in niu_sync_rx_discard_stats()
3693 rx_channel, wred, wred-limit); in niu_sync_rx_discard_stats()
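
Per the comment matched at line 3650, the RXMISC and WRED discard counters are only 16 bits wide, so niu_sync_rx_discard_stats() folds them into 64-bit software totals and resets them before they can wrap unnoticed. A sketch of the idiom with placeholder bit names (the count in the low 16 bits, an overflow flag above it):

    #include <linux/io.h>
    #include <linux/printk.h>

    #define DEMO_CNT_COUNT  0xffff          /* low 16 bits hold the count */
    #define DEMO_CNT_OFLOW  (1 << 16)       /* set if the counter saturated */

    static void demo_sync_discard(void __iomem *reg, u64 *total)
    {
            u64 cnt = readq(reg);

            *total += cnt & DEMO_CNT_COUNT;
            writeq(0, reg);                 /* reset the 16-bit hw counter */
            if (cnt & DEMO_CNT_OFLOW)
                    pr_err("discard counter overflowed; total undercounts\n");
    }
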
3701 struct rxdma_mailbox *mbox = rp->mbox; in niu_rx_work()
3705 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); in niu_rx_work()
3706 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; in niu_rx_work()
3708 stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); in niu_rx_work()
3709 qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); in niu_rx_work()
3711 mbox->rx_dma_ctl_stat = 0; in niu_rx_work()
3712 mbox->rcrstat_a = 0; in niu_rx_work()
3714 netif_printk(np, rx_status, KERN_DEBUG, np->dev, in niu_rx_work()
3716 __func__, rp->rx_channel, (unsigned long long)stat, qlen); in niu_rx_work()
3725 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { in niu_rx_work()
3728 for (i = 0; i < rp->rbr_refill_pending; i++) in niu_rx_work()
3730 rp->rbr_refill_pending = 0; in niu_rx_work()
3737 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); in niu_rx_work()
3748 u64 v0 = lp->v0; in niu_poll_core()
3753 netif_printk(np, intr, KERN_DEBUG, np->dev, in niu_poll_core()
3756 for (i = 0; i < np->num_tx_rings; i++) { in niu_poll_core()
3757 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_poll_core()
3758 if (tx_vec & (1 << rp->tx_channel)) in niu_poll_core()
3760 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); in niu_poll_core()
3763 for (i = 0; i < np->num_rx_rings; i++) { in niu_poll_core()
3764 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_poll_core()
3766 if (rx_vec & (1 << rp->rx_channel)) { in niu_poll_core()
3769 this_work_done = niu_rx_work(&lp->napi, np, rp, in niu_poll_core()
3772 budget -= this_work_done; in niu_poll_core()
3775 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); in niu_poll_core()
3784 struct niu *np = lp->np; in niu_poll()
3799 netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); in niu_log_rxchan_errors()
3835 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); in niu_rx_error()
3841 err = -EINVAL; in niu_rx_error()
3844 netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", in niu_rx_error()
3845 rp->rx_channel, in niu_rx_error()
3851 nw64(RX_DMA_CTL_STAT(rp->rx_channel), in niu_rx_error()
3860 netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); in niu_log_txchan_errors()
3886 cs = nr64(TX_CS(rp->tx_channel)); in niu_tx_error()
3887 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); in niu_tx_error()
3888 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); in niu_tx_error()
3890 netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", in niu_tx_error()
3891 rp->tx_channel, in niu_tx_error()
3898 return -ENODEV; in niu_tx_error()
3906 if (np->flags & NIU_FLAGS_XMAC) { in niu_mif_interrupt()
3913 netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", in niu_mif_interrupt()
3916 return -ENODEV; in niu_mif_interrupt()
3921 struct niu_xmac_stats *mp = &np->mac_stats.xmac; in niu_xmac_interrupt()
3926 mp->tx_frames += TXMAC_FRM_CNT_COUNT; in niu_xmac_interrupt()
3928 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; in niu_xmac_interrupt()
3930 mp->tx_fifo_errors++; in niu_xmac_interrupt()
3932 mp->tx_overflow_errors++; in niu_xmac_interrupt()
3934 mp->tx_max_pkt_size_errors++; in niu_xmac_interrupt()
3936 mp->tx_underflow_errors++; in niu_xmac_interrupt()
3940 mp->rx_local_faults++; in niu_xmac_interrupt()
3942 mp->rx_remote_faults++; in niu_xmac_interrupt()
3944 mp->rx_link_faults += LINK_FAULT_CNT_COUNT; in niu_xmac_interrupt()
3946 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; in niu_xmac_interrupt()
3948 mp->rx_frags += RXMAC_FRAG_CNT_COUNT; in niu_xmac_interrupt()
3950 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; in niu_xmac_interrupt()
3952 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; in niu_xmac_interrupt()
3954 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; in niu_xmac_interrupt()
3956 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; in niu_xmac_interrupt()
3958 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; in niu_xmac_interrupt()
3960 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; in niu_xmac_interrupt()
3962 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; in niu_xmac_interrupt()
3964 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; in niu_xmac_interrupt()
3966 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; in niu_xmac_interrupt()
3968 mp->rx_octets += RXMAC_BT_CNT_COUNT; in niu_xmac_interrupt()
3970 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; in niu_xmac_interrupt()
3972 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; in niu_xmac_interrupt()
3974 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; in niu_xmac_interrupt()
3976 mp->rx_underflows++; in niu_xmac_interrupt()
3978 mp->rx_overflows++; in niu_xmac_interrupt()
3982 mp->pause_off_state++; in niu_xmac_interrupt()
3984 mp->pause_on_state++; in niu_xmac_interrupt()
3986 mp->pause_received++; in niu_xmac_interrupt()
3991 struct niu_bmac_stats *mp = &np->mac_stats.bmac; in niu_bmac_interrupt()
3996 mp->tx_underflow_errors++; in niu_bmac_interrupt()
3998 mp->tx_max_pkt_size_errors++; in niu_bmac_interrupt()
4000 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; in niu_bmac_interrupt()
4002 mp->tx_frames += BTXMAC_FRM_CNT_COUNT; in niu_bmac_interrupt()
4006 mp->rx_overflows++; in niu_bmac_interrupt()
4008 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; in niu_bmac_interrupt()
4010 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; in niu_bmac_interrupt()
4012 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; in niu_bmac_interrupt()
4014 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; in niu_bmac_interrupt()
4018 mp->pause_off_state++; in niu_bmac_interrupt()
4020 mp->pause_on_state++; in niu_bmac_interrupt()
4022 mp->pause_received++; in niu_bmac_interrupt()
4027 if (np->flags & NIU_FLAGS_XMAC) in niu_mac_interrupt()
4037 netdev_err(np->dev, "Core device errors ( "); in niu_log_device_error()
4069 netdev_err(np->dev, "Core device error, stat[%llx]\n", in niu_device_error()
4074 return -ENODEV; in niu_device_error()
4083 lp->v0 = v0; in niu_slowpath_interrupt()
4084 lp->v1 = v1; in niu_slowpath_interrupt()
4085 lp->v2 = v2; in niu_slowpath_interrupt()
4090 for (i = 0; i < np->num_rx_rings; i++) { in niu_slowpath_interrupt()
4091 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_slowpath_interrupt()
4093 if (rx_vec & (1 << rp->rx_channel)) { in niu_slowpath_interrupt()
4099 nw64(RX_DMA_CTL_STAT(rp->rx_channel), in niu_slowpath_interrupt()
4108 for (i = 0; i < np->num_tx_rings; i++) { in niu_slowpath_interrupt()
4109 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_slowpath_interrupt()
4111 if (tx_vec & (1 << rp->tx_channel)) { in niu_slowpath_interrupt()
4145 struct rxdma_mailbox *mbox = rp->mbox; in niu_rxchan_intr()
4146 u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); in niu_rxchan_intr()
4150 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); in niu_rxchan_intr()
4152 netif_printk(np, intr, KERN_DEBUG, np->dev, in niu_rxchan_intr()
4159 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); in niu_txchan_intr()
4161 netif_printk(np, intr, KERN_DEBUG, np->dev, in niu_txchan_intr()
4162 "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); in niu_txchan_intr()
4167 struct niu_parent *parent = np->parent; in __niu_fastpath_interrupt()
4174 for (i = 0; i < np->num_rx_rings; i++) { in __niu_fastpath_interrupt()
4175 struct rx_ring_info *rp = &np->rx_rings[i]; in __niu_fastpath_interrupt()
4176 int ldn = LDN_RXDMA(rp->rx_channel); in __niu_fastpath_interrupt()
4178 if (parent->ldg_map[ldn] != ldg) in __niu_fastpath_interrupt()
4182 if (rx_vec & (1 << rp->rx_channel)) in __niu_fastpath_interrupt()
4186 for (i = 0; i < np->num_tx_rings; i++) { in __niu_fastpath_interrupt()
4187 struct tx_ring_info *rp = &np->tx_rings[i]; in __niu_fastpath_interrupt()
4188 int ldn = LDN_TXDMA(rp->tx_channel); in __niu_fastpath_interrupt()
4190 if (parent->ldg_map[ldn] != ldg) in __niu_fastpath_interrupt()
4194 if (tx_vec & (1 << rp->tx_channel)) in __niu_fastpath_interrupt()
4202 if (likely(napi_schedule_prep(&lp->napi))) { in niu_schedule_napi()
4203 lp->v0 = v0; in niu_schedule_napi()
4204 lp->v1 = v1; in niu_schedule_napi()
4205 lp->v2 = v2; in niu_schedule_napi()
4206 __niu_fastpath_interrupt(np, lp->ldg_num, v0); in niu_schedule_napi()
4207 __napi_schedule(&lp->napi); in niu_schedule_napi()
4214 struct niu *np = lp->np; in niu_interrupt()
4215 int ldg = lp->ldg_num; in niu_interrupt()
4223 spin_lock_irqsave(&np->lock, flags); in niu_interrupt()
4236 spin_unlock_irqrestore(&np->lock, flags); in niu_interrupt()
4250 spin_unlock_irqrestore(&np->lock, flags); in niu_interrupt()
4257 if (rp->mbox) { in niu_free_rx_ring_info()
4258 np->ops->free_coherent(np->device, in niu_free_rx_ring_info()
4260 rp->mbox, rp->mbox_dma); in niu_free_rx_ring_info()
4261 rp->mbox = NULL; in niu_free_rx_ring_info()
4263 if (rp->rcr) { in niu_free_rx_ring_info()
4264 np->ops->free_coherent(np->device, in niu_free_rx_ring_info()
4266 rp->rcr, rp->rcr_dma); in niu_free_rx_ring_info()
4267 rp->rcr = NULL; in niu_free_rx_ring_info()
4268 rp->rcr_table_size = 0; in niu_free_rx_ring_info()
4269 rp->rcr_index = 0; in niu_free_rx_ring_info()
4271 if (rp->rbr) { in niu_free_rx_ring_info()
4274 np->ops->free_coherent(np->device, in niu_free_rx_ring_info()
4276 rp->rbr, rp->rbr_dma); in niu_free_rx_ring_info()
4277 rp->rbr = NULL; in niu_free_rx_ring_info()
4278 rp->rbr_table_size = 0; in niu_free_rx_ring_info()
4279 rp->rbr_index = 0; in niu_free_rx_ring_info()
4281 kfree(rp->rxhash); in niu_free_rx_ring_info()
4282 rp->rxhash = NULL; in niu_free_rx_ring_info()
4287 if (rp->mbox) { in niu_free_tx_ring_info()
4288 np->ops->free_coherent(np->device, in niu_free_tx_ring_info()
4290 rp->mbox, rp->mbox_dma); in niu_free_tx_ring_info()
4291 rp->mbox = NULL; in niu_free_tx_ring_info()
4293 if (rp->descr) { in niu_free_tx_ring_info()
4297 if (rp->tx_buffs[i].skb) in niu_free_tx_ring_info()
4301 np->ops->free_coherent(np->device, in niu_free_tx_ring_info()
4303 rp->descr, rp->descr_dma); in niu_free_tx_ring_info()
4304 rp->descr = NULL; in niu_free_tx_ring_info()
4305 rp->pending = 0; in niu_free_tx_ring_info()
4306 rp->prod = 0; in niu_free_tx_ring_info()
4307 rp->cons = 0; in niu_free_tx_ring_info()
4308 rp->wrap_bit = 0; in niu_free_tx_ring_info()
4316 if (np->rx_rings) { in niu_free_channels()
4317 for (i = 0; i < np->num_rx_rings; i++) { in niu_free_channels()
4318 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_free_channels()
4322 kfree(np->rx_rings); in niu_free_channels()
4323 np->rx_rings = NULL; in niu_free_channels()
4324 np->num_rx_rings = 0; in niu_free_channels()
4327 if (np->tx_rings) { in niu_free_channels()
4328 for (i = 0; i < np->num_tx_rings; i++) { in niu_free_channels()
4329 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_free_channels()
4333 kfree(np->tx_rings); in niu_free_channels()
4334 np->tx_rings = NULL; in niu_free_channels()
4335 np->num_tx_rings = 0; in niu_free_channels()
4344 rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *), in niu_alloc_rx_ring_info()
4346 if (!rp->rxhash) in niu_alloc_rx_ring_info()
4347 return -ENOMEM; in niu_alloc_rx_ring_info()
4349 rp->mbox = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4351 &rp->mbox_dma, GFP_KERNEL); in niu_alloc_rx_ring_info()
4352 if (!rp->mbox) in niu_alloc_rx_ring_info()
4353 return -ENOMEM; in niu_alloc_rx_ring_info()
4354 if ((unsigned long)rp->mbox & (64UL - 1)) { in niu_alloc_rx_ring_info()
4355 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", in niu_alloc_rx_ring_info()
4356 rp->mbox); in niu_alloc_rx_ring_info()
4357 return -EINVAL; in niu_alloc_rx_ring_info()
4360 rp->rcr = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4362 &rp->rcr_dma, GFP_KERNEL); in niu_alloc_rx_ring_info()
4363 if (!rp->rcr) in niu_alloc_rx_ring_info()
4364 return -ENOMEM; in niu_alloc_rx_ring_info()
4365 if ((unsigned long)rp->rcr & (64UL - 1)) { in niu_alloc_rx_ring_info()
4366 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", in niu_alloc_rx_ring_info()
4367 rp->rcr); in niu_alloc_rx_ring_info()
4368 return -EINVAL; in niu_alloc_rx_ring_info()
4370 rp->rcr_table_size = MAX_RCR_RING_SIZE; in niu_alloc_rx_ring_info()
4371 rp->rcr_index = 0; in niu_alloc_rx_ring_info()
4373 rp->rbr = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4375 &rp->rbr_dma, GFP_KERNEL); in niu_alloc_rx_ring_info()
4376 if (!rp->rbr) in niu_alloc_rx_ring_info()
4377 return -ENOMEM; in niu_alloc_rx_ring_info()
4378 if ((unsigned long)rp->rbr & (64UL - 1)) { in niu_alloc_rx_ring_info()
4379 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", in niu_alloc_rx_ring_info()
4380 rp->rbr); in niu_alloc_rx_ring_info()
4381 return -EINVAL; in niu_alloc_rx_ring_info()
4383 rp->rbr_table_size = MAX_RBR_RING_SIZE; in niu_alloc_rx_ring_info()
4384 rp->rbr_index = 0; in niu_alloc_rx_ring_info()
4385 rp->rbr_pending = 0; in niu_alloc_rx_ring_info()
4392 int mtu = np->dev->mtu; in niu_set_max_burst()
4397 rp->max_burst = mtu + 32; in niu_set_max_burst()
4398 if (rp->max_burst > 4096) in niu_set_max_burst()
4399 rp->max_burst = 4096; in niu_set_max_burst()
4407 rp->mbox = np->ops->alloc_coherent(np->device, in niu_alloc_tx_ring_info()
4409 &rp->mbox_dma, GFP_KERNEL); in niu_alloc_tx_ring_info()
4410 if (!rp->mbox) in niu_alloc_tx_ring_info()
4411 return -ENOMEM; in niu_alloc_tx_ring_info()
4412 if ((unsigned long)rp->mbox & (64UL - 1)) { in niu_alloc_tx_ring_info()
4413 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", in niu_alloc_tx_ring_info()
4414 rp->mbox); in niu_alloc_tx_ring_info()
4415 return -EINVAL; in niu_alloc_tx_ring_info()
4418 rp->descr = np->ops->alloc_coherent(np->device, in niu_alloc_tx_ring_info()
4420 &rp->descr_dma, GFP_KERNEL); in niu_alloc_tx_ring_info()
4421 if (!rp->descr) in niu_alloc_tx_ring_info()
4422 return -ENOMEM; in niu_alloc_tx_ring_info()
4423 if ((unsigned long)rp->descr & (64UL - 1)) { in niu_alloc_tx_ring_info()
4424 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", in niu_alloc_tx_ring_info()
4425 rp->descr); in niu_alloc_tx_ring_info()
4426 return -EINVAL; in niu_alloc_tx_ring_info()
4429 rp->pending = MAX_TX_RING_SIZE; in niu_alloc_tx_ring_info()
4430 rp->prod = 0; in niu_alloc_tx_ring_info()
4431 rp->cons = 0; in niu_alloc_tx_ring_info()
4432 rp->wrap_bit = 0; in niu_alloc_tx_ring_info()
4435 rp->mark_freq = rp->pending / 4; in niu_alloc_tx_ring_info()
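
Every mailbox and ring allocation above is followed by the same guard: the hardware expects these DMA blocks on 64-byte boundaries, so the driver checks the address returned by its alloc_coherent op before use. A sketch of that guard on top of the generic DMA API; dma_alloc_coherent() returns at least page-aligned memory, so the check is defensive:

    #include <linux/dma-mapping.h>

    static void *demo_alloc_aligned(struct device *dev, size_t size,
                                    dma_addr_t *dma)
    {
            void *p = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

            if (!p)
                    return NULL;
            if ((unsigned long)p & (64UL - 1)) {    /* must be 64-byte aligned */
                    dma_free_coherent(dev, size, p, *dma);
                    return NULL;
            }
            return p;
    }
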
4448 rp->rbr_block_size = 1 << bss; in niu_size_rbr()
4449 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); in niu_size_rbr()
4451 rp->rbr_sizes[0] = 256; in niu_size_rbr()
4452 rp->rbr_sizes[1] = 1024; in niu_size_rbr()
4453 if (np->dev->mtu > ETH_DATA_LEN) { in niu_size_rbr()
4456 rp->rbr_sizes[2] = 4096; in niu_size_rbr()
4460 rp->rbr_sizes[2] = 8192; in niu_size_rbr()
4464 rp->rbr_sizes[2] = 2048; in niu_size_rbr()
4466 rp->rbr_sizes[3] = rp->rbr_block_size; in niu_size_rbr()
4471 struct niu_parent *parent = np->parent; in niu_alloc_channels()
4478 port = np->port; in niu_alloc_channels()
4481 first_rx_channel += parent->rxchan_per_port[i]; in niu_alloc_channels()
4482 first_tx_channel += parent->txchan_per_port[i]; in niu_alloc_channels()
4485 num_rx_rings = parent->rxchan_per_port[port]; in niu_alloc_channels()
4486 num_tx_rings = parent->txchan_per_port[port]; in niu_alloc_channels()
4490 err = -ENOMEM; in niu_alloc_channels()
4494 np->num_rx_rings = num_rx_rings; in niu_alloc_channels()
4496 np->rx_rings = rx_rings; in niu_alloc_channels()
4498 netif_set_real_num_rx_queues(np->dev, num_rx_rings); in niu_alloc_channels()
4500 for (i = 0; i < np->num_rx_rings; i++) { in niu_alloc_channels()
4501 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_alloc_channels()
4503 rp->np = np; in niu_alloc_channels()
4504 rp->rx_channel = first_rx_channel + i; in niu_alloc_channels()
4513 rp->nonsyn_window = 64; in niu_alloc_channels()
4514 rp->nonsyn_threshold = rp->rcr_table_size - 64; in niu_alloc_channels()
4515 rp->syn_window = 64; in niu_alloc_channels()
4516 rp->syn_threshold = rp->rcr_table_size - 64; in niu_alloc_channels()
4517 rp->rcr_pkt_threshold = 16; in niu_alloc_channels()
4518 rp->rcr_timeout = 8; in niu_alloc_channels()
4519 rp->rbr_kick_thresh = RBR_REFILL_MIN; in niu_alloc_channels()
4520 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) in niu_alloc_channels()
4521 rp->rbr_kick_thresh = rp->rbr_blocks_per_page; in niu_alloc_channels()
4530 err = -ENOMEM; in niu_alloc_channels()
4534 np->num_tx_rings = num_tx_rings; in niu_alloc_channels()
4536 np->tx_rings = tx_rings; in niu_alloc_channels()
4538 netif_set_real_num_tx_queues(np->dev, num_tx_rings); in niu_alloc_channels()
4540 for (i = 0; i < np->num_tx_rings; i++) { in niu_alloc_channels()
4541 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_alloc_channels()
4543 rp->np = np; in niu_alloc_channels()
4544 rp->tx_channel = first_tx_channel + i; in niu_alloc_channels()
4562 while (--limit > 0) { in niu_tx_cs_sng_poll()
4567 return -ENODEV; in niu_tx_cs_sng_poll()
4584 while (--limit > 0) { in niu_tx_cs_reset_poll()
4589 return -ENODEV; in niu_tx_cs_reset_poll()
4619 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; in niu_tx_channel_lpage_init()
4635 mask = (u64)1 << np->port; in niu_txc_enable_port()
4654 val &= ~TXC_INT_MASK_VAL(np->port); in niu_txc_set_imask()
4655 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); in niu_txc_set_imask()
4666 for (i = 0; i < np->num_tx_rings; i++) in niu_txc_port_dma_enable()
4667 val |= (1 << np->tx_rings[i].tx_channel); in niu_txc_port_dma_enable()
4669 nw64(TXC_PORT_DMA(np->port), val); in niu_txc_port_dma_enable()
4674 int err, channel = rp->tx_channel; in niu_init_one_tx_channel()
4689 nw64(TXC_DMA_MAX(channel), rp->max_burst); in niu_init_one_tx_channel()
4692 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | in niu_init_one_tx_channel()
4694 netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n", in niu_init_one_tx_channel()
4695 channel, (unsigned long long)rp->descr_dma); in niu_init_one_tx_channel()
4696 return -EINVAL; in niu_init_one_tx_channel()
4699 /* The length field in TX_RNG_CFIG is measured in 64-byte in niu_init_one_tx_channel()
4700 * blocks. rp->pending is the number of TX descriptors in in niu_init_one_tx_channel()
4704 ring_len = (rp->pending / 8); in niu_init_one_tx_channel()
4707 rp->descr_dma); in niu_init_one_tx_channel()
4710 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || in niu_init_one_tx_channel()
4711 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { in niu_init_one_tx_channel()
4712 netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", in niu_init_one_tx_channel()
4713 channel, (unsigned long long)rp->mbox_dma); in niu_init_one_tx_channel()
4714 return -EINVAL; in niu_init_one_tx_channel()
4716 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); in niu_init_one_tx_channel()
4717 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); in niu_init_one_tx_channel()
4721 rp->last_pkt_cnt = 0; in niu_init_one_tx_channel()
4728 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; in niu_init_rdc_groups()
4729 int i, first_table_num = tp->first_table_num; in niu_init_rdc_groups()
4731 for (i = 0; i < tp->num_tables; i++) { in niu_init_rdc_groups()
4732 struct rdc_table *tbl = &tp->tables[i]; in niu_init_rdc_groups()
4738 tbl->rxdma_channel[slot]); in niu_init_rdc_groups()
4741 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); in niu_init_rdc_groups()
4746 int type = phy_decode(np->parent->port_phy, np->port); in niu_init_drr_weight()
4759 nw64(PT_DRR_WT(np->port), val); in niu_init_drr_weight()
4764 struct niu_parent *parent = np->parent; in niu_init_hostinfo()
4765 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; in niu_init_hostinfo()
4767 int first_rdc_table = tp->first_table_num; in niu_init_hostinfo()
4805 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; in niu_rx_channel_lpage_init()
4816 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | in niu_rx_channel_wred_init()
4817 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | in niu_rx_channel_wred_init()
4818 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | in niu_rx_channel_wred_init()
4819 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); in niu_rx_channel_wred_init()
4820 nw64(RDC_RED_PARA(rp->rx_channel), val); in niu_rx_channel_wred_init()
4828 switch (rp->rbr_block_size) { in niu_compute_rbr_cfig_b()
4842 return -EINVAL; in niu_compute_rbr_cfig_b()
4845 switch (rp->rbr_sizes[2]) { in niu_compute_rbr_cfig_b()
4860 return -EINVAL; in niu_compute_rbr_cfig_b()
4863 switch (rp->rbr_sizes[1]) { in niu_compute_rbr_cfig_b()
4878 return -EINVAL; in niu_compute_rbr_cfig_b()
4881 switch (rp->rbr_sizes[0]) { in niu_compute_rbr_cfig_b()
4896 return -EINVAL; in niu_compute_rbr_cfig_b()
4915 while (--limit > 0) { in niu_enable_rx_channel()
4921 return -ENODEV; in niu_enable_rx_channel()
4927 int err, channel = rp->rx_channel; in niu_init_one_rx_channel()
4946 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); in niu_init_one_rx_channel()
4948 ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | in niu_init_one_rx_channel()
4951 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | in niu_init_one_rx_channel()
4952 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); in niu_init_one_rx_channel()
4958 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | in niu_init_one_rx_channel()
4959 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); in niu_init_one_rx_channel()
4961 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | in niu_init_one_rx_channel()
4963 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); in niu_init_one_rx_channel()
4969 nw64(RBR_KICK(channel), rp->rbr_index); in niu_init_one_rx_channel()
4985 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); in niu_init_rx_channels()
4998 for (i = 0; i < np->num_rx_rings; i++) { in niu_init_rx_channels()
4999 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_init_rx_channels()
5011 struct niu_parent *parent = np->parent; in niu_set_ip_frag_rule()
5012 struct niu_classifier *cp = &np->clas; in niu_set_ip_frag_rule()
5016 index = cp->tcam_top; in niu_set_ip_frag_rule()
5017 tp = &parent->tcam[index]; in niu_set_ip_frag_rule()
5023 tp->key[1] = TCAM_V4KEY1_NOPORT; in niu_set_ip_frag_rule()
5024 tp->key_mask[1] = TCAM_V4KEY1_NOPORT; in niu_set_ip_frag_rule()
5025 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | in niu_set_ip_frag_rule()
5027 err = tcam_write(np, index, tp->key, tp->key_mask); in niu_set_ip_frag_rule()
5030 err = tcam_assoc_write(np, index, tp->assoc_data); in niu_set_ip_frag_rule()
5033 tp->valid = 1; in niu_set_ip_frag_rule()
5034 cp->tcam_valid_entries++; in niu_set_ip_frag_rule()
5041 struct niu_parent *parent = np->parent; in niu_init_classifier_hw()
5042 struct niu_classifier *cp = &np->clas; in niu_init_classifier_hw()
5045 nw64(H1POLY, cp->h1_init); in niu_init_classifier_hw()
5046 nw64(H2POLY, cp->h2_init); in niu_init_classifier_hw()
5053 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; in niu_init_classifier_hw()
5055 vlan_tbl_write(np, i, np->port, in niu_init_classifier_hw()
5056 vp->vlan_pref, vp->rdc_num); in niu_init_classifier_hw()
5059 for (i = 0; i < cp->num_alt_mac_mappings; i++) { in niu_init_classifier_hw()
5060 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; in niu_init_classifier_hw()
5062 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, in niu_init_classifier_hw()
5063 ap->rdc_num, ap->mac_pref); in niu_init_classifier_hw()
5069 int index = i - CLASS_CODE_USER_PROG1; in niu_init_classifier_hw()
5071 err = niu_set_tcam_key(np, i, parent->tcam_key[index]); in niu_init_classifier_hw()
5074 err = niu_set_flow_key(np, i, parent->flow_key[index]); in niu_init_classifier_hw()
5099 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); in niu_zcp_write()
5112 netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n", in niu_zcp_read()
5120 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); in niu_zcp_read()
5125 netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", in niu_zcp_read()
5143 val |= RESET_CFIFO_RST(np->port); in niu_zcp_cfifo_reset()
5147 val &= ~RESET_CFIFO_RST(np->port); in niu_zcp_cfifo_reset()
5156 if (np->parent->plat_type != PLAT_TYPE_NIU) { in niu_init_zcp()
5157 if (np->port == 0 || np->port == 1) in niu_init_zcp()
5180 nw64(CFIFO_ECC(np->port), 0); in niu_init_zcp()
5223 if (np->parent->plat_type != PLAT_TYPE_NIU) { in niu_init_ipp()
5224 if (np->port == 0 || np->port == 1) in niu_init_ipp()
5274 if ((np->flags & NIU_FLAGS_10G) != 0 && in niu_handle_led()
5275 (np->flags & NIU_FLAGS_FIBER) != 0) { in niu_handle_led()
5290 struct niu_link_config *lp = &np->link_config; in niu_init_xif_xmac()
5293 if (np->flags & NIU_FLAGS_XCVR_SERDES) { in niu_init_xif_xmac()
5304 if (lp->loopback_mode == LOOPBACK_MAC) { in niu_init_xif_xmac()
5311 if (np->flags & NIU_FLAGS_10G) { in niu_init_xif_xmac()
5315 if (!(np->flags & NIU_FLAGS_FIBER) && in niu_init_xif_xmac()
5316 !(np->flags & NIU_FLAGS_XCVR_SERDES)) in niu_init_xif_xmac()
5324 if (lp->active_speed == SPEED_100) in niu_init_xif_xmac()
5333 if (np->flags & NIU_FLAGS_10G) { in niu_init_xif_xmac()
5336 if (lp->active_speed == SPEED_1000) in niu_init_xif_xmac()
5347 struct niu_link_config *lp = &np->link_config; in niu_init_xif_bmac()
5352 if (lp->loopback_mode == LOOPBACK_MAC) in niu_init_xif_bmac()
5357 if (lp->active_speed == SPEED_1000) in niu_init_xif_bmac()
5365 if (!(np->flags & NIU_FLAGS_10G) && in niu_init_xif_bmac()
5366 !(np->flags & NIU_FLAGS_FIBER) && in niu_init_xif_bmac()
5367 lp->active_speed == SPEED_100) in niu_init_xif_bmac()
5377 if (np->flags & NIU_FLAGS_XMAC) in niu_init_xif()
5389 while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { in niu_pcs_mii_reset()
5401 while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { in niu_xpcs_reset()
5409 struct niu_link_config *lp = &np->link_config; in niu_init_pcs()
5412 switch (np->flags & (NIU_FLAGS_10G | in niu_init_pcs()
5426 if (!(np->flags & NIU_FLAGS_XMAC)) in niu_init_pcs()
5427 return -EINVAL; in niu_init_pcs()
5437 if (lp->loopback_mode == LOOPBACK_PHY) in niu_init_pcs()
5465 return -EINVAL; in niu_init_pcs()
5485 while (--limit >= 0) { in niu_reset_tx_bmac()
5491 dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", in niu_reset_tx_bmac()
5492 np->port, in niu_reset_tx_bmac()
5494 return -ENODEV; in niu_reset_tx_bmac()
5502 if (np->flags & NIU_FLAGS_XMAC) in niu_reset_tx_mac()
5524 if (np->flags & NIU_FLAGS_10G) { in niu_init_tx_xmac()
5566 if (np->dev->mtu > ETH_DATA_LEN) in niu_init_tx_mac()
5576 if (np->flags & NIU_FLAGS_XMAC) in niu_init_tx_mac()
5589 while (--limit >= 0) { in niu_reset_rx_xmac()
5596 dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", in niu_reset_rx_xmac()
5597 np->port, in niu_reset_rx_xmac()
5599 return -ENODEV; in niu_reset_rx_xmac()
5611 while (--limit >= 0) { in niu_reset_rx_bmac()
5617 dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", in niu_reset_rx_bmac()
5618 np->port, in niu_reset_rx_bmac()
5620 return -ENODEV; in niu_reset_rx_bmac()
5628 if (np->flags & NIU_FLAGS_XMAC) in niu_reset_rx_mac()
5636 struct niu_parent *parent = np->parent; in niu_init_rx_xmac()
5637 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; in niu_init_rx_xmac()
5638 int first_rdc_table = tp->first_table_num; in niu_init_rx_xmac()
5688 struct niu_parent *parent = np->parent; in niu_init_rx_bmac()
5689 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; in niu_init_rx_bmac()
5690 int first_rdc_table = tp->first_table_num; in niu_init_rx_bmac()
5723 niu_set_primary_mac(np, np->dev->dev_addr); in niu_init_rx_mac()
5725 if (np->flags & NIU_FLAGS_XMAC) in niu_init_rx_mac()
5755 if (np->flags & NIU_FLAGS_XMAC) in niu_enable_tx_mac()
5768 if (np->flags & NIU_FLAGS_MCAST) in niu_enable_rx_xmac()
5770 if (np->flags & NIU_FLAGS_PROMISC) in niu_enable_rx_xmac()
5787 if (np->flags & NIU_FLAGS_MCAST) in niu_enable_rx_bmac()
5789 if (np->flags & NIU_FLAGS_PROMISC) in niu_enable_rx_bmac()
5801 if (np->flags & NIU_FLAGS_XMAC) in niu_enable_rx_mac()
5840 (void) niu_tx_channel_stop(np, rp->tx_channel); in niu_stop_one_tx_channel()
5847 for (i = 0; i < np->num_tx_rings; i++) { in niu_stop_tx_channels()
5848 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_stop_tx_channels()
5856 (void) niu_tx_channel_reset(np, rp->tx_channel); in niu_reset_one_tx_channel()
5863 for (i = 0; i < np->num_tx_rings; i++) { in niu_reset_tx_channels()
5864 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_reset_tx_channels()
5872 (void) niu_enable_rx_channel(np, rp->rx_channel, 0); in niu_stop_one_rx_channel()
5879 for (i = 0; i < np->num_rx_rings; i++) { in niu_stop_rx_channels()
5880 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_stop_rx_channels()
5888 int channel = rp->rx_channel; in niu_reset_one_rx_channel()
5900 for (i = 0; i < np->num_rx_rings; i++) { in niu_reset_rx_channels()
5901 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_reset_rx_channels()
5915 while (--limit >= 0 && (rd != wr)) { in niu_disable_ipp()
5921 netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n", in niu_disable_ipp()
5940 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n"); in niu_init_hw()
5945 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n"); in niu_init_hw()
5946 for (i = 0; i < np->num_tx_rings; i++) { in niu_init_hw()
5947 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_init_hw()
5954 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n"); in niu_init_hw()
5959 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n"); in niu_init_hw()
5964 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n"); in niu_init_hw()
5969 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n"); in niu_init_hw()
5974 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n"); in niu_init_hw()
5982 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n"); in niu_init_hw()
5986 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n"); in niu_init_hw()
5991 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n"); in niu_init_hw()
6000 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n"); in niu_stop_hw()
6003 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n"); in niu_stop_hw()
6006 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n"); in niu_stop_hw()
6009 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n"); in niu_stop_hw()
6012 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n"); in niu_stop_hw()
6015 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n"); in niu_stop_hw()
6018 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n"); in niu_stop_hw()
6024 int port = np->port; in niu_set_irq_name()
6027 sprintf(np->irq_name[0], "%s:MAC", np->dev->name); in niu_set_irq_name()
6030 sprintf(np->irq_name[1], "%s:MIF", np->dev->name); in niu_set_irq_name()
6031 sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); in niu_set_irq_name()
6035 for (i = 0; i < np->num_ldg - j; i++) { in niu_set_irq_name()
6036 if (i < np->num_rx_rings) in niu_set_irq_name()
6037 sprintf(np->irq_name[i+j], "%s-rx-%d", in niu_set_irq_name()
6038 np->dev->name, i); in niu_set_irq_name()
6039 else if (i < np->num_tx_rings + np->num_rx_rings) in niu_set_irq_name()
6040 sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name, in niu_set_irq_name()
6041 i - np->num_rx_rings); in niu_set_irq_name()
6052 for (i = 0; i < np->num_ldg; i++) { in niu_request_irq()
6053 struct niu_ldg *lp = &np->ldg[i]; in niu_request_irq()
6055 err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED, in niu_request_irq()
6056 np->irq_name[i], lp); in niu_request_irq()
6066 struct niu_ldg *lp = &np->ldg[j]; in niu_request_irq()
6068 free_irq(lp->irq, lp); in niu_request_irq()
6077 for (i = 0; i < np->num_ldg; i++) { in niu_free_irq()
6078 struct niu_ldg *lp = &np->ldg[i]; in niu_free_irq()
6080 free_irq(lp->irq, lp); in niu_free_irq()
6088 for (i = 0; i < np->num_ldg; i++) in niu_enable_napi()
6089 napi_enable(&np->ldg[i].napi); in niu_enable_napi()
6096 for (i = 0; i < np->num_ldg; i++) in niu_disable_napi()
6097 napi_disable(&np->ldg[i].napi); in niu_disable_napi()
6121 spin_lock_irq(&np->lock); in niu_open()
6125 timer_setup(&np->timer, niu_timer, 0); in niu_open()
6126 np->timer.expires = jiffies + HZ; in niu_open()
6133 spin_unlock_irq(&np->lock); in niu_open()
6142 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) in niu_open()
6145 add_timer(&np->timer); in niu_open()
6161 cancel_work_sync(&np->reset_task); in niu_full_shutdown()
6166 del_timer_sync(&np->timer); in niu_full_shutdown()
6168 spin_lock_irq(&np->lock); in niu_full_shutdown()
6172 spin_unlock_irq(&np->lock); in niu_full_shutdown()
6192 struct niu_xmac_stats *mp = &np->mac_stats.xmac; in niu_sync_xmac_stats()
6194 mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); in niu_sync_xmac_stats()
6195 mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); in niu_sync_xmac_stats()
6197 mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); in niu_sync_xmac_stats()
6198 mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); in niu_sync_xmac_stats()
6199 mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); in niu_sync_xmac_stats()
6200 mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); in niu_sync_xmac_stats()
6201 mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); in niu_sync_xmac_stats()
6202 mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); in niu_sync_xmac_stats()
6203 mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); in niu_sync_xmac_stats()
6204 mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); in niu_sync_xmac_stats()
6205 mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); in niu_sync_xmac_stats()
6206 mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); in niu_sync_xmac_stats()
6207 mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); in niu_sync_xmac_stats()
6208 mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); in niu_sync_xmac_stats()
6209 mp->rx_octets += nr64_mac(RXMAC_BT_CNT); in niu_sync_xmac_stats()
6210 mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); in niu_sync_xmac_stats()
6211 mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); in niu_sync_xmac_stats()
6212 mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); in niu_sync_xmac_stats()
6217 struct niu_bmac_stats *mp = &np->mac_stats.bmac; in niu_sync_bmac_stats()
6219 mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); in niu_sync_bmac_stats()
6220 mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); in niu_sync_bmac_stats()
6222 mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); in niu_sync_bmac_stats()
6223 mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); in niu_sync_bmac_stats()
6224 mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); in niu_sync_bmac_stats()
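/* Listing note: the line above accumulates rx_crc_errors from the same
 * BRXMAC_ALIGN_ERR_CNT register as rx_align_errors; any distinct BMAC
 * CRC counter is not read here. */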
6225 mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); in niu_sync_bmac_stats()
6230 if (np->flags & NIU_FLAGS_XMAC) in niu_sync_mac_stats()
6245 rx_rings = READ_ONCE(np->rx_rings); in niu_get_rx_stats()
6249 for (i = 0; i < np->num_rx_rings; i++) { in niu_get_rx_stats()
6254 pkts += rp->rx_packets; in niu_get_rx_stats()
6255 bytes += rp->rx_bytes; in niu_get_rx_stats()
6256 dropped += rp->rx_dropped; in niu_get_rx_stats()
6257 errors += rp->rx_errors; in niu_get_rx_stats()
6261 stats->rx_packets = pkts; in niu_get_rx_stats()
6262 stats->rx_bytes = bytes; in niu_get_rx_stats()
6263 stats->rx_dropped = dropped; in niu_get_rx_stats()
6264 stats->rx_errors = errors; in niu_get_rx_stats()
6276 tx_rings = READ_ONCE(np->tx_rings); in niu_get_tx_stats()
6280 for (i = 0; i < np->num_tx_rings; i++) { in niu_get_tx_stats()
6283 pkts += rp->tx_packets; in niu_get_tx_stats()
6284 bytes += rp->tx_bytes; in niu_get_tx_stats()
6285 errors += rp->tx_errors; in niu_get_tx_stats()
6289 stats->tx_packets = pkts; in niu_get_tx_stats()
6290 stats->tx_bytes = bytes; in niu_get_tx_stats()
6291 stats->tx_errors = errors; in niu_get_tx_stats()
6323 if (np->flags & NIU_FLAGS_XMAC) in niu_load_hash()
6337 spin_lock_irqsave(&np->lock, flags); in niu_set_rx_mode()
6340 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); in niu_set_rx_mode()
6341 if (dev->flags & IFF_PROMISC) in niu_set_rx_mode()
6342 np->flags |= NIU_FLAGS_PROMISC; in niu_set_rx_mode()
6343 if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev))) in niu_set_rx_mode()
6344 np->flags |= NIU_FLAGS_MCAST; in niu_set_rx_mode()
6349 np->flags |= NIU_FLAGS_PROMISC; in niu_set_rx_mode()
6356 err = niu_set_alt_mac(np, index, ha->addr); in niu_set_rx_mode()
6369 if (np->flags & NIU_FLAGS_XMAC) in niu_set_rx_mode()
6380 if (dev->flags & IFF_ALLMULTI) { in niu_set_rx_mode()
6385 u32 crc = ether_crc_le(ETH_ALEN, ha->addr); in niu_set_rx_mode()
6388 hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); in niu_set_rx_mode()
6392 if (np->flags & NIU_FLAGS_MCAST) in niu_set_rx_mode()
6396 spin_unlock_irqrestore(&np->lock, flags); in niu_set_rx_mode()
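/*
 * Annotation (not part of niu.c): the multicast path above folds each
 * address through ether_crc_le() and sets one bit in a 256-bit filter
 * stored as sixteen 16-bit words (a line elided by this listing first
 * reduces the CRC to its high byte). Sketch of the bit placement:
 */
#include <stdint.h>

static void mcast_hash_set(uint16_t hash[16], uint8_t crc8)
{
	hash[crc8 >> 4] |= 1u << (15 - (crc8 & 0xf));	/* word, then bit */
}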
6405 if (!is_valid_ether_addr(addr->sa_data)) in niu_set_mac_addr()
6406 return -EADDRNOTAVAIL; in niu_set_mac_addr()
6408 eth_hw_addr_set(dev, addr->sa_data); in niu_set_mac_addr()
6413 spin_lock_irqsave(&np->lock, flags); in niu_set_mac_addr()
6415 niu_set_primary_mac(np, dev->dev_addr); in niu_set_mac_addr()
6417 spin_unlock_irqrestore(&np->lock, flags); in niu_set_mac_addr()
6424 return -EOPNOTSUPP; in niu_ioctl()
6429 netif_trans_update(np->dev); /* prevent tx timeout */ in niu_netif_stop()
6433 netif_tx_disable(np->dev); in niu_netif_stop()
6442 netif_tx_wake_all_queues(np->dev); in niu_netif_start()
6453 if (np->rx_rings) { in niu_reset_buffers()
6454 for (i = 0; i < np->num_rx_rings; i++) { in niu_reset_buffers()
6455 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_reset_buffers()
6460 page = rp->rxhash[j]; in niu_reset_buffers()
6463 u64 base = page->index; in niu_reset_buffers()
6465 rp->rbr[k++] = cpu_to_le32(base); in niu_reset_buffers()
6475 rp->rbr_index = rp->rbr_table_size - 1; in niu_reset_buffers()
6476 rp->rcr_index = 0; in niu_reset_buffers()
6477 rp->rbr_pending = 0; in niu_reset_buffers()
6478 rp->rbr_refill_pending = 0; in niu_reset_buffers()
6481 if (np->tx_rings) { in niu_reset_buffers()
6482 for (i = 0; i < np->num_tx_rings; i++) { in niu_reset_buffers()
6483 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_reset_buffers()
6486 if (rp->tx_buffs[j].skb) in niu_reset_buffers()
6490 rp->pending = MAX_TX_RING_SIZE; in niu_reset_buffers()
6491 rp->prod = 0; in niu_reset_buffers()
6492 rp->cons = 0; in niu_reset_buffers()
6493 rp->wrap_bit = 0; in niu_reset_buffers()
6504 spin_lock_irqsave(&np->lock, flags); in niu_reset_task()
6505 if (!netif_running(np->dev)) { in niu_reset_task()
6506 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6510 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6512 del_timer_sync(&np->timer); in niu_reset_task()
6516 spin_lock_irqsave(&np->lock, flags); in niu_reset_task()
6520 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6524 spin_lock_irqsave(&np->lock, flags); in niu_reset_task()
6528 np->timer.expires = jiffies + HZ; in niu_reset_task()
6529 add_timer(&np->timer); in niu_reset_task()
6533 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6540 dev_err(np->device, "%s: Transmit timed out, resetting\n", in niu_tx_timeout()
6541 dev->name); in niu_tx_timeout()
6543 schedule_work(&np->reset_task); in niu_tx_timeout()
6550 __le64 *desc = &rp->descr[index]; in niu_set_txd()
6566 eth_proto = be16_to_cpu(ehdr->h_proto); in niu_compute_tx_flags()
6570 __be16 val = vp->h_vlan_encapsulated_proto; in niu_compute_tx_flags()
6576 switch (skb->protocol) { in niu_compute_tx_flags()
6578 ip_proto = ip_hdr(skb)->protocol; in niu_compute_tx_flags()
6579 ihl = ip_hdr(skb)->ihl; in niu_compute_tx_flags()
6582 ip_proto = ipv6_hdr(skb)->nexthdr; in niu_compute_tx_flags()
6592 if (skb->ip_summed == CHECKSUM_PARTIAL) { in niu_compute_tx_flags()
6600 start = skb_checksum_start_offset(skb) - in niu_compute_tx_flags()
6602 stuff = start + skb->csum_offset; in niu_compute_tx_flags()
6608 l3off = skb_network_offset(skb) - in niu_compute_tx_flags()
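/*
 * Annotation (not part of niu.c): for CHECKSUM_PARTIAL, niu_compute_tx_flags()
 * derives two byte offsets for the Tx descriptor: "start", where hardware
 * begins summing, and "stuff" = start + csum_offset, where it writes the
 * result. Tiny sketch of the relationship (names are placeholders):
 */
#include <stddef.h>

struct hw_csum { size_t start, stuff; };

static struct hw_csum csum_offsets(size_t l4_off, size_t csum_field_off)
{
	struct hw_csum c = { .start = l4_off,
			     .stuff = l4_off + csum_field_off };
	return c;	/* e.g. csum_field_off is 16 for TCP, 6 for UDP */
}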
6637 rp = &np->tx_rings[i]; in niu_start_xmit()
6640 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { in niu_start_xmit()
6642 dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name); in niu_start_xmit()
6643 rp->tx_errors++; in niu_start_xmit()
6662 align = ((unsigned long) skb->data & (16 - 1)); in niu_start_xmit()
6665 ehdr = (struct ethhdr *) skb->data; in niu_start_xmit()
6668 len = skb->len - sizeof(struct tx_pkt_hdr); in niu_start_xmit()
6669 tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); in niu_start_xmit()
6670 tp->resv = 0; in niu_start_xmit()
6673 mapping = np->ops->map_single(np->device, skb->data, in niu_start_xmit()
6675 if (np->ops->mapping_error(np->device, mapping)) in niu_start_xmit()
6678 prod = rp->prod; in niu_start_xmit()
6680 rp->tx_buffs[prod].skb = skb; in niu_start_xmit()
6681 rp->tx_buffs[prod].mapping = mapping; in niu_start_xmit()
6684 if (++rp->mark_counter == rp->mark_freq) { in niu_start_xmit()
6685 rp->mark_counter = 0; in niu_start_xmit()
6687 rp->mark_pending++; in niu_start_xmit()
6691 nfg = skb_shinfo(skb)->nr_frags; in niu_start_xmit()
6693 tlen -= MAX_TX_DESC_LEN; in niu_start_xmit()
6708 len -= this_len; in niu_start_xmit()
6711 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in niu_start_xmit()
6712 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in niu_start_xmit()
6715 mapping = np->ops->map_page(np->device, skb_frag_page(frag), in niu_start_xmit()
6718 if (np->ops->mapping_error(np->device, mapping)) in niu_start_xmit()
6721 rp->tx_buffs[prod].skb = NULL; in niu_start_xmit()
6722 rp->tx_buffs[prod].mapping = mapping; in niu_start_xmit()
6729 if (prod < rp->prod) in niu_start_xmit()
6730 rp->wrap_bit ^= TX_RING_KICK_WRAP; in niu_start_xmit()
6731 rp->prod = prod; in niu_start_xmit()
6733 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); in niu_start_xmit()
6745 while (i--) { in niu_start_xmit()
6749 frag = &skb_shinfo(skb)->frags[i]; in niu_start_xmit()
6750 np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping, in niu_start_xmit()
6754 np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping, in niu_start_xmit()
6758 rp->tx_errors++; in niu_start_xmit()
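/*
 * Annotation (not part of niu.c): the TX_RING_KICK write above encodes a
 * producer index plus a wrap bit that toggles whenever the index wraps,
 * letting hardware tell a full ring from an empty one. Self-contained
 * sketch; the ring size and wrap-bit position are assumptions.
 */
#include <stdint.h>

#define RING_SIZE	256		/* assumed power-of-two ring */
#define KICK_WRAP	(1ULL << 63)	/* assumed wrap-bit position */

struct txring { uint32_t prod; uint64_t wrap; };

static uint64_t txring_kick(struct txring *r, uint32_t n_descs)
{
	uint32_t prod = (r->prod + n_descs) & (RING_SIZE - 1);

	if (prod < r->prod)		/* index wrapped past the end */
		r->wrap ^= KICK_WRAP;
	r->prod = prod;
	return r->wrap | ((uint64_t)prod << 3);	/* value for the kick reg */
}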
6768 orig_jumbo = (dev->mtu > ETH_DATA_LEN); in niu_change_mtu()
6771 dev->mtu = new_mtu; in niu_change_mtu()
6787 spin_lock_irq(&np->lock); in niu_change_mtu()
6791 timer_setup(&np->timer, niu_timer, 0); in niu_change_mtu()
6792 np->timer.expires = jiffies + HZ; in niu_change_mtu()
6799 spin_unlock_irq(&np->lock); in niu_change_mtu()
6803 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) in niu_change_mtu()
6806 add_timer(&np->timer); in niu_change_mtu()
6816 struct niu_vpd *vpd = &np->vpd; in niu_get_drvinfo()
6818 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); in niu_get_drvinfo()
6819 strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); in niu_get_drvinfo()
6820 snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d", in niu_get_drvinfo()
6821 vpd->fcode_major, vpd->fcode_minor); in niu_get_drvinfo()
6822 if (np->parent->plat_type != PLAT_TYPE_NIU) in niu_get_drvinfo()
6823 strscpy(info->bus_info, pci_name(np->pdev), in niu_get_drvinfo()
6824 sizeof(info->bus_info)); in niu_get_drvinfo()
6833 lp = &np->link_config; in niu_get_link_ksettings()
6836 cmd->base.phy_address = np->phy_addr; in niu_get_link_ksettings()
6837 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, in niu_get_link_ksettings()
6838 lp->supported); in niu_get_link_ksettings()
6839 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, in niu_get_link_ksettings()
6840 lp->active_advertising); in niu_get_link_ksettings()
6841 cmd->base.autoneg = lp->active_autoneg; in niu_get_link_ksettings()
6842 cmd->base.speed = lp->active_speed; in niu_get_link_ksettings()
6843 cmd->base.duplex = lp->active_duplex; in niu_get_link_ksettings()
6844 cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; in niu_get_link_ksettings()
6853 struct niu_link_config *lp = &np->link_config; in niu_set_link_ksettings()
6855 ethtool_convert_link_mode_to_legacy_u32(&lp->advertising, in niu_set_link_ksettings()
6856 cmd->link_modes.advertising); in niu_set_link_ksettings()
6857 lp->speed = cmd->base.speed; in niu_set_link_ksettings()
6858 lp->duplex = cmd->base.duplex; in niu_set_link_ksettings()
6859 lp->autoneg = cmd->base.autoneg; in niu_set_link_ksettings()
6866 return np->msg_enable; in niu_get_msglevel()
6872 np->msg_enable = value; in niu_set_msglevel()
6879 if (np->link_config.autoneg) in niu_nway_reset()
6889 return np->eeprom_len; in niu_get_eeprom_len()
6898 offset = eeprom->offset; in niu_get_eeprom()
6899 len = eeprom->len; in niu_get_eeprom()
6902 return -EINVAL; in niu_get_eeprom()
6903 if (offset >= np->eeprom_len) in niu_get_eeprom()
6904 return -EINVAL; in niu_get_eeprom()
6905 if (offset + len > np->eeprom_len) in niu_get_eeprom()
6906 len = eeprom->len = np->eeprom_len - offset; in niu_get_eeprom()
6912 b_count = 4 - b_offset; in niu_get_eeprom()
6916 val = nr64(ESPC_NCR((offset - b_offset) / 4)); in niu_get_eeprom()
6919 len -= b_count; in niu_get_eeprom()
6926 len -= 4; in niu_get_eeprom()
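/*
 * Annotation (not part of niu.c): the EEPROM is addressed in 32-bit words
 * (via ESPC_NCR), so niu_get_eeprom() peels off an unaligned head before
 * copying whole words. Hedged userspace sketch with a read_word(index)
 * hook in place of nr64(ESPC_NCR(...)); assumes little-endian byte order
 * within each word.
 */
#include <stdint.h>
#include <stddef.h>

static void eeprom_read(uint32_t (*read_word)(size_t word_idx),
			size_t offset, size_t len, uint8_t *out)
{
	size_t b_offset = offset & 3;

	if (b_offset) {				/* unaligned head */
		size_t b_count = 4 - b_offset;
		uint32_t val = read_word((offset - b_offset) / 4);

		if (b_count > len)
			b_count = len;
		for (size_t i = 0; i < b_count; i++)
			*out++ = (uint8_t)(val >> (8 * (b_offset + i)));
		offset += b_count;
		len -= b_count;
	}
	while (len) {				/* whole and trailing words */
		uint32_t val = read_word(offset / 4);
		size_t n = len < 4 ? len : 4;

		for (size_t i = 0; i < n; i++)
			*out++ = (uint8_t)(val >> (8 * i));
		offset += n;
		len -= n;
	}
}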
6999 return -EINVAL; in niu_class_to_ethflow()
7095 nfc->data = 0; in niu_get_hash_opts()
7097 if (!niu_ethflow_to_class(nfc->flow_type, &class)) in niu_get_hash_opts()
7098 return -EINVAL; in niu_get_hash_opts()
7100 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & in niu_get_hash_opts()
7102 nfc->data = RXH_DISCARD; in niu_get_hash_opts()
7104 nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - in niu_get_hash_opts()
7115 tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; in niu_get_ip4fs_from_tcam_key()
7116 fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7118 tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; in niu_get_ip4fs_from_tcam_key()
7119 fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7121 tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; in niu_get_ip4fs_from_tcam_key()
7122 fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7124 tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; in niu_get_ip4fs_from_tcam_key()
7125 fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7127 fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> in niu_get_ip4fs_from_tcam_key()
7129 fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> in niu_get_ip4fs_from_tcam_key()
7132 switch (fsp->flow_type) { in niu_get_ip4fs_from_tcam_key()
7136 prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7138 fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); in niu_get_ip4fs_from_tcam_key()
7140 prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7142 fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); in niu_get_ip4fs_from_tcam_key()
7144 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7146 fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); in niu_get_ip4fs_from_tcam_key()
7148 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7150 fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); in niu_get_ip4fs_from_tcam_key()
7154 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7156 fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7158 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7160 fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7163 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7165 fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7167 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7169 fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7171 fsp->h_u.usr_ip4_spec.proto = in niu_get_ip4fs_from_tcam_key()
7172 (tp->key[2] & TCAM_V4KEY2_PROTO) >> in niu_get_ip4fs_from_tcam_key()
7174 fsp->m_u.usr_ip4_spec.proto = in niu_get_ip4fs_from_tcam_key()
7175 (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> in niu_get_ip4fs_from_tcam_key()
7178 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; in niu_get_ip4fs_from_tcam_key()
7188 struct niu_parent *parent = np->parent; in niu_get_ethtool_tcam_entry()
7190 struct ethtool_rx_flow_spec *fsp = &nfc->fs; in niu_get_ethtool_tcam_entry()
7195 idx = tcam_get_index(np, (u16)nfc->fs.location); in niu_get_ethtool_tcam_entry()
7197 tp = &parent->tcam[idx]; in niu_get_ethtool_tcam_entry()
7198 if (!tp->valid) { in niu_get_ethtool_tcam_entry()
7199 netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", in niu_get_ethtool_tcam_entry()
7200 parent->index, (u16)nfc->fs.location, idx); in niu_get_ethtool_tcam_entry()
7201 return -EINVAL; in niu_get_ethtool_tcam_entry()
7205 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> in niu_get_ethtool_tcam_entry()
7207 ret = niu_class_to_ethflow(class, &fsp->flow_type); in niu_get_ethtool_tcam_entry()
7209 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", in niu_get_ethtool_tcam_entry()
7210 parent->index); in niu_get_ethtool_tcam_entry()
7214 if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { in niu_get_ethtool_tcam_entry()
7215 u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> in niu_get_ethtool_tcam_entry()
7218 if (fsp->flow_type == AH_V4_FLOW) in niu_get_ethtool_tcam_entry()
7219 fsp->flow_type = ESP_V4_FLOW; in niu_get_ethtool_tcam_entry()
7221 fsp->flow_type = ESP_V6_FLOW; in niu_get_ethtool_tcam_entry()
7225 switch (fsp->flow_type) { in niu_get_ethtool_tcam_entry()
7239 ret = -EINVAL; in niu_get_ethtool_tcam_entry()
7245 ret = -EINVAL; in niu_get_ethtool_tcam_entry()
7252 if (tp->assoc_data & TCAM_ASSOCDATA_DISC) in niu_get_ethtool_tcam_entry()
7253 fsp->ring_cookie = RX_CLS_FLOW_DISC; in niu_get_ethtool_tcam_entry()
7255 fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> in niu_get_ethtool_tcam_entry()
7259 nfc->data = tcam_get_size(np); in niu_get_ethtool_tcam_entry()
7268 struct niu_parent *parent = np->parent; in niu_get_ethtool_tcam_all()
7275 nfc->data = tcam_get_size(np); in niu_get_ethtool_tcam_all()
7278 for (cnt = 0, i = 0; i < nfc->data; i++) { in niu_get_ethtool_tcam_all()
7280 tp = &parent->tcam[idx]; in niu_get_ethtool_tcam_all()
7281 if (!tp->valid) in niu_get_ethtool_tcam_all()
7283 if (cnt == nfc->rule_cnt) { in niu_get_ethtool_tcam_all()
7284 ret = -EMSGSIZE; in niu_get_ethtool_tcam_all()
7292 nfc->rule_cnt = cnt; in niu_get_ethtool_tcam_all()
7303 switch (cmd->cmd) { in niu_get_nfc()
7308 cmd->data = np->num_rx_rings; in niu_get_nfc()
7311 cmd->rule_cnt = tcam_get_valid_entry_cnt(np); in niu_get_nfc()
7320 ret = -EINVAL; in niu_get_nfc()
7333 if (!niu_ethflow_to_class(nfc->flow_type, &class)) in niu_set_hash_opts()
7334 return -EINVAL; in niu_set_hash_opts()
7338 return -EINVAL; in niu_set_hash_opts()
7340 if (nfc->data & RXH_DISCARD) { in niu_set_hash_opts()
7342 flow_key = np->parent->tcam_key[class - in niu_set_hash_opts()
7345 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); in niu_set_hash_opts()
7346 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; in niu_set_hash_opts()
7351 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & in niu_set_hash_opts()
7354 flow_key = np->parent->tcam_key[class - in niu_set_hash_opts()
7357 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), in niu_set_hash_opts()
7359 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = in niu_set_hash_opts()
7365 if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) in niu_set_hash_opts()
7366 return -EINVAL; in niu_set_hash_opts()
7369 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); in niu_set_hash_opts()
7370 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; in niu_set_hash_opts()
7384 sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); in niu_get_tcamkey_from_ip4fs()
7385 sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); in niu_get_tcamkey_from_ip4fs()
7386 dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); in niu_get_tcamkey_from_ip4fs()
7387 dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); in niu_get_tcamkey_from_ip4fs()
7389 tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; in niu_get_tcamkey_from_ip4fs()
7390 tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; in niu_get_tcamkey_from_ip4fs()
7391 tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; in niu_get_tcamkey_from_ip4fs()
7392 tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; in niu_get_tcamkey_from_ip4fs()
7394 tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; in niu_get_tcamkey_from_ip4fs()
7395 tp->key[3] |= dip; in niu_get_tcamkey_from_ip4fs()
7397 tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; in niu_get_tcamkey_from_ip4fs()
7398 tp->key_mask[3] |= dipm; in niu_get_tcamkey_from_ip4fs()
7400 tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << in niu_get_tcamkey_from_ip4fs()
7402 tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << in niu_get_tcamkey_from_ip4fs()
7404 switch (fsp->flow_type) { in niu_get_tcamkey_from_ip4fs()
7408 sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); in niu_get_tcamkey_from_ip4fs()
7409 spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); in niu_get_tcamkey_from_ip4fs()
7410 dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); in niu_get_tcamkey_from_ip4fs()
7411 dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); in niu_get_tcamkey_from_ip4fs()
7413 tp->key[2] |= (((u64)sport << 16) | dport); in niu_get_tcamkey_from_ip4fs()
7414 tp->key_mask[2] |= (((u64)spm << 16) | dpm); in niu_get_tcamkey_from_ip4fs()
7415 niu_ethflow_to_l3proto(fsp->flow_type, &pid); in niu_get_tcamkey_from_ip4fs()
7419 spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); in niu_get_tcamkey_from_ip4fs()
7420 spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); in niu_get_tcamkey_from_ip4fs()
7422 tp->key[2] |= spi; in niu_get_tcamkey_from_ip4fs()
7423 tp->key_mask[2] |= spim; in niu_get_tcamkey_from_ip4fs()
7424 niu_ethflow_to_l3proto(fsp->flow_type, &pid); in niu_get_tcamkey_from_ip4fs()
7427 spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); in niu_get_tcamkey_from_ip4fs()
7428 spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); in niu_get_tcamkey_from_ip4fs()
7430 tp->key[2] |= spi; in niu_get_tcamkey_from_ip4fs()
7431 tp->key_mask[2] |= spim; in niu_get_tcamkey_from_ip4fs()
7432 pid = fsp->h_u.usr_ip4_spec.proto; in niu_get_tcamkey_from_ip4fs()
7438 tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); in niu_get_tcamkey_from_ip4fs()
7440 tp->key_mask[2] |= TCAM_V4KEY2_PROTO; in niu_get_tcamkey_from_ip4fs()
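/*
 * Annotation (not part of niu.c): for TCP/UDP flows the code above packs
 * source and destination ports (and their masks) into the low 32 bits of
 * TCAM key word 2, source port in the upper half:
 */
#include <stdint.h>

static uint32_t pack_ports(uint16_t sport, uint16_t dport)
{
	return ((uint32_t)sport << 16) | dport;
}
/* e.g. pack_ports(0x1234, 0x5678) == 0x12345678 */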
7447 struct niu_parent *parent = np->parent; in niu_add_ethtool_tcam_entry()
7449 struct ethtool_rx_flow_spec *fsp = &nfc->fs; in niu_add_ethtool_tcam_entry()
7450 struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; in niu_add_ethtool_tcam_entry()
7451 int l2_rdc_table = rdc_table->first_table_num; in niu_add_ethtool_tcam_entry()
7459 idx = nfc->fs.location; in niu_add_ethtool_tcam_entry()
7461 return -EINVAL; in niu_add_ethtool_tcam_entry()
7463 if (fsp->flow_type == IP_USER_FLOW) { in niu_add_ethtool_tcam_entry()
7466 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; in niu_add_ethtool_tcam_entry()
7467 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; in niu_add_ethtool_tcam_entry()
7469 if (uspec->ip_ver != ETH_RX_NFC_IP4) in niu_add_ethtool_tcam_entry()
7470 return -EINVAL; in niu_add_ethtool_tcam_entry()
7475 if (parent->l3_cls[i]) { in niu_add_ethtool_tcam_entry()
7476 if (uspec->proto == parent->l3_cls_pid[i]) { in niu_add_ethtool_tcam_entry()
7477 class = parent->l3_cls[i]; in niu_add_ethtool_tcam_entry()
7478 parent->l3_cls_refcnt[i]++; in niu_add_ethtool_tcam_entry()
7502 uspec->proto, in niu_add_ethtool_tcam_entry()
7503 uspec->tos, in niu_add_ethtool_tcam_entry()
7504 umask->tos); in niu_add_ethtool_tcam_entry()
7511 parent->l3_cls[i] = class; in niu_add_ethtool_tcam_entry()
7512 parent->l3_cls_pid[i] = uspec->proto; in niu_add_ethtool_tcam_entry()
7513 parent->l3_cls_refcnt[i]++; in niu_add_ethtool_tcam_entry()
7519 netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", in niu_add_ethtool_tcam_entry()
7520 parent->index, __func__, uspec->proto); in niu_add_ethtool_tcam_entry()
7521 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7526 if (!niu_ethflow_to_class(fsp->flow_type, &class)) { in niu_add_ethtool_tcam_entry()
7527 return -EINVAL; in niu_add_ethtool_tcam_entry()
7534 tp = &parent->tcam[idx]; in niu_add_ethtool_tcam_entry()
7539 switch (fsp->flow_type) { in niu_add_ethtool_tcam_entry()
7553 netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", in niu_add_ethtool_tcam_entry()
7554 parent->index, __func__, fsp->flow_type); in niu_add_ethtool_tcam_entry()
7555 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7561 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", in niu_add_ethtool_tcam_entry()
7562 parent->index, __func__, fsp->flow_type); in niu_add_ethtool_tcam_entry()
7563 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7568 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { in niu_add_ethtool_tcam_entry()
7569 tp->assoc_data = TCAM_ASSOCDATA_DISC; in niu_add_ethtool_tcam_entry()
7571 if (fsp->ring_cookie >= np->num_rx_rings) { in niu_add_ethtool_tcam_entry()
7572 netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", in niu_add_ethtool_tcam_entry()
7573 parent->index, __func__, in niu_add_ethtool_tcam_entry()
7574 (long long)fsp->ring_cookie); in niu_add_ethtool_tcam_entry()
7575 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7578 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | in niu_add_ethtool_tcam_entry()
7579 (fsp->ring_cookie << in niu_add_ethtool_tcam_entry()
7583 err = tcam_write(np, idx, tp->key, tp->key_mask); in niu_add_ethtool_tcam_entry()
7585 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7588 err = tcam_assoc_write(np, idx, tp->assoc_data); in niu_add_ethtool_tcam_entry()
7590 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7595 tp->valid = 1; in niu_add_ethtool_tcam_entry()
7596 np->clas.tcam_valid_entries++; in niu_add_ethtool_tcam_entry()
7605 struct niu_parent *parent = np->parent; in niu_del_ethtool_tcam_entry()
7613 return -EINVAL; in niu_del_ethtool_tcam_entry()
7618 tp = &parent->tcam[idx]; in niu_del_ethtool_tcam_entry()
7621 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> in niu_del_ethtool_tcam_entry()
7627 if (parent->l3_cls[i] == class) { in niu_del_ethtool_tcam_entry()
7628 parent->l3_cls_refcnt[i]--; in niu_del_ethtool_tcam_entry()
7629 if (!parent->l3_cls_refcnt[i]) { in niu_del_ethtool_tcam_entry()
7636 parent->l3_cls[i] = 0; in niu_del_ethtool_tcam_entry()
7637 parent->l3_cls_pid[i] = 0; in niu_del_ethtool_tcam_entry()
7643 netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n", in niu_del_ethtool_tcam_entry()
7644 parent->index, __func__, in niu_del_ethtool_tcam_entry()
7646 ret = -EINVAL; in niu_del_ethtool_tcam_entry()
7656 tp->valid = 0; in niu_del_ethtool_tcam_entry()
7657 np->clas.tcam_valid_entries--; in niu_del_ethtool_tcam_entry()
7669 switch (cmd->cmd) { in niu_set_nfc()
7677 ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); in niu_set_nfc()
7680 ret = -EINVAL; in niu_set_nfc()
7773 if (np->flags & NIU_FLAGS_XMAC) { in niu_get_strings()
7782 for (i = 0; i < np->num_rx_rings; i++) { in niu_get_strings()
7787 for (i = 0; i < np->num_tx_rings; i++) { in niu_get_strings()
7799 return -EINVAL; in niu_get_sset_count()
7801 return (np->flags & NIU_FLAGS_XMAC ? in niu_get_sset_count()
7804 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + in niu_get_sset_count()
7805 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS); in niu_get_sset_count()
7815 if (np->flags & NIU_FLAGS_XMAC) { in niu_get_ethtool_stats()
7816 memcpy(data, &np->mac_stats.xmac, in niu_get_ethtool_stats()
7820 memcpy(data, &np->mac_stats.bmac, in niu_get_ethtool_stats()
7824 for (i = 0; i < np->num_rx_rings; i++) { in niu_get_ethtool_stats()
7825 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_get_ethtool_stats()
7829 data[0] = rp->rx_channel; in niu_get_ethtool_stats()
7830 data[1] = rp->rx_packets; in niu_get_ethtool_stats()
7831 data[2] = rp->rx_bytes; in niu_get_ethtool_stats()
7832 data[3] = rp->rx_dropped; in niu_get_ethtool_stats()
7833 data[4] = rp->rx_errors; in niu_get_ethtool_stats()
7836 for (i = 0; i < np->num_tx_rings; i++) { in niu_get_ethtool_stats()
7837 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_get_ethtool_stats()
7839 data[0] = rp->tx_channel; in niu_get_ethtool_stats()
7840 data[1] = rp->tx_packets; in niu_get_ethtool_stats()
7841 data[2] = rp->tx_bytes; in niu_get_ethtool_stats()
7842 data[3] = rp->tx_errors; in niu_get_ethtool_stats()
7849 if (np->flags & NIU_FLAGS_XMAC) in niu_led_state_save()
7857 if (np->flags & NIU_FLAGS_XMAC) in niu_led_state_restore()
7867 if (np->flags & NIU_FLAGS_XMAC) { in niu_force_led()
7890 return -EAGAIN; in niu_set_phys_id()
7894 np->orig_led_state = niu_led_state_save(np); in niu_set_phys_id()
7906 niu_led_state_restore(np, np->orig_led_state); in niu_set_phys_id()
7934 return -EINVAL; in niu_ldg_assign_ldn()
7936 return -EINVAL; in niu_ldg_assign_ldn()
7938 parent->ldg_map[ldn] = ldg; in niu_ldg_assign_ldn()
7940 if (np->parent->plat_type == PLAT_TYPE_NIU) { in niu_ldg_assign_ldn()
7941 /* On N2 NIU, the ldn-->ldg assignments are set up and fixed by in niu_ldg_assign_ldn()
7947 dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d is %llu\n", in niu_ldg_assign_ldn()
7948 np->port, ldn, ldg, in niu_ldg_assign_ldn()
7950 return -EINVAL; in niu_ldg_assign_ldn()
7961 return -EINVAL; in niu_set_ldg_timer_res()
7974 return -EINVAL; in niu_set_ldg_sid()
7988 return -EINVAL; in niu_pci_eeprom_read()
7998 } while (limit--); in niu_pci_eeprom_read()
8000 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", in niu_pci_eeprom_read()
8002 return -ENODEV; in niu_pci_eeprom_read()
8013 } while (limit--); in niu_pci_eeprom_read()
8015 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", in niu_pci_eeprom_read()
8017 return -ENODEV; in niu_pci_eeprom_read()
8072 return -EINVAL; in niu_pci_vpd_get_propname()
8079 struct niu_vpd *vpd = &np->vpd; in niu_vpd_parse_version()
8080 int len = strlen(vpd->version) + 1; in niu_vpd_parse_version()
8081 const char *s = vpd->version; in niu_vpd_parse_version()
8084 for (i = 0; i < len - 5; i++) { in niu_vpd_parse_version()
8088 if (i >= len - 5) in niu_vpd_parse_version()
8092 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); in niu_vpd_parse_version()
8094 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_vpd_parse_version()
8096 vpd->fcode_major, vpd->fcode_minor); in niu_vpd_parse_version()
8097 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || in niu_vpd_parse_version()
8098 (vpd->fcode_major == NIU_VPD_MIN_MAJOR && in niu_vpd_parse_version()
8099 vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) in niu_vpd_parse_version()
8100 np->flags |= NIU_FLAGS_VPD_VALID; in niu_vpd_parse_version()
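/*
 * Annotation (not part of niu.c): niu_vpd_parse_version() scans the VPD
 * string for a version token, parses "major.minor", and only marks the
 * VPD valid at or above a minimum revision. Self-contained sketch; the
 * "FCode " marker is an assumption (the strncmp line is elided here).
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool vpd_version_ok(const char *vpd, int min_major, int min_minor)
{
	const char *s = strstr(vpd, "FCode ");	/* assumed marker */
	int major, minor;

	if (!s || sscanf(s + 6, "%d.%d", &major, &minor) != 2)
		return false;
	return major > min_major ||		/* same test as above */
	       (major == min_major && minor >= min_minor);
}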
8115 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_vpd_scan_props()
8144 prop_buf = np->vpd.model; in niu_pci_vpd_scan_props()
8147 } else if (!strcmp(namebuf, "board-model")) { in niu_pci_vpd_scan_props()
8148 prop_buf = np->vpd.board_model; in niu_pci_vpd_scan_props()
8152 prop_buf = np->vpd.version; in niu_pci_vpd_scan_props()
8155 } else if (!strcmp(namebuf, "local-mac-address")) { in niu_pci_vpd_scan_props()
8156 prop_buf = np->vpd.local_mac; in niu_pci_vpd_scan_props()
8159 } else if (!strcmp(namebuf, "num-mac-addresses")) { in niu_pci_vpd_scan_props()
8160 prop_buf = &np->vpd.mac_num; in niu_pci_vpd_scan_props()
8163 } else if (!strcmp(namebuf, "phy-type")) { in niu_pci_vpd_scan_props()
8164 prop_buf = np->vpd.phy_type; in niu_pci_vpd_scan_props()
8170 dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len); in niu_pci_vpd_scan_props()
8171 return -EINVAL; in niu_pci_vpd_scan_props()
8178 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_vpd_scan_props()
8215 return -EINVAL; in niu_pci_vpd_fetch()
8295 /* 1G copper, MII */ in niu_phy_type_prop_decode()
8296 np->flags &= ~(NIU_FLAGS_FIBER | in niu_phy_type_prop_decode()
8298 np->mac_xcvr = MAC_XCVR_MII; in niu_phy_type_prop_decode()
8301 np->flags |= (NIU_FLAGS_10G | in niu_phy_type_prop_decode()
8303 np->mac_xcvr = MAC_XCVR_XPCS; in niu_phy_type_prop_decode()
8306 np->flags &= ~NIU_FLAGS_10G; in niu_phy_type_prop_decode()
8307 np->flags |= NIU_FLAGS_FIBER; in niu_phy_type_prop_decode()
8308 np->mac_xcvr = MAC_XCVR_PCS; in niu_phy_type_prop_decode()
8311 np->flags |= NIU_FLAGS_10G; in niu_phy_type_prop_decode()
8312 np->flags &= ~NIU_FLAGS_FIBER; in niu_phy_type_prop_decode()
8313 np->mac_xcvr = MAC_XCVR_XPCS; in niu_phy_type_prop_decode()
8316 np->flags |= NIU_FLAGS_10G; in niu_phy_type_prop_decode()
8317 np->flags &= ~NIU_FLAGS_FIBER; in niu_phy_type_prop_decode()
8318 np->flags |= NIU_FLAGS_XCVR_SERDES; in niu_phy_type_prop_decode()
8319 np->mac_xcvr = MAC_XCVR_XPCS; in niu_phy_type_prop_decode()
8321 return -EINVAL; in niu_phy_type_prop_decode()
8330 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || in niu_pci_vpd_get_nports()
8331 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || in niu_pci_vpd_get_nports()
8332 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || in niu_pci_vpd_get_nports()
8333 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || in niu_pci_vpd_get_nports()
8334 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { in niu_pci_vpd_get_nports()
8336 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || in niu_pci_vpd_get_nports()
8337 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || in niu_pci_vpd_get_nports()
8338 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || in niu_pci_vpd_get_nports()
8339 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { in niu_pci_vpd_get_nports()
8348 struct net_device *dev = np->dev; in niu_pci_vpd_validate()
8349 struct niu_vpd *vpd = &np->vpd; in niu_pci_vpd_validate()
8353 if (!is_valid_ether_addr(&vpd->local_mac[0])) { in niu_pci_vpd_validate()
8354 dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); in niu_pci_vpd_validate()
8356 np->flags &= ~NIU_FLAGS_VPD_VALID; in niu_pci_vpd_validate()
8360 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || in niu_pci_vpd_validate()
8361 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { in niu_pci_vpd_validate()
8362 np->flags |= NIU_FLAGS_10G; in niu_pci_vpd_validate()
8363 np->flags &= ~NIU_FLAGS_FIBER; in niu_pci_vpd_validate()
8364 np->flags |= NIU_FLAGS_XCVR_SERDES; in niu_pci_vpd_validate()
8365 np->mac_xcvr = MAC_XCVR_PCS; in niu_pci_vpd_validate()
8366 if (np->port > 1) { in niu_pci_vpd_validate()
8367 np->flags |= NIU_FLAGS_FIBER; in niu_pci_vpd_validate()
8368 np->flags &= ~NIU_FLAGS_10G; in niu_pci_vpd_validate()
8370 if (np->flags & NIU_FLAGS_10G) in niu_pci_vpd_validate()
8371 np->mac_xcvr = MAC_XCVR_XPCS; in niu_pci_vpd_validate()
8372 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { in niu_pci_vpd_validate()
8373 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | in niu_pci_vpd_validate()
8375 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { in niu_pci_vpd_validate()
8376 dev_err(np->device, "Illegal phy string [%s]\n", in niu_pci_vpd_validate()
8377 np->vpd.phy_type); in niu_pci_vpd_validate()
8378 dev_err(np->device, "Falling back to SPROM\n"); in niu_pci_vpd_validate()
8379 np->flags &= ~NIU_FLAGS_VPD_VALID; in niu_pci_vpd_validate()
8383 ether_addr_copy(addr, vpd->local_mac); in niu_pci_vpd_validate()
8386 addr[5] += np->port; in niu_pci_vpd_validate()
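/*
 * Annotation (not part of niu.c): each port derives its MAC address by
 * adding the port number to the base address's final octet. Sketch,
 * including the overflow carry a robust implementation needs (the lines
 * around addr[5] are elided in this listing):
 */
#include <stdint.h>

static void derive_port_mac(uint8_t mac[6], unsigned int port)
{
	uint8_t old = mac[5];

	mac[5] += (uint8_t)port;
	if (mac[5] < old)	/* wrapped past 0xff: carry into octet 4 */
		mac[4]++;
}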
8395 struct net_device *dev = np->dev; in niu_pci_probe_sprom()
8405 np->eeprom_len = len; in niu_pci_probe_sprom()
8407 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8418 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8421 dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); in niu_pci_probe_sprom()
8422 return -EINVAL; in niu_pci_probe_sprom()
8426 switch (np->port) { in niu_pci_probe_sprom()
8444 dev_err(np->device, "Bogus port number %u\n", in niu_pci_probe_sprom()
8445 np->port); in niu_pci_probe_sprom()
8446 return -EINVAL; in niu_pci_probe_sprom()
8448 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8453 /* 1G copper, MII */ in niu_pci_probe_sprom()
8454 np->flags &= ~(NIU_FLAGS_FIBER | in niu_pci_probe_sprom()
8456 np->mac_xcvr = MAC_XCVR_MII; in niu_pci_probe_sprom()
8461 np->flags &= ~NIU_FLAGS_10G; in niu_pci_probe_sprom()
8462 np->flags |= NIU_FLAGS_FIBER; in niu_pci_probe_sprom()
8463 np->mac_xcvr = MAC_XCVR_PCS; in niu_pci_probe_sprom()
8468 np->flags |= NIU_FLAGS_10G; in niu_pci_probe_sprom()
8469 np->flags &= ~NIU_FLAGS_FIBER; in niu_pci_probe_sprom()
8470 np->mac_xcvr = MAC_XCVR_XPCS; in niu_pci_probe_sprom()
8475 np->flags |= (NIU_FLAGS_10G | in niu_pci_probe_sprom()
8477 np->mac_xcvr = MAC_XCVR_XPCS; in niu_pci_probe_sprom()
8481 dev_err(np->device, "Bogus SPROM phy type %u\n", val8); in niu_pci_probe_sprom()
8482 return -EINVAL; in niu_pci_probe_sprom()
8486 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8494 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8500 dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", in niu_pci_probe_sprom()
8502 return -EINVAL; in niu_pci_probe_sprom()
8506 addr[5] += np->port; in niu_pci_probe_sprom()
8513 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8516 return -EINVAL; in niu_pci_probe_sprom()
8521 np->vpd.model[i + 3] = (tmp >> 0) & 0xff; in niu_pci_probe_sprom()
8522 np->vpd.model[i + 2] = (tmp >> 8) & 0xff; in niu_pci_probe_sprom()
8523 np->vpd.model[i + 1] = (tmp >> 16) & 0xff; in niu_pci_probe_sprom()
8524 np->vpd.model[i + 0] = (tmp >> 24) & 0xff; in niu_pci_probe_sprom()
8526 np->vpd.model[val] = '\0'; in niu_pci_probe_sprom()
8529 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8532 return -EINVAL; in niu_pci_probe_sprom()
8537 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; in niu_pci_probe_sprom()
8538 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; in niu_pci_probe_sprom()
8539 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; in niu_pci_probe_sprom()
8540 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; in niu_pci_probe_sprom()
8542 np->vpd.board_model[val] = '\0'; in niu_pci_probe_sprom()
8544 np->vpd.mac_num = in niu_pci_probe_sprom()
8546 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8547 "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); in niu_pci_probe_sprom()
8554 struct niu_parent *parent = np->parent; in niu_get_and_validate_port()
8556 if (np->port <= 1) in niu_get_and_validate_port()
8557 np->flags |= NIU_FLAGS_XMAC; in niu_get_and_validate_port()
8559 if (!parent->num_ports) { in niu_get_and_validate_port()
8560 if (parent->plat_type == PLAT_TYPE_NIU) { in niu_get_and_validate_port()
8561 parent->num_ports = 2; in niu_get_and_validate_port()
8563 parent->num_ports = niu_pci_vpd_get_nports(np); in niu_get_and_validate_port()
8564 if (!parent->num_ports) { in niu_get_and_validate_port()
8568 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & in niu_get_and_validate_port()
8572 * Maramba on-board parts. in niu_get_and_validate_port()
8574 if (!parent->num_ports) in niu_get_and_validate_port()
8575 parent->num_ports = 4; in niu_get_and_validate_port()
8580 if (np->port >= parent->num_ports) in niu_get_and_validate_port()
8581 return -ENODEV; in niu_get_and_validate_port()
8607 parent->index, id, in phy_record()
8609 type == PHY_TYPE_PCS ? "PCS" : "MII", in phy_record()
8612 if (p->cur[type] >= NIU_MAX_PORTS) { in phy_record()
8614 return -EINVAL; in phy_record()
8616 idx = p->cur[type]; in phy_record()
8617 p->phy_id[type][idx] = id; in phy_record()
8618 p->phy_port[type][idx] = phy_port; in phy_record()
8619 p->cur[type] = idx + 1; in phy_record()
8627 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { in port_has_10g()
8628 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) in port_has_10g()
8631 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { in port_has_10g()
8632 if (p->phy_port[PHY_TYPE_PCS][i] == port) in port_has_10g()
8659 if (p->cur[PHY_TYPE_MII]) in count_1g_ports()
8660 *lowest = p->phy_port[PHY_TYPE_MII][0]; in count_1g_ports()
8662 return p->cur[PHY_TYPE_MII]; in count_1g_ports()
8667 int num_ports = parent->num_ports; in niu_n2_divide_channels()
8671 parent->rxchan_per_port[i] = (16 / num_ports); in niu_n2_divide_channels()
8672 parent->txchan_per_port[i] = (16 / num_ports); in niu_n2_divide_channels()
8675 parent->index, i, in niu_n2_divide_channels()
8676 parent->rxchan_per_port[i], in niu_n2_divide_channels()
8677 parent->txchan_per_port[i]); in niu_n2_divide_channels()
8684 int num_ports = parent->num_ports; in niu_divide_channels()
8696 rx_chans_per_10g = (NIU_NUM_RXCHAN - in niu_divide_channels()
8701 tx_chans_per_10g = (NIU_NUM_TXCHAN - in niu_divide_channels()
8708 int type = phy_decode(parent->port_phy, i); in niu_divide_channels()
8711 parent->rxchan_per_port[i] = rx_chans_per_10g; in niu_divide_channels()
8712 parent->txchan_per_port[i] = tx_chans_per_10g; in niu_divide_channels()
8714 parent->rxchan_per_port[i] = rx_chans_per_1g; in niu_divide_channels()
8715 parent->txchan_per_port[i] = tx_chans_per_1g; in niu_divide_channels()
8718 parent->index, i, in niu_divide_channels()
8719 parent->rxchan_per_port[i], in niu_divide_channels()
8720 parent->txchan_per_port[i]); in niu_divide_channels()
8721 tot_rx += parent->rxchan_per_port[i]; in niu_divide_channels()
8722 tot_tx += parent->txchan_per_port[i]; in niu_divide_channels()
8727 parent->index, tot_rx); in niu_divide_channels()
8729 parent->rxchan_per_port[i] = 1; in niu_divide_channels()
8733 parent->index, tot_tx); in niu_divide_channels()
8735 parent->txchan_per_port[i] = 1; in niu_divide_channels()
8739 parent->index, tot_rx, tot_tx); in niu_divide_channels()
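/*
 * Annotation (not part of niu.c): niu_divide_channels() gives 1G ports a
 * fixed small share and splits the remaining DMA channels evenly across
 * 10G ports, then clamps any port that ended up with none back to one
 * channel. Hedged sketch with assumed totals (the real counts are the
 * NIU_NUM_RXCHAN/NIU_NUM_TXCHAN constants):
 */
#define NUM_CHAN	16	/* assumed channel pool */
#define CHAN_PER_1G	2	/* assumed fixed 1G share */

static int chans_per_10g_port(int num_10g, int num_1g)
{
	if (!num_10g)
		return 0;
	return (NUM_CHAN - CHAN_PER_1G * num_1g) / num_10g;
}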
8746 int i, num_ports = parent->num_ports; in niu_divide_rdc_groups()
8756 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; in niu_divide_rdc_groups()
8757 int grp, num_channels = parent->rxchan_per_port[i]; in niu_divide_rdc_groups()
8760 tp->first_table_num = rdc_group; in niu_divide_rdc_groups()
8761 tp->num_tables = rdc_groups_per_port; in niu_divide_rdc_groups()
8763 for (grp = 0; grp < tp->num_tables; grp++) { in niu_divide_rdc_groups()
8764 struct rdc_table *rt = &tp->tables[grp]; in niu_divide_rdc_groups()
8768 parent->index, i, tp->first_table_num + grp); in niu_divide_rdc_groups()
8770 rt->rxdma_channel[slot] = in niu_divide_rdc_groups()
8773 pr_cont("%d ", rt->rxdma_channel[slot]); in niu_divide_rdc_groups()
8781 parent->rdc_default[i] = rdc_channel_base; in niu_divide_rdc_groups()
8832 struct phy_probe_info *info = &parent->phy_probe_info; in walk_phys()
8840 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || in walk_phys()
8841 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { in walk_phys()
8844 parent->plat_type = PLAT_TYPE_ATCA_CP3220; in walk_phys()
8845 parent->num_ports = 4; in walk_phys()
8850 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { in walk_phys()
8853 parent->num_ports = 2; in walk_phys()
8856 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && in walk_phys()
8857 (parent->plat_type == PLAT_TYPE_NIU)) { in walk_phys()
8859 if (np->flags & NIU_FLAGS_10G) { in walk_phys()
8877 parent->plat_type = PLAT_TYPE_VF_P0; in walk_phys()
8879 parent->plat_type = PLAT_TYPE_VF_P1; in walk_phys()
8897 val = phy_encode(PORT_TYPE_10G, np->port); in walk_phys()
8902 parent->plat_type = PLAT_TYPE_VF_P0; in walk_phys()
8904 parent->plat_type = PLAT_TYPE_VF_P1; in walk_phys()
8924 parent->plat_type = PLAT_TYPE_VF_P0; in walk_phys()
8926 parent->plat_type = PLAT_TYPE_VF_P1; in walk_phys()
8939 return -EINVAL; in walk_phys()
8943 parent->port_phy = val; in walk_phys()
8945 if (parent->plat_type == PLAT_TYPE_NIU) in walk_phys()
8956 return -EINVAL; in walk_phys()
8961 struct niu_parent *parent = np->parent; in niu_probe_ports()
8964 if (parent->port_phy == PORT_PHY_UNKNOWN) { in niu_probe_ports()
8974 if (parent->port_phy == PORT_PHY_INVALID) in niu_probe_ports()
8975 return -EINVAL; in niu_probe_ports()
8982 struct niu_classifier *cp = &np->clas; in niu_classifier_swstate_init()
8984 cp->tcam_top = (u16) np->port; in niu_classifier_swstate_init()
8985 cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; in niu_classifier_swstate_init()
8986 cp->h1_init = 0xffffffff; in niu_classifier_swstate_init()
8987 cp->h2_init = 0xffff; in niu_classifier_swstate_init()
8994 struct niu_link_config *lp = &np->link_config; in niu_link_config_init()
8996 lp->advertising = (ADVERTISED_10baseT_Half | in niu_link_config_init()
9004 lp->speed = lp->active_speed = SPEED_INVALID; in niu_link_config_init()
9005 lp->duplex = DUPLEX_FULL; in niu_link_config_init()
9006 lp->active_duplex = DUPLEX_INVALID; in niu_link_config_init()
9007 lp->autoneg = 1; in niu_link_config_init()
9009 lp->loopback_mode = LOOPBACK_MAC; in niu_link_config_init()
9010 lp->active_speed = SPEED_10000; in niu_link_config_init()
9011 lp->active_duplex = DUPLEX_FULL; in niu_link_config_init()
9013 lp->loopback_mode = LOOPBACK_DISABLED; in niu_link_config_init()
9019 switch (np->port) { in niu_init_mac_ipp_pcs_base()
9021 np->mac_regs = np->regs + XMAC_PORT0_OFF; in niu_init_mac_ipp_pcs_base()
9022 np->ipp_off = 0x00000; in niu_init_mac_ipp_pcs_base()
9023 np->pcs_off = 0x04000; in niu_init_mac_ipp_pcs_base()
9024 np->xpcs_off = 0x02000; in niu_init_mac_ipp_pcs_base()
9028 np->mac_regs = np->regs + XMAC_PORT1_OFF; in niu_init_mac_ipp_pcs_base()
9029 np->ipp_off = 0x08000; in niu_init_mac_ipp_pcs_base()
9030 np->pcs_off = 0x0a000; in niu_init_mac_ipp_pcs_base()
9031 np->xpcs_off = 0x08000; in niu_init_mac_ipp_pcs_base()
9035 np->mac_regs = np->regs + BMAC_PORT2_OFF; in niu_init_mac_ipp_pcs_base()
9036 np->ipp_off = 0x04000; in niu_init_mac_ipp_pcs_base()
9037 np->pcs_off = 0x0e000; in niu_init_mac_ipp_pcs_base()
9038 np->xpcs_off = ~0UL; in niu_init_mac_ipp_pcs_base()
9042 np->mac_regs = np->regs + BMAC_PORT3_OFF; in niu_init_mac_ipp_pcs_base()
9043 np->ipp_off = 0x0c000; in niu_init_mac_ipp_pcs_base()
9044 np->pcs_off = 0x12000; in niu_init_mac_ipp_pcs_base()
9045 np->xpcs_off = ~0UL; in niu_init_mac_ipp_pcs_base()
9049 dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port); in niu_init_mac_ipp_pcs_base()
9050 return -EINVAL; in niu_init_mac_ipp_pcs_base()
9059 struct niu_parent *parent = np->parent; in niu_try_msix()
9060 struct pci_dev *pdev = np->pdev; in niu_try_msix()
9064 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; in niu_try_msix()
9065 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) in niu_try_msix()
9068 num_irqs = (parent->rxchan_per_port[np->port] + in niu_try_msix()
9069 parent->txchan_per_port[np->port] + in niu_try_msix()
9070 (np->port == 0 ? 3 : 1)); in niu_try_msix()
9071 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); in niu_try_msix()
9080 np->flags &= ~NIU_FLAGS_MSIX; in niu_try_msix()
9084 np->flags |= NIU_FLAGS_MSIX; in niu_try_msix()
9086 np->ldg[i].irq = msi_vec[i].vector; in niu_try_msix()
9087 np->num_ldg = num_irqs; in niu_try_msix()
9093 struct platform_device *op = np->op; in niu_n2_irq_init()
9097 int_prop = of_get_property(op->dev.of_node, "interrupts", NULL); in niu_n2_irq_init()
9099 return -ENODEV; in niu_n2_irq_init()
9101 for (i = 0; i < op->archdata.num_irqs; i++) { in niu_n2_irq_init()
9103 np->ldg[i].irq = op->archdata.irqs[i]; in niu_n2_irq_init()
9106 np->num_ldg = op->archdata.num_irqs; in niu_n2_irq_init()
9110 return -EINVAL; in niu_n2_irq_init()

/* In niu_ldg_init(): */
struct niu_parent *parent = np->parent;
/* ... */
np->num_ldg = 1;
np->ldg[0].irq = np->dev->irq;
if (parent->plat_type == PLAT_TYPE_NIU) {
	err = niu_n2_irq_init(np, ldg_num_map);
	if (err)
		return err;
} else
	niu_try_msix(np, ldg_num_map);

port = np->port;
for (i = 0; i < np->num_ldg; i++) {
	struct niu_ldg *lp = &np->ldg[i];

	netif_napi_add(np->dev, &lp->napi, niu_poll);

	lp->np = np;
	lp->ldg_num = ldg_num_map[i];
	lp->timer = 2; /* XXX */

	if (np->parent->plat_type != PLAT_TYPE_NIU) {
		err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
		if (err)
			return err;
	}
}
/* ... the service LDNs (MIF, device errors, MAC) are bound first,
 * each advancing a round-robin rotor over the available LDGs: */
if (ldg_rotor == np->num_ldg)
	ldg_rotor = 0;
/* ... */
if (ldg_rotor == np->num_ldg)
	ldg_rotor = 0;
/* ... */
if (ldg_rotor == np->num_ldg)
	ldg_rotor = 0;
/* ... then this port's RX channels: */
first_chan = 0;
for (i = 0; i < port; i++)
	first_chan += parent->rxchan_per_port[i];
num_chan = parent->rxchan_per_port[port];
/* ... */
if (ldg_rotor == np->num_ldg)
	ldg_rotor = 0;
/* ... and its TX channels: */
first_chan = 0;
for (i = 0; i < port; i++)
	first_chan += parent->txchan_per_port[i];
num_chan = parent->txchan_per_port[port];
/* ... */
if (ldg_rotor == np->num_ldg)
	ldg_rotor = 0;
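
/*
 * For illustration only: a minimal sketch (not part of the driver) of
 * the rotor pattern the assignments above follow. It assumes the
 * driver's niu_ldg_assign_ldn() helper, which binds one logical
 * device number (LDN) to a logical device group (LDG); the wrap keeps
 * RX channels, TX channels and the per-port devices spread evenly
 * over however many vectors niu_try_msix()/niu_n2_irq_init() obtained.
 */
static int niu_ldg_assign_next(struct niu *np, u8 *ldg_num_map,
			       int *ldg_rotor, int ldn)
{
	int err;

	/* Bind this LDN to the LDG currently under the rotor. */
	err = niu_ldg_assign_ldn(np, np->parent,
				 ldg_num_map[*ldg_rotor], ldn);
	if (err)
		return err;

	/* Advance the rotor, wrapping at the number of LDGs in use. */
	(*ldg_rotor)++;
	if (*ldg_rotor == np->num_ldg)
		*ldg_rotor = 0;

	return 0;
}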

/* In niu_ldg_free(): */
if (np->flags & NIU_FLAGS_MSIX)
	pci_disable_msix(np->pdev);

/* In niu_get_of_props(): */
#ifdef CONFIG_SPARC64
	struct net_device *dev = np->dev;
	/* ... */
	if (np->parent->plat_type == PLAT_TYPE_NIU)
		dp = np->op->dev.of_node;
	else
		dp = pci_device_to_OF_node(np->pdev);

	phy_type = of_get_property(dp, "phy-type", NULL);
	if (!phy_type) {
		netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp);
		return -EINVAL;
	}

	if (!strcmp(phy_type, "none"))
		return -ENODEV;

	strcpy(np->vpd.phy_type, phy_type);

	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		netdev_err(dev, "%pOF: Illegal phy string [%s]\n",
			   dp, np->vpd.phy_type);
		return -EINVAL;
	}

	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n",
			   dp);
		return -EINVAL;
	}
	if (prop_len != dev->addr_len) {
		/* ... warn about the unexpected length; the address is
		 * still used ... */
	}
	/* ... copy mac_addr into the netdev ... */
	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		/* ... */
		netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
		return -EINVAL;
	}

	model = of_get_property(dp, "model", NULL);
	if (model)
		strcpy(np->vpd.model, model);

	if (of_property_read_bool(dp, "hot-swappable-phy")) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	}

	return 0;
#else
	return -EINVAL;
#endif
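/*
 * OF properties consumed here: "phy-type" (mandatory; parsed by
 * niu_phy_type_prop_decode()), "local-mac-address" (mandatory; must be
 * a valid unicast address), "model" (optional; cached in the VPD), and
 * "hot-swappable-phy" (optional boolean that selects the 10G fiber
 * hot-plug PHY configuration).
 */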

/* In niu_get_invariants(): */
err = niu_get_of_props(np);
if (err == -ENODEV)
	return err;
/* ... */
if (np->parent->plat_type == PLAT_TYPE_NIU)
	return -EINVAL;
/* ... */
netif_printk(np, probe, KERN_DEBUG, np->dev,
	     /* ... */);
/* ... */
if (np->flags & NIU_FLAGS_VPD_VALID) {
	/* ... */
}

if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
	/* ... */
}

/* In show_port_phy(): */
struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
u32 port_phy = p->port_phy;
/* ... */
for (i = 0; i < p->num_ports; i++) {
	/* ... */
}
/* ... */
return buf - orig_buf;

/* In show_plat_type(): */
struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
/* ... */
switch (p->plat_type) {
/* ... */
}

/* In __show_chan_per_port(): */
struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
/* ... */
arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

for (i = 0; i < p->num_ports; i++) {
	/* ... */
}
/* ... */
return buf - orig_buf;

/* In show_num_ports(): */
struct niu_parent *p = dev_get_platdata(&plat_dev->dev);

return sprintf(buf, "%d\n", p->num_ports);
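/*
 * These show_* routines back the sysfs attributes that
 * niu_new_parent() creates on the shared "niu-board" platform device,
 * exposing board-level state (PHY map, platform type, per-port channel
 * counts, number of ports) that no single netdev owns.
 */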

/* In niu_new_parent(): */
plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
					   NULL, 0);
/* ... */
	int err = device_create_file(&plat_dev->dev,
				     /* ... next parent attribute ... */);
/* ... */
p->index = niu_parent_index++;

plat_dev->dev.platform_data = p;
p->plat_dev = plat_dev;

memcpy(&p->id, id, sizeof(*id));
p->plat_type = ptype;
INIT_LIST_HEAD(&p->list);
atomic_set(&p->refcnt, 0);
list_add(&p->list, &niu_parent_list);
spin_lock_init(&p->lock);

p->rxdma_clock_divider = 7500;

p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
if (p->plat_type == PLAT_TYPE_NIU)
	p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
	int index = i - CLASS_CODE_USER_PROG1;

	p->tcam_key[index] = TCAM_KEY_TSEL;
	p->flow_key[index] = (FLOW_KEY_IPSA |
			      /* ... remaining FLOW_KEY_* bits ... */);
}

for (i = 0; i < LDN_MAX + 1; i++)
	p->ldg_map[i] = LDG_INVALID;
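/*
 * One niu_parent exists per physical board and is shared by all of its
 * ports: the first port probed creates it here, registers it on
 * niu_parent_list, and later ports find it again by id in
 * niu_get_parent().
 */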

/* In niu_get_parent(): */
int port = np->port;
/* ... search niu_parent_list for an already-registered board: */
	if (!memcmp(id, &tmp->id, sizeof(*id))) {
		/* ... found: reuse tmp ... */
	}
/* ... otherwise niu_new_parent() allocates one; then attach the port: */
err = sysfs_create_link(&p->plat_dev->dev.kobj,
			&np->device->kobj,
			port_name);
if (!err) {
	p->ports[port] = np;
	atomic_inc(&p->refcnt);
}

/* In niu_put_parent(): */
struct niu_parent *p = np->parent;
u8 port = np->port;
char port_name[8];

BUG_ON(!p || p->ports[port] != np);

netif_printk(np, probe, KERN_DEBUG, np->dev,
	     "%s() port[%u]\n", __func__, port);

sprintf(port_name, "port%d", port);
sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

p->ports[port] = NULL;
np->parent = NULL;

if (atomic_dec_and_test(&p->refcnt)) {
	list_del(&p->list);
	platform_device_unregister(p->plat_dev);
}
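/*
 * Teardown mirrors niu_get_parent(): the port's sysfs link and back
 * pointers are removed first, and the parent platform device is only
 * unregistered once the last port drops its reference.
 */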

/* In niu_alloc_and_init(): */
np->dev = dev;
np->pdev = pdev;
np->op = op;
np->device = gen_dev;
np->ops = ops;

np->msg_enable = niu_debug;

spin_lock_init(&np->lock);
INIT_WORK(&np->reset_task, niu_reset_task);

np->port = port;

/* In niu_assign_netdev_ops(): */
dev->netdev_ops = &niu_netdev_ops;
dev->ethtool_ops = &niu_ethtool_ops;
dev->watchdog_timeo = NIU_TX_TIMEOUT;

/* In niu_device_announce(): */
struct net_device *dev = np->dev;

pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);

if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
	pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
		dev->name,
		(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
		(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
		(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
		(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
		 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
		np->vpd.phy_type);
} else {
	pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
		dev->name,
		(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
		(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
		(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
		 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
		  "COPPER")),
		(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
		 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
		np->vpd.phy_type);
}

/* In niu_set_basic_features(): */
dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
dev->features |= dev->hw_features | NETIF_F_RXCSUM;
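/*
 * NETIF_F_RXCSUM appears in features but not hw_features, so RX
 * checksum offload is always on and not user-toggleable via ethtool;
 * presumably the hardware produces the checksum (and the RXHASH value)
 * as a byproduct of classification, leaving nothing to switch off.
 */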
9777 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); in niu_pci_init_one()
9783 dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n"); in niu_pci_init_one()
9784 err = -ENODEV; in niu_pci_init_one()
9790 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); in niu_pci_init_one()
9795 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); in niu_pci_init_one()
9796 err = -ENODEV; in niu_pci_init_one()
9800 dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, in niu_pci_init_one()
9801 &niu_pci_ops, PCI_FUNC(pdev->devfn)); in niu_pci_init_one()
9803 err = -ENOMEM; in niu_pci_init_one()
9809 parent_id.pci.domain = pci_domain_nr(pdev->bus); in niu_pci_init_one()
9810 parent_id.pci.bus = pdev->bus->number; in niu_pci_init_one()
9811 parent_id.pci.device = PCI_SLOT(pdev->devfn); in niu_pci_init_one()
9813 np->parent = niu_get_parent(np, &parent_id, in niu_pci_init_one()
9815 if (!np->parent) { in niu_pci_init_one()
9816 err = -ENOMEM; in niu_pci_init_one()
9826 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); in niu_pci_init_one()
9828 dev->features |= NETIF_F_HIGHDMA; in niu_pci_init_one()
9830 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); in niu_pci_init_one()
9832 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); in niu_pci_init_one()
9839 dev->priv_flags |= IFF_UNICAST_FLT; in niu_pci_init_one()
9841 np->regs = pci_ioremap_bar(pdev, 0); in niu_pci_init_one()
9842 if (!np->regs) { in niu_pci_init_one()
9843 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); in niu_pci_init_one()
9844 err = -ENOMEM; in niu_pci_init_one()
9851 dev->irq = pdev->irq; in niu_pci_init_one()
9853 /* MTU range: 68 - 9216 */ in niu_pci_init_one()
9854 dev->min_mtu = ETH_MIN_MTU; in niu_pci_init_one()
9855 dev->max_mtu = NIU_MAX_MTU; in niu_pci_init_one()
9861 if (err != -ENODEV) in niu_pci_init_one()
9862 dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); in niu_pci_init_one()
9868 dev_err(&pdev->dev, "Cannot register net device, aborting\n"); in niu_pci_init_one()
9879 if (np->regs) { in niu_pci_init_one()
9880 iounmap(np->regs); in niu_pci_init_one()
9881 np->regs = NULL; in niu_pci_init_one()
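/*
 * DMA policy: try a 44-bit mask first (presumably the widest the
 * Neptune DMA engines support) and advertise NETIF_F_HIGHDMA only if
 * it is accepted; otherwise fall back to a 32-bit streaming mask, and
 * give up if even that fails.
 */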

/* In niu_pci_remove_one(): */
if (np->regs) {
	iounmap(np->regs);
	np->regs = NULL;
}

/* In niu_suspend(): */
flush_work(&np->reset_task);
niu_netif_stop(np);

del_timer_sync(&np->timer);

spin_lock_irqsave(&np->lock, flags);
niu_enable_interrupts(np, 0);
spin_unlock_irqrestore(&np->lock, flags);

/* ... detach the netdev ... */

spin_lock_irqsave(&np->lock, flags);
niu_stop_hw(np);
spin_unlock_irqrestore(&np->lock, flags);

/* In niu_resume(): */
spin_lock_irqsave(&np->lock, flags);

err = niu_init_hw(np);
if (!err) {
	np->timer.expires = jiffies + HZ;
	add_timer(&np->timer);
	/* ... restart the netif layer ... */
}

spin_unlock_irqrestore(&np->lock, flags);
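/*
 * Suspend quiesces in order: flush pending reset work, stop the
 * netif/NAPI layer, kill the periodic timer, mask interrupts, then
 * stop the hardware. Resume re-runs hardware init under the lock and
 * re-arms the one-second service timer only if init succeeded.
 */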

/* In niu_of_probe(): */
reg = of_get_property(op->dev.of_node, "reg", NULL);
if (!reg) {
	dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n",
		op->dev.of_node);
	return -ENODEV;
}

dev = niu_alloc_and_init(&op->dev, NULL, op,
			 &niu_phys_ops, reg[0] & 0x1);
if (!dev) {
	err = -ENOMEM;
	/* ... */
}
/* ... */
parent_id.of = of_get_parent(op->dev.of_node);

np->parent = niu_get_parent(np, &parent_id,
			    PLAT_TYPE_NIU);
if (!np->parent) {
	err = -ENOMEM;
	/* ... */
}
/* ... */
np->regs = of_ioremap(&op->resource[1], 0,
		      resource_size(&op->resource[1]),
		      "niu regs");
if (!np->regs) {
	dev_err(&op->dev, "Cannot map device registers, aborting\n");
	err = -ENOMEM;
	/* ... */
}

np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
			    resource_size(&op->resource[2]),
			    "niu vregs-1");
if (!np->vir_regs_1) {
	dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
	err = -ENOMEM;
	/* ... */
}

np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
			    resource_size(&op->resource[3]),
			    "niu vregs-2");
if (!np->vir_regs_2) {
	dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
	err = -ENOMEM;
	/* ... */
}
/* ... */
err = niu_get_invariants(np);
if (err) {
	if (err != -ENODEV)
		dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
	/* ... */
}

err = register_netdev(dev);
if (err) {
	dev_err(&op->dev, "Cannot register net device, aborting\n");
	/* ... */
}
/* ... success path; the error unwind follows: */
if (np->vir_regs_1) {
	of_iounmap(&op->resource[2], np->vir_regs_1,
		   resource_size(&op->resource[2]));
	np->vir_regs_1 = NULL;
}

if (np->vir_regs_2) {
	of_iounmap(&op->resource[3], np->vir_regs_2,
		   resource_size(&op->resource[3]));
	np->vir_regs_2 = NULL;
}

if (np->regs) {
	of_iounmap(&op->resource[1], np->regs,
		   resource_size(&op->resource[1]));
	np->regs = NULL;
}
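/*
 * The OF probe maps three register resources: op->resource[1] is the
 * main register block (np->regs), while resources [2] and [3] are the
 * "virtualization" register pages that exist only on the sun4v NIU;
 * the PCI (Atlas) probe path maps a single BAR instead.
 */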

/* In niu_of_remove(): */
if (np->vir_regs_1) {
	of_iounmap(&op->resource[2], np->vir_regs_1,
		   resource_size(&op->resource[2]));
	np->vir_regs_1 = NULL;
}

if (np->vir_regs_2) {
	of_iounmap(&op->resource[3], np->vir_regs_2,
		   resource_size(&op->resource[3]));
	np->vir_regs_2 = NULL;
}

if (np->regs) {
	of_iounmap(&op->resource[1], np->regs,
		   resource_size(&op->resource[1]));
	np->regs = NULL;
}