
Lines Matching refs:np

727 		__this_cpu_inc(np->txrx_stats->member)
729 __this_cpu_add(np->txrx_stats->member, (count))
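The two macro bodies above (source lines 727 and 729) bump the driver's per-CPU software counters without a lock; the __this_cpu_* ops keep the read-modify-write local to the running CPU. A minimal usage sketch, hedged — the wrapper and field names below are illustrative, only the __this_cpu_inc/__this_cpu_add calls are taken from the listing:

        /* illustrative wrappers around the macro bodies shown above */
        #define ex_stats_inc(member)         __this_cpu_inc(np->txrx_stats->member)
        #define ex_stats_add(member, count)  __this_cpu_add(np->txrx_stats->member, (count))

        /* e.g. after a completed transmit (field names are placeholders): */
        ex_stats_inc(stat_tx_packets);
        ex_stats_add(stat_tx_bytes, skb->len);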
961 static bool nv_optimized(struct fe_priv *np) in nv_optimized() argument
963 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) in nv_optimized()
998 struct fe_priv *np = get_nvpriv(dev); in setup_hw_rings() local
1001 if (!nv_optimized(np)) { in setup_hw_rings()
1003 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); in setup_hw_rings()
1005 …writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysA… in setup_hw_rings()
1008 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); in setup_hw_rings()
1009 writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh); in setup_hw_rings()
1012 …writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPh… in setup_hw_rings()
1013 …writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingP… in setup_hw_rings()
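A hedged aside on the writel() pairs above: setup_hw_rings() programs the 64-bit ring base address as two 32-bit register writes, splitting it with the driver's dma_low()/dma_high() helpers. The sketch below restates that split with stand-in helper names; the NvRegRxRingPhysAddr/NvRegRxRingPhysAddrHigh offsets are the ones visible in the listing.

        static inline u32 ex_dma_low(dma_addr_t addr)
        {
                return (u32)addr;                      /* lower 32 bits of the bus address */
        }

        static inline u32 ex_dma_high(dma_addr_t addr)
        {
                return (u32)((u64)addr >> 32);         /* upper 32 bits; zero for 32-bit DMA */
        }

        /* usage mirroring the listing:
         *   writel(ex_dma_low(np->ring_addr),  base + NvRegRxRingPhysAddr);
         *   writel(ex_dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
         */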
1020 struct fe_priv *np = get_nvpriv(dev); in free_rings() local
1022 if (!nv_optimized(np)) { in free_rings()
1023 if (np->rx_ring.orig) in free_rings()
1024 dma_free_coherent(&np->pci_dev->dev, in free_rings()
1026 (np->rx_ring_size + in free_rings()
1027 np->tx_ring_size), in free_rings()
1028 np->rx_ring.orig, np->ring_addr); in free_rings()
1030 if (np->rx_ring.ex) in free_rings()
1031 dma_free_coherent(&np->pci_dev->dev, in free_rings()
1033 (np->rx_ring_size + in free_rings()
1034 np->tx_ring_size), in free_rings()
1035 np->rx_ring.ex, np->ring_addr); in free_rings()
1037 kfree(np->rx_skb); in free_rings()
1038 kfree(np->tx_skb); in free_rings()
1043 struct fe_priv *np = get_nvpriv(dev); in using_multi_irqs() local
1045 if (!(np->msi_flags & NV_MSI_X_ENABLED) || in using_multi_irqs()
1046 ((np->msi_flags & NV_MSI_X_ENABLED) && in using_multi_irqs()
1047 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) in using_multi_irqs()
1055 struct fe_priv *np = get_nvpriv(dev); in nv_txrx_gate() local
1059 if (!np->mac_in_use && in nv_txrx_gate()
1060 (np->driver_data & DEV_HAS_POWER_CNTRL)) { in nv_txrx_gate()
1072 struct fe_priv *np = get_nvpriv(dev); in nv_enable_irq() local
1075 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_enable_irq()
1076 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); in nv_enable_irq()
1078 enable_irq(np->pci_dev->irq); in nv_enable_irq()
1080 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); in nv_enable_irq()
1081 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); in nv_enable_irq()
1082 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); in nv_enable_irq()
1088 struct fe_priv *np = get_nvpriv(dev); in nv_disable_irq() local
1091 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_disable_irq()
1092 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); in nv_disable_irq()
1094 disable_irq(np->pci_dev->irq); in nv_disable_irq()
1096 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); in nv_disable_irq()
1097 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); in nv_disable_irq()
1098 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); in nv_disable_irq()
1112 struct fe_priv *np = get_nvpriv(dev); in nv_disable_hw_interrupts() local
1115 if (np->msi_flags & NV_MSI_X_ENABLED) { in nv_disable_hw_interrupts()
1118 if (np->msi_flags & NV_MSI_ENABLED) in nv_disable_hw_interrupts()
1126 struct fe_priv *np = get_nvpriv(dev); in nv_napi_enable() local
1128 napi_enable(&np->napi); in nv_napi_enable()
1133 struct fe_priv *np = get_nvpriv(dev); in nv_napi_disable() local
1135 napi_disable(&np->napi); in nv_napi_disable()
1181 struct fe_priv *np = netdev_priv(dev); in phy_reset() local
1186 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) in phy_reset()
1195 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_reset()
1203 static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np) in init_realtek_8211b() argument
1220 if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init)) in init_realtek_8211b()
1227 static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np) in init_realtek_8211c() argument
1242 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); in init_realtek_8211c()
1244 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) in init_realtek_8211c()
1246 if (mii_rw(dev, np->phyaddr, in init_realtek_8211c()
1249 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); in init_realtek_8211c()
1252 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) in init_realtek_8211c()
1255 if (mii_rw(dev, np->phyaddr, in init_realtek_8211c()
1262 static int init_realtek_8201(struct net_device *dev, struct fe_priv *np) in init_realtek_8201() argument
1266 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { in init_realtek_8201()
1267 phy_reserved = mii_rw(dev, np->phyaddr, in init_realtek_8201()
1270 if (mii_rw(dev, np->phyaddr, in init_realtek_8201()
1278 static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np) in init_realtek_8201_cross() argument
1283 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1286 phy_reserved = mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1290 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1293 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1301 static int init_cicada(struct net_device *dev, struct fe_priv *np, in init_cicada() argument
1307 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); in init_cicada()
1310 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) in init_cicada()
1312 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); in init_cicada()
1314 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) in init_cicada()
1317 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); in init_cicada()
1319 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) in init_cicada()
1325 static int init_vitesse(struct net_device *dev, struct fe_priv *np) in init_vitesse() argument
1329 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1332 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1335 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1337 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1339 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1343 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1345 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1348 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1351 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1355 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1357 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1359 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1361 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1364 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1367 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1369 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1371 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1375 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1377 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1380 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1389 struct fe_priv *np = get_nvpriv(dev); in phy_init() local
1395 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in phy_init()
1396 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); in phy_init()
1398 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { in phy_init()
1400 pci_name(np->pci_dev)); in phy_init()
1404 if (np->phy_oui == PHY_OUI_REALTEK) { in phy_init()
1405 if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1406 np->phy_rev == PHY_REV_REALTEK_8211B) { in phy_init()
1407 if (init_realtek_8211b(dev, np)) { in phy_init()
1409 pci_name(np->pci_dev)); in phy_init()
1412 } else if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1413 np->phy_rev == PHY_REV_REALTEK_8211C) { in phy_init()
1414 if (init_realtek_8211c(dev, np)) { in phy_init()
1416 pci_name(np->pci_dev)); in phy_init()
1419 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { in phy_init()
1420 if (init_realtek_8201(dev, np)) { in phy_init()
1422 pci_name(np->pci_dev)); in phy_init()
1429 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in phy_init()
1433 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { in phy_init()
1435 pci_name(np->pci_dev)); in phy_init()
1443 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in phy_init()
1445 np->gigabit = PHY_GIGABIT; in phy_init()
1446 mii_control_1000 = mii_rw(dev, np->phyaddr, in phy_init()
1454 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { in phy_init()
1456 pci_name(np->pci_dev)); in phy_init()
1460 np->gigabit = 0; in phy_init()
1462 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_init()
1465 if (np->phy_oui == PHY_OUI_REALTEK && in phy_init()
1466 np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1467 np->phy_rev == PHY_REV_REALTEK_8211C) { in phy_init()
1470 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { in phy_init()
1472 pci_name(np->pci_dev)); in phy_init()
1481 pci_name(np->pci_dev)); in phy_init()
1487 if (np->phy_oui == PHY_OUI_CICADA) { in phy_init()
1488 if (init_cicada(dev, np, phyinterface)) { in phy_init()
1490 pci_name(np->pci_dev)); in phy_init()
1493 } else if (np->phy_oui == PHY_OUI_VITESSE) { in phy_init()
1494 if (init_vitesse(dev, np)) { in phy_init()
1496 pci_name(np->pci_dev)); in phy_init()
1499 } else if (np->phy_oui == PHY_OUI_REALTEK) { in phy_init()
1500 if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1501 np->phy_rev == PHY_REV_REALTEK_8211B) { in phy_init()
1503 if (init_realtek_8211b(dev, np)) { in phy_init()
1505 pci_name(np->pci_dev)); in phy_init()
1508 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { in phy_init()
1509 if (init_realtek_8201(dev, np) || in phy_init()
1510 init_realtek_8201_cross(dev, np)) { in phy_init()
1512 pci_name(np->pci_dev)); in phy_init()
1519 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); in phy_init()
1522 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_init()
1526 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) in phy_init()
1534 struct fe_priv *np = netdev_priv(dev); in nv_start_rx() local
1539 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { in nv_start_rx()
1544 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_start_rx()
1547 if (np->mac_in_use) in nv_start_rx()
1555 struct fe_priv *np = netdev_priv(dev); in nv_stop_rx() local
1559 if (!np->mac_in_use) in nv_stop_rx()
1570 if (!np->mac_in_use) in nv_stop_rx()
1576 struct fe_priv *np = netdev_priv(dev); in nv_start_tx() local
1581 if (np->mac_in_use) in nv_start_tx()
1589 struct fe_priv *np = netdev_priv(dev); in nv_stop_tx() local
1593 if (!np->mac_in_use) in nv_stop_tx()
1604 if (!np->mac_in_use) in nv_stop_tx()
1623 struct fe_priv *np = netdev_priv(dev); in nv_txrx_reset() local
1626 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); in nv_txrx_reset()
1629 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); in nv_txrx_reset()
1635 struct fe_priv *np = netdev_priv(dev); in nv_mac_reset() local
1639 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); in nv_mac_reset()
1659 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); in nv_mac_reset()
1666 struct fe_priv *np = netdev_priv(dev); in nv_update_stats() local
1673 assert_spin_locked(&np->hwstats_lock); in nv_update_stats()
1676 np->estats.tx_bytes += readl(base + NvRegTxCnt); in nv_update_stats()
1677 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); in nv_update_stats()
1678 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); in nv_update_stats()
1679 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); in nv_update_stats()
1680 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); in nv_update_stats()
1681 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); in nv_update_stats()
1682 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); in nv_update_stats()
1683 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); in nv_update_stats()
1684 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); in nv_update_stats()
1685 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); in nv_update_stats()
1686 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); in nv_update_stats()
1687 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); in nv_update_stats()
1688 np->estats.rx_runt += readl(base + NvRegRxRunt); in nv_update_stats()
1689 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); in nv_update_stats()
1690 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); in nv_update_stats()
1691 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); in nv_update_stats()
1692 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); in nv_update_stats()
1693 np->estats.rx_length_error += readl(base + NvRegRxLenErr); in nv_update_stats()
1694 np->estats.rx_unicast += readl(base + NvRegRxUnicast); in nv_update_stats()
1695 np->estats.rx_multicast += readl(base + NvRegRxMulticast); in nv_update_stats()
1696 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); in nv_update_stats()
1697 np->estats.rx_packets = in nv_update_stats()
1698 np->estats.rx_unicast + in nv_update_stats()
1699 np->estats.rx_multicast + in nv_update_stats()
1700 np->estats.rx_broadcast; in nv_update_stats()
1701 np->estats.rx_errors_total = in nv_update_stats()
1702 np->estats.rx_crc_errors + in nv_update_stats()
1703 np->estats.rx_over_errors + in nv_update_stats()
1704 np->estats.rx_frame_error + in nv_update_stats()
1705 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + in nv_update_stats()
1706 np->estats.rx_late_collision + in nv_update_stats()
1707 np->estats.rx_runt + in nv_update_stats()
1708 np->estats.rx_frame_too_long; in nv_update_stats()
1709 np->estats.tx_errors_total = in nv_update_stats()
1710 np->estats.tx_late_collision + in nv_update_stats()
1711 np->estats.tx_fifo_errors + in nv_update_stats()
1712 np->estats.tx_carrier_errors + in nv_update_stats()
1713 np->estats.tx_excess_deferral + in nv_update_stats()
1714 np->estats.tx_retry_error; in nv_update_stats()
1716 if (np->driver_data & DEV_HAS_STATISTICS_V2) { in nv_update_stats()
1717 np->estats.tx_deferral += readl(base + NvRegTxDef); in nv_update_stats()
1718 np->estats.tx_packets += readl(base + NvRegTxFrame); in nv_update_stats()
1719 np->estats.rx_bytes += readl(base + NvRegRxCnt); in nv_update_stats()
1720 np->estats.tx_pause += readl(base + NvRegTxPause); in nv_update_stats()
1721 np->estats.rx_pause += readl(base + NvRegRxPause); in nv_update_stats()
1722 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); in nv_update_stats()
1723 np->estats.rx_errors_total += np->estats.rx_drop_frame; in nv_update_stats()
1726 if (np->driver_data & DEV_HAS_STATISTICS_V3) { in nv_update_stats()
1727 np->estats.tx_unicast += readl(base + NvRegTxUnicast); in nv_update_stats()
1728 np->estats.tx_multicast += readl(base + NvRegTxMulticast); in nv_update_stats()
1729 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); in nv_update_stats()
1733 static void nv_get_stats(int cpu, struct fe_priv *np, in nv_get_stats() argument
1736 struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu); in nv_get_stats()
1742 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); in nv_get_stats()
1747 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); in nv_get_stats()
1755 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); in nv_get_stats()
1759 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); in nv_get_stats()
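The two do/while blocks above (source lines 1742-1759) are the reader side of the u64_stats seqcount protocol: nv_get_stats() snapshots the per-CPU counters and retries if a writer raced with it. A minimal, hedged sketch of that reader loop — the counter field name is a placeholder, the syncp members are the ones in the listing:

        unsigned int start;
        u64 packets;

        do {
                start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
                packets = src->stat_rx_packets;        /* placeholder per-CPU field */
        } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, start));

Writers bracket their updates with the matching u64_stats_update_begin()/u64_stats_update_end() pairs that appear throughout the rest of the listing.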
1777 struct fe_priv *np = netdev_priv(dev); in nv_get_stats64() local
1792 nv_get_stats(cpu, np, storage); in nv_get_stats64()
1795 if (np->driver_data & DEV_HAS_STATISTICS_V123) { in nv_get_stats64()
1796 spin_lock_bh(&np->hwstats_lock); in nv_get_stats64()
1801 storage->rx_errors = np->estats.rx_errors_total; in nv_get_stats64()
1802 storage->tx_errors = np->estats.tx_errors_total; in nv_get_stats64()
1805 storage->multicast = np->estats.rx_multicast; in nv_get_stats64()
1808 storage->rx_length_errors = np->estats.rx_length_error; in nv_get_stats64()
1809 storage->rx_over_errors = np->estats.rx_over_errors; in nv_get_stats64()
1810 storage->rx_crc_errors = np->estats.rx_crc_errors; in nv_get_stats64()
1811 storage->rx_frame_errors = np->estats.rx_frame_align_error; in nv_get_stats64()
1812 storage->rx_fifo_errors = np->estats.rx_drop_frame; in nv_get_stats64()
1815 storage->tx_carrier_errors = np->estats.tx_carrier_errors; in nv_get_stats64()
1816 storage->tx_fifo_errors = np->estats.tx_fifo_errors; in nv_get_stats64()
1818 spin_unlock_bh(&np->hwstats_lock); in nv_get_stats64()
1829 struct fe_priv *np = netdev_priv(dev); in nv_alloc_rx() local
1832 less_rx = np->get_rx.orig; in nv_alloc_rx()
1833 if (less_rx-- == np->rx_ring.orig) in nv_alloc_rx()
1834 less_rx = np->last_rx.orig; in nv_alloc_rx()
1836 while (np->put_rx.orig != less_rx) { in nv_alloc_rx()
1837 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); in nv_alloc_rx()
1839 np->put_rx_ctx->skb = skb; in nv_alloc_rx()
1840 np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_alloc_rx()
1844 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_alloc_rx()
1845 np->put_rx_ctx->dma))) { in nv_alloc_rx()
1849 np->put_rx_ctx->dma_len = skb_tailroom(skb); in nv_alloc_rx()
1850 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); in nv_alloc_rx()
1852 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); in nv_alloc_rx()
1853 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) in nv_alloc_rx()
1854 np->put_rx.orig = np->rx_ring.orig; in nv_alloc_rx()
1855 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) in nv_alloc_rx()
1856 np->put_rx_ctx = np->rx_skb; in nv_alloc_rx()
1859 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_alloc_rx()
1861 u64_stats_update_end(&np->swstats_rx_syncp); in nv_alloc_rx()
1870 struct fe_priv *np = netdev_priv(dev); in nv_alloc_rx_optimized() local
1873 less_rx = np->get_rx.ex; in nv_alloc_rx_optimized()
1874 if (less_rx-- == np->rx_ring.ex) in nv_alloc_rx_optimized()
1875 less_rx = np->last_rx.ex; in nv_alloc_rx_optimized()
1877 while (np->put_rx.ex != less_rx) { in nv_alloc_rx_optimized()
1878 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); in nv_alloc_rx_optimized()
1880 np->put_rx_ctx->skb = skb; in nv_alloc_rx_optimized()
1881 np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_alloc_rx_optimized()
1885 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_alloc_rx_optimized()
1886 np->put_rx_ctx->dma))) { in nv_alloc_rx_optimized()
1890 np->put_rx_ctx->dma_len = skb_tailroom(skb); in nv_alloc_rx_optimized()
1891 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); in nv_alloc_rx_optimized()
1892 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); in nv_alloc_rx_optimized()
1894 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); in nv_alloc_rx_optimized()
1895 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) in nv_alloc_rx_optimized()
1896 np->put_rx.ex = np->rx_ring.ex; in nv_alloc_rx_optimized()
1897 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) in nv_alloc_rx_optimized()
1898 np->put_rx_ctx = np->rx_skb; in nv_alloc_rx_optimized()
1901 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_alloc_rx_optimized()
1903 u64_stats_update_end(&np->swstats_rx_syncp); in nv_alloc_rx_optimized()
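Both refill loops above follow the same allocate-map-publish pattern: allocate an skb of rx_buf_sz plus NV_RX_ALLOC_PAD, DMA-map its data area, and only hand the descriptor to the NIC once the mapping succeeded. A condensed, hedged sketch of one iteration (error handling simplified; names are taken from the listing where they appear there):

        struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);

        if (skb) {
                dma_addr_t dma = dma_map_single(&np->pci_dev->dev, skb->data,
                                                skb_tailroom(skb), DMA_FROM_DEVICE);
                if (dma_mapping_error(&np->pci_dev->dev, dma)) {
                        kfree_skb(skb);                /* drop; a later refill retries */
                } else {
                        /* record skb and dma in the ring context, write the buffer
                         * address into the descriptor, then set the AVAIL flag so
                         * hardware owns the slot */
                }
        }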
1913 struct fe_priv *np = from_timer(np, t, oom_kick); in nv_do_rx_refill() local
1916 napi_schedule(&np->napi); in nv_do_rx_refill()
1921 struct fe_priv *np = netdev_priv(dev); in nv_init_rx() local
1924 np->get_rx = np->rx_ring; in nv_init_rx()
1925 np->put_rx = np->rx_ring; in nv_init_rx()
1927 if (!nv_optimized(np)) in nv_init_rx()
1928 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; in nv_init_rx()
1930 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; in nv_init_rx()
1931 np->get_rx_ctx = np->rx_skb; in nv_init_rx()
1932 np->put_rx_ctx = np->rx_skb; in nv_init_rx()
1933 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; in nv_init_rx()
1935 for (i = 0; i < np->rx_ring_size; i++) { in nv_init_rx()
1936 if (!nv_optimized(np)) { in nv_init_rx()
1937 np->rx_ring.orig[i].flaglen = 0; in nv_init_rx()
1938 np->rx_ring.orig[i].buf = 0; in nv_init_rx()
1940 np->rx_ring.ex[i].flaglen = 0; in nv_init_rx()
1941 np->rx_ring.ex[i].txvlan = 0; in nv_init_rx()
1942 np->rx_ring.ex[i].bufhigh = 0; in nv_init_rx()
1943 np->rx_ring.ex[i].buflow = 0; in nv_init_rx()
1945 np->rx_skb[i].skb = NULL; in nv_init_rx()
1946 np->rx_skb[i].dma = 0; in nv_init_rx()
1952 struct fe_priv *np = netdev_priv(dev); in nv_init_tx() local
1955 np->get_tx = np->tx_ring; in nv_init_tx()
1956 np->put_tx = np->tx_ring; in nv_init_tx()
1958 if (!nv_optimized(np)) in nv_init_tx()
1959 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; in nv_init_tx()
1961 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; in nv_init_tx()
1962 np->get_tx_ctx = np->tx_skb; in nv_init_tx()
1963 np->put_tx_ctx = np->tx_skb; in nv_init_tx()
1964 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; in nv_init_tx()
1965 netdev_reset_queue(np->dev); in nv_init_tx()
1966 np->tx_pkts_in_progress = 0; in nv_init_tx()
1967 np->tx_change_owner = NULL; in nv_init_tx()
1968 np->tx_end_flip = NULL; in nv_init_tx()
1969 np->tx_stop = 0; in nv_init_tx()
1971 for (i = 0; i < np->tx_ring_size; i++) { in nv_init_tx()
1972 if (!nv_optimized(np)) { in nv_init_tx()
1973 np->tx_ring.orig[i].flaglen = 0; in nv_init_tx()
1974 np->tx_ring.orig[i].buf = 0; in nv_init_tx()
1976 np->tx_ring.ex[i].flaglen = 0; in nv_init_tx()
1977 np->tx_ring.ex[i].txvlan = 0; in nv_init_tx()
1978 np->tx_ring.ex[i].bufhigh = 0; in nv_init_tx()
1979 np->tx_ring.ex[i].buflow = 0; in nv_init_tx()
1981 np->tx_skb[i].skb = NULL; in nv_init_tx()
1982 np->tx_skb[i].dma = 0; in nv_init_tx()
1983 np->tx_skb[i].dma_len = 0; in nv_init_tx()
1984 np->tx_skb[i].dma_single = 0; in nv_init_tx()
1985 np->tx_skb[i].first_tx_desc = NULL; in nv_init_tx()
1986 np->tx_skb[i].next_tx_ctx = NULL; in nv_init_tx()
1992 struct fe_priv *np = netdev_priv(dev); in nv_init_ring() local
1997 if (!nv_optimized(np)) in nv_init_ring()
2003 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) in nv_unmap_txskb() argument
2007 dma_unmap_single(&np->pci_dev->dev, tx_skb->dma, in nv_unmap_txskb()
2011 dma_unmap_page(&np->pci_dev->dev, tx_skb->dma, in nv_unmap_txskb()
2018 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) in nv_release_txskb() argument
2020 nv_unmap_txskb(np, tx_skb); in nv_release_txskb()
2031 struct fe_priv *np = netdev_priv(dev); in nv_drain_tx() local
2034 for (i = 0; i < np->tx_ring_size; i++) { in nv_drain_tx()
2035 if (!nv_optimized(np)) { in nv_drain_tx()
2036 np->tx_ring.orig[i].flaglen = 0; in nv_drain_tx()
2037 np->tx_ring.orig[i].buf = 0; in nv_drain_tx()
2039 np->tx_ring.ex[i].flaglen = 0; in nv_drain_tx()
2040 np->tx_ring.ex[i].txvlan = 0; in nv_drain_tx()
2041 np->tx_ring.ex[i].bufhigh = 0; in nv_drain_tx()
2042 np->tx_ring.ex[i].buflow = 0; in nv_drain_tx()
2044 if (nv_release_txskb(np, &np->tx_skb[i])) { in nv_drain_tx()
2045 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_drain_tx()
2047 u64_stats_update_end(&np->swstats_tx_syncp); in nv_drain_tx()
2049 np->tx_skb[i].dma = 0; in nv_drain_tx()
2050 np->tx_skb[i].dma_len = 0; in nv_drain_tx()
2051 np->tx_skb[i].dma_single = 0; in nv_drain_tx()
2052 np->tx_skb[i].first_tx_desc = NULL; in nv_drain_tx()
2053 np->tx_skb[i].next_tx_ctx = NULL; in nv_drain_tx()
2055 np->tx_pkts_in_progress = 0; in nv_drain_tx()
2056 np->tx_change_owner = NULL; in nv_drain_tx()
2057 np->tx_end_flip = NULL; in nv_drain_tx()
2062 struct fe_priv *np = netdev_priv(dev); in nv_drain_rx() local
2065 for (i = 0; i < np->rx_ring_size; i++) { in nv_drain_rx()
2066 if (!nv_optimized(np)) { in nv_drain_rx()
2067 np->rx_ring.orig[i].flaglen = 0; in nv_drain_rx()
2068 np->rx_ring.orig[i].buf = 0; in nv_drain_rx()
2070 np->rx_ring.ex[i].flaglen = 0; in nv_drain_rx()
2071 np->rx_ring.ex[i].txvlan = 0; in nv_drain_rx()
2072 np->rx_ring.ex[i].bufhigh = 0; in nv_drain_rx()
2073 np->rx_ring.ex[i].buflow = 0; in nv_drain_rx()
2076 if (np->rx_skb[i].skb) { in nv_drain_rx()
2077 dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma, in nv_drain_rx()
2078 (skb_end_pointer(np->rx_skb[i].skb) - in nv_drain_rx()
2079 np->rx_skb[i].skb->data), in nv_drain_rx()
2081 dev_kfree_skb(np->rx_skb[i].skb); in nv_drain_rx()
2082 np->rx_skb[i].skb = NULL; in nv_drain_rx()
2093 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) in nv_get_empty_tx_slots() argument
2095 …return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_r… in nv_get_empty_tx_slots()
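The single expression above (source line 2095) derives the number of free TX slots from the circular distance between put_tx_ctx and get_tx_ctx. A hedged worked example on indices instead of pointers: with a ring of 8 entries, put at 5 and get at 2, (8 + (5 - 2)) % 8 = 3 entries are in flight, leaving 8 - 3 = 5 free slots.

        static u32 ex_empty_slots(int ring_size, int put, int get)
        {
                /* same modular arithmetic as nv_get_empty_tx_slots(), on indices;
                 * (put - get) can be negative once put has wrapped past the end */
                return ring_size - ((ring_size + (put - get)) % ring_size);
        }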
2212 struct fe_priv *np = netdev_priv(dev); in nv_start_xmit() local
2214 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); in nv_start_xmit()
2238 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit()
2239 empty_slots = nv_get_empty_tx_slots(np); in nv_start_xmit()
2242 np->tx_stop = 1; in nv_start_xmit()
2243 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2251 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2253 start_tx = put_tx = np->put_tx.orig; in nv_start_xmit()
2258 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_start_xmit()
2261 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit()
2262 np->put_tx_ctx->dma))) { in nv_start_xmit()
2265 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit()
2267 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit()
2273 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit()
2274 np->put_tx_ctx->dma_single = 1; in nv_start_xmit()
2275 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); in nv_start_xmit()
2278 tx_flags = np->tx_flags; in nv_start_xmit()
2281 if (unlikely(put_tx++ == np->last_tx.orig)) in nv_start_xmit()
2282 put_tx = np->tx_ring.orig; in nv_start_xmit()
2283 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2284 np->put_tx_ctx = np->tx_skb; in nv_start_xmit()
2295 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; in nv_start_xmit()
2298 np->put_tx_ctx->dma = skb_frag_dma_map( in nv_start_xmit()
2299 &np->pci_dev->dev, in nv_start_xmit()
2303 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit()
2304 np->put_tx_ctx->dma))) { in nv_start_xmit()
2308 nv_unmap_txskb(np, start_tx_ctx); in nv_start_xmit()
2309 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2310 tmp_tx_ctx = np->tx_skb; in nv_start_xmit()
2311 } while (tmp_tx_ctx != np->put_tx_ctx); in nv_start_xmit()
2313 np->put_tx_ctx = start_tx_ctx; in nv_start_xmit()
2314 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit()
2316 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit()
2323 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit()
2324 np->put_tx_ctx->dma_single = 0; in nv_start_xmit()
2325 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); in nv_start_xmit()
2330 if (unlikely(put_tx++ == np->last_tx.orig)) in nv_start_xmit()
2331 put_tx = np->tx_ring.orig; in nv_start_xmit()
2332 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2333 np->put_tx_ctx = np->tx_skb; in nv_start_xmit()
2337 if (unlikely(put_tx == np->tx_ring.orig)) in nv_start_xmit()
2338 prev_tx = np->last_tx.orig; in nv_start_xmit()
2342 if (unlikely(np->put_tx_ctx == np->tx_skb)) in nv_start_xmit()
2343 prev_tx_ctx = np->last_tx_ctx; in nv_start_xmit()
2345 prev_tx_ctx = np->put_tx_ctx - 1; in nv_start_xmit()
2359 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit()
2364 netdev_sent_queue(np->dev, skb->len); in nv_start_xmit()
2368 np->put_tx.orig = put_tx; in nv_start_xmit()
2370 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2376 txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; in nv_start_xmit()
2386 struct fe_priv *np = netdev_priv(dev); in nv_start_xmit_optimized() local
2413 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit_optimized()
2414 empty_slots = nv_get_empty_tx_slots(np); in nv_start_xmit_optimized()
2417 np->tx_stop = 1; in nv_start_xmit_optimized()
2418 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2427 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2429 start_tx = put_tx = np->put_tx.ex; in nv_start_xmit_optimized()
2430 start_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2435 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_start_xmit_optimized()
2438 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit_optimized()
2439 np->put_tx_ctx->dma))) { in nv_start_xmit_optimized()
2442 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2444 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2450 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit_optimized()
2451 np->put_tx_ctx->dma_single = 1; in nv_start_xmit_optimized()
2452 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2453 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2459 if (unlikely(put_tx++ == np->last_tx.ex)) in nv_start_xmit_optimized()
2460 put_tx = np->tx_ring.ex; in nv_start_xmit_optimized()
2461 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2462 np->put_tx_ctx = np->tx_skb; in nv_start_xmit_optimized()
2474 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2475 np->put_tx_ctx->dma = skb_frag_dma_map( in nv_start_xmit_optimized()
2476 &np->pci_dev->dev, in nv_start_xmit_optimized()
2481 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit_optimized()
2482 np->put_tx_ctx->dma))) { in nv_start_xmit_optimized()
2486 nv_unmap_txskb(np, start_tx_ctx); in nv_start_xmit_optimized()
2487 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2488 tmp_tx_ctx = np->tx_skb; in nv_start_xmit_optimized()
2489 } while (tmp_tx_ctx != np->put_tx_ctx); in nv_start_xmit_optimized()
2491 np->put_tx_ctx = start_tx_ctx; in nv_start_xmit_optimized()
2492 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2494 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2500 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit_optimized()
2501 np->put_tx_ctx->dma_single = 0; in nv_start_xmit_optimized()
2502 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2503 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2508 if (unlikely(put_tx++ == np->last_tx.ex)) in nv_start_xmit_optimized()
2509 put_tx = np->tx_ring.ex; in nv_start_xmit_optimized()
2510 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2511 np->put_tx_ctx = np->tx_skb; in nv_start_xmit_optimized()
2515 if (unlikely(put_tx == np->tx_ring.ex)) in nv_start_xmit_optimized()
2516 prev_tx = np->last_tx.ex; in nv_start_xmit_optimized()
2520 if (unlikely(np->put_tx_ctx == np->tx_skb)) in nv_start_xmit_optimized()
2521 prev_tx_ctx = np->last_tx_ctx; in nv_start_xmit_optimized()
2523 prev_tx_ctx = np->put_tx_ctx - 1; in nv_start_xmit_optimized()
2544 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit_optimized()
2546 if (np->tx_limit) { in nv_start_xmit_optimized()
2552 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { in nv_start_xmit_optimized()
2553 if (!np->tx_change_owner) in nv_start_xmit_optimized()
2554 np->tx_change_owner = start_tx_ctx; in nv_start_xmit_optimized()
2559 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2560 np->tx_end_flip = np->put_tx_ctx; in nv_start_xmit_optimized()
2562 np->tx_pkts_in_progress++; in nv_start_xmit_optimized()
2569 netdev_sent_queue(np->dev, skb->len); in nv_start_xmit_optimized()
2573 np->put_tx.ex = put_tx; in nv_start_xmit_optimized()
2575 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2581 txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; in nv_start_xmit_optimized()
2590 struct fe_priv *np = netdev_priv(dev); in nv_tx_flip_ownership() local
2592 np->tx_pkts_in_progress--; in nv_tx_flip_ownership()
2593 if (np->tx_change_owner) { in nv_tx_flip_ownership()
2594 np->tx_change_owner->first_tx_desc->flaglen |= in nv_tx_flip_ownership()
2596 np->tx_pkts_in_progress++; in nv_tx_flip_ownership()
2598 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; in nv_tx_flip_ownership()
2599 if (np->tx_change_owner == np->tx_end_flip) in nv_tx_flip_ownership()
2600 np->tx_change_owner = NULL; in nv_tx_flip_ownership()
2602 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_tx_flip_ownership()
2613 struct fe_priv *np = netdev_priv(dev); in nv_tx_done() local
2616 struct ring_desc *orig_get_tx = np->get_tx.orig; in nv_tx_done()
2619 while ((np->get_tx.orig != np->put_tx.orig) && in nv_tx_done()
2620 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && in nv_tx_done()
2623 nv_unmap_txskb(np, np->get_tx_ctx); in nv_tx_done()
2625 if (np->desc_ver == DESC_VER_1) { in nv_tx_done()
2634 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done()
2636 len = np->get_tx_ctx->skb->len; in nv_tx_done()
2638 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done()
2640 bytes_compl += np->get_tx_ctx->skb->len; in nv_tx_done()
2641 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done()
2642 np->get_tx_ctx->skb = NULL; in nv_tx_done()
2654 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done()
2656 len = np->get_tx_ctx->skb->len; in nv_tx_done()
2658 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done()
2660 bytes_compl += np->get_tx_ctx->skb->len; in nv_tx_done()
2661 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done()
2662 np->get_tx_ctx->skb = NULL; in nv_tx_done()
2666 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) in nv_tx_done()
2667 np->get_tx.orig = np->tx_ring.orig; in nv_tx_done()
2668 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) in nv_tx_done()
2669 np->get_tx_ctx = np->tx_skb; in nv_tx_done()
2672 netdev_completed_queue(np->dev, tx_work, bytes_compl); in nv_tx_done()
2674 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { in nv_tx_done()
2675 np->tx_stop = 0; in nv_tx_done()
2683 struct fe_priv *np = netdev_priv(dev); in nv_tx_done_optimized() local
2686 struct ring_desc_ex *orig_get_tx = np->get_tx.ex; in nv_tx_done_optimized()
2689 while ((np->get_tx.ex != np->put_tx.ex) && in nv_tx_done_optimized()
2690 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && in nv_tx_done_optimized()
2693 nv_unmap_txskb(np, np->get_tx_ctx); in nv_tx_done_optimized()
2699 if (np->driver_data & DEV_HAS_GEAR_MODE) in nv_tx_done_optimized()
2707 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done_optimized()
2709 len = np->get_tx_ctx->skb->len; in nv_tx_done_optimized()
2711 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done_optimized()
2714 bytes_cleaned += np->get_tx_ctx->skb->len; in nv_tx_done_optimized()
2715 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done_optimized()
2716 np->get_tx_ctx->skb = NULL; in nv_tx_done_optimized()
2719 if (np->tx_limit) in nv_tx_done_optimized()
2723 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) in nv_tx_done_optimized()
2724 np->get_tx.ex = np->tx_ring.ex; in nv_tx_done_optimized()
2725 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) in nv_tx_done_optimized()
2726 np->get_tx_ctx = np->tx_skb; in nv_tx_done_optimized()
2729 netdev_completed_queue(np->dev, tx_work, bytes_cleaned); in nv_tx_done_optimized()
2731 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { in nv_tx_done_optimized()
2732 np->tx_stop = 0; in nv_tx_done_optimized()
2744 struct fe_priv *np = netdev_priv(dev); in nv_tx_timeout() local
2750 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_tx_timeout()
2760 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); in nv_tx_timeout()
2762 for (i = 0; i <= np->register_size; i += 32) { in nv_tx_timeout()
2773 for (i = 0; i < np->tx_ring_size; i += 4) { in nv_tx_timeout()
2774 if (!nv_optimized(np)) { in nv_tx_timeout()
2779 le32_to_cpu(np->tx_ring.orig[i].buf), in nv_tx_timeout()
2780 le32_to_cpu(np->tx_ring.orig[i].flaglen), in nv_tx_timeout()
2781 le32_to_cpu(np->tx_ring.orig[i+1].buf), in nv_tx_timeout()
2782 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), in nv_tx_timeout()
2783 le32_to_cpu(np->tx_ring.orig[i+2].buf), in nv_tx_timeout()
2784 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), in nv_tx_timeout()
2785 le32_to_cpu(np->tx_ring.orig[i+3].buf), in nv_tx_timeout()
2786 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); in nv_tx_timeout()
2794 le32_to_cpu(np->tx_ring.ex[i].bufhigh), in nv_tx_timeout()
2795 le32_to_cpu(np->tx_ring.ex[i].buflow), in nv_tx_timeout()
2796 le32_to_cpu(np->tx_ring.ex[i].flaglen), in nv_tx_timeout()
2797 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), in nv_tx_timeout()
2798 le32_to_cpu(np->tx_ring.ex[i+1].buflow), in nv_tx_timeout()
2799 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), in nv_tx_timeout()
2800 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), in nv_tx_timeout()
2801 le32_to_cpu(np->tx_ring.ex[i+2].buflow), in nv_tx_timeout()
2802 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), in nv_tx_timeout()
2803 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), in nv_tx_timeout()
2804 le32_to_cpu(np->tx_ring.ex[i+3].buflow), in nv_tx_timeout()
2805 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); in nv_tx_timeout()
2810 spin_lock_irq(&np->lock); in nv_tx_timeout()
2816 saved_tx_limit = np->tx_limit; in nv_tx_timeout()
2817 np->tx_limit = 0; /* prevent giving HW any limited pkts */ in nv_tx_timeout()
2818 np->tx_stop = 0; /* prevent waking tx queue */ in nv_tx_timeout()
2819 if (!nv_optimized(np)) in nv_tx_timeout()
2820 nv_tx_done(dev, np->tx_ring_size); in nv_tx_timeout()
2822 nv_tx_done_optimized(dev, np->tx_ring_size); in nv_tx_timeout()
2825 if (np->tx_change_owner) in nv_tx_timeout()
2826 put_tx.ex = np->tx_change_owner->first_tx_desc; in nv_tx_timeout()
2828 put_tx = np->put_tx; in nv_tx_timeout()
2835 np->get_tx = np->put_tx = put_tx; in nv_tx_timeout()
2836 np->tx_limit = saved_tx_limit; in nv_tx_timeout()
2841 spin_unlock_irq(&np->lock); in nv_tx_timeout()
2887 static void rx_missing_handler(u32 flags, struct fe_priv *np) in rx_missing_handler() argument
2890 u64_stats_update_begin(&np->swstats_rx_syncp); in rx_missing_handler()
2892 u64_stats_update_end(&np->swstats_rx_syncp); in rx_missing_handler()
2898 struct fe_priv *np = netdev_priv(dev); in nv_rx_process() local
2904 while ((np->get_rx.orig != np->put_rx.orig) && in nv_rx_process()
2905 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && in nv_rx_process()
2913 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, in nv_rx_process()
2914 np->get_rx_ctx->dma_len, in nv_rx_process()
2916 skb = np->get_rx_ctx->skb; in nv_rx_process()
2917 np->get_rx_ctx->skb = NULL; in nv_rx_process()
2920 if (np->desc_ver == DESC_VER_1) { in nv_rx_process()
2938 rx_missing_handler(flags, np); in nv_rx_process()
2980 napi_gro_receive(&np->napi, skb); in nv_rx_process()
2981 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process()
2984 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process()
2986 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) in nv_rx_process()
2987 np->get_rx.orig = np->rx_ring.orig; in nv_rx_process()
2988 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) in nv_rx_process()
2989 np->get_rx_ctx = np->rx_skb; in nv_rx_process()
2999 struct fe_priv *np = netdev_priv(dev); in nv_rx_process_optimized() local
3006 while ((np->get_rx.ex != np->put_rx.ex) && in nv_rx_process_optimized()
3007 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && in nv_rx_process_optimized()
3015 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, in nv_rx_process_optimized()
3016 np->get_rx_ctx->dma_len, in nv_rx_process_optimized()
3018 skb = np->get_rx_ctx->skb; in nv_rx_process_optimized()
3019 np->get_rx_ctx->skb = NULL; in nv_rx_process_optimized()
3053 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); in nv_rx_process_optimized()
3066 napi_gro_receive(&np->napi, skb); in nv_rx_process_optimized()
3067 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process_optimized()
3070 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process_optimized()
3075 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) in nv_rx_process_optimized()
3076 np->get_rx.ex = np->rx_ring.ex; in nv_rx_process_optimized()
3077 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) in nv_rx_process_optimized()
3078 np->get_rx_ctx = np->rx_skb; in nv_rx_process_optimized()
3088 struct fe_priv *np = netdev_priv(dev); in set_bufsize() local
3091 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; in set_bufsize()
3093 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; in set_bufsize()
3102 struct fe_priv *np = netdev_priv(dev); in nv_change_mtu() local
3125 spin_lock(&np->lock); in nv_change_mtu()
3134 if (!np->in_shutdown) in nv_change_mtu()
3135 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_change_mtu()
3138 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_change_mtu()
3140 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_change_mtu()
3143 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_change_mtu()
3148 spin_unlock(&np->lock); in nv_change_mtu()
3176 struct fe_priv *np = netdev_priv(dev); in nv_set_mac_address() local
3188 spin_lock_irq(&np->lock); in nv_set_mac_address()
3198 spin_unlock_irq(&np->lock); in nv_set_mac_address()
3213 struct fe_priv *np = netdev_priv(dev); in nv_set_multicast() local
3260 spin_lock_irq(&np->lock); in nv_set_multicast()
3268 spin_unlock_irq(&np->lock); in nv_set_multicast()
3273 struct fe_priv *np = netdev_priv(dev); in nv_update_pause() local
3276 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); in nv_update_pause()
3278 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { in nv_update_pause()
3282 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_update_pause()
3287 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { in nv_update_pause()
3291 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) in nv_update_pause()
3293 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { in nv_update_pause()
3300 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_update_pause()
3310 struct fe_priv *np = netdev_priv(dev); in nv_force_linkspeed() local
3315 np->linkspeed = NVREG_LINKSPEED_FORCE|speed; in nv_force_linkspeed()
3316 np->duplex = duplex; in nv_force_linkspeed()
3319 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_force_linkspeed()
3321 np->gigabit = PHY_GIGABIT; in nv_force_linkspeed()
3324 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) in nv_force_linkspeed()
3326 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) in nv_force_linkspeed()
3328 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) in nv_force_linkspeed()
3335 if (np->duplex == 0) in nv_force_linkspeed()
3337 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) in nv_force_linkspeed()
3339 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3345 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3355 if (np->desc_ver == DESC_VER_1) { in nv_force_linkspeed()
3358 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3366 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), in nv_force_linkspeed()
3369 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_force_linkspeed()
3386 struct fe_priv *np = netdev_priv(dev); in nv_update_linkspeed() local
3391 int newls = np->linkspeed; in nv_update_linkspeed()
3392 int newdup = np->duplex; in nv_update_linkspeed()
3403 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_update_linkspeed()
3416 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_update_linkspeed()
3417 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_update_linkspeed()
3426 if (np->autoneg == 0) { in nv_update_linkspeed()
3427 if (np->fixed_mode & LPA_100FULL) { in nv_update_linkspeed()
3430 } else if (np->fixed_mode & LPA_100HALF) { in nv_update_linkspeed()
3433 } else if (np->fixed_mode & LPA_10FULL) { in nv_update_linkspeed()
3452 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_update_linkspeed()
3453 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); in nv_update_linkspeed()
3456 if (np->gigabit == PHY_GIGABIT) { in nv_update_linkspeed()
3457 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_update_linkspeed()
3458 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); in nv_update_linkspeed()
3488 if (np->duplex == newdup && np->linkspeed == newls) in nv_update_linkspeed()
3491 np->duplex = newdup; in nv_update_linkspeed()
3492 np->linkspeed = newls; in nv_update_linkspeed()
3504 if (np->gigabit == PHY_GIGABIT) { in nv_update_linkspeed()
3507 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || in nv_update_linkspeed()
3508 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) in nv_update_linkspeed()
3510 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3517 if (np->duplex == 0) in nv_update_linkspeed()
3519 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) in nv_update_linkspeed()
3521 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3525 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ in nv_update_linkspeed()
3527 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { in nv_update_linkspeed()
3530 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { in nv_update_linkspeed()
3531 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) in nv_update_linkspeed()
3540 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) in nv_update_linkspeed()
3547 if (np->desc_ver == DESC_VER_1) { in nv_update_linkspeed()
3550 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3557 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), in nv_update_linkspeed()
3560 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_update_linkspeed()
3565 if (netif_running(dev) && (np->duplex != 0)) { in nv_update_linkspeed()
3566 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { in nv_update_linkspeed()
3574 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_update_linkspeed()
3585 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_update_linkspeed()
3593 pause_flags = np->pause_flags; in nv_update_linkspeed()
3637 static void nv_msi_workaround(struct fe_priv *np) in nv_msi_workaround() argument
3643 if (np->msi_flags & NV_MSI_ENABLED) { in nv_msi_workaround()
3644 u8 __iomem *base = np->base; in nv_msi_workaround()
3653 struct fe_priv *np = netdev_priv(dev); in nv_change_interrupt_mode() local
3658 np->quiet_count = 0; in nv_change_interrupt_mode()
3659 if (np->irqmask != NVREG_IRQMASK_CPU) { in nv_change_interrupt_mode()
3660 np->irqmask = NVREG_IRQMASK_CPU; in nv_change_interrupt_mode()
3664 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { in nv_change_interrupt_mode()
3665 np->quiet_count++; in nv_change_interrupt_mode()
3669 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { in nv_change_interrupt_mode()
3670 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_change_interrupt_mode()
3682 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq() local
3685 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq()
3686 np->events = readl(base + NvRegIrqStatus); in nv_nic_irq()
3687 writel(np->events, base + NvRegIrqStatus); in nv_nic_irq()
3689 np->events = readl(base + NvRegMSIXIrqStatus); in nv_nic_irq()
3690 writel(np->events, base + NvRegMSIXIrqStatus); in nv_nic_irq()
3692 if (!(np->events & np->irqmask)) in nv_nic_irq()
3695 nv_msi_workaround(np); in nv_nic_irq()
3697 if (napi_schedule_prep(&np->napi)) { in nv_nic_irq()
3702 __napi_schedule(&np->napi); in nv_nic_irq()
3715 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_optimized() local
3718 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq_optimized()
3719 np->events = readl(base + NvRegIrqStatus); in nv_nic_irq_optimized()
3720 writel(np->events, base + NvRegIrqStatus); in nv_nic_irq_optimized()
3722 np->events = readl(base + NvRegMSIXIrqStatus); in nv_nic_irq_optimized()
3723 writel(np->events, base + NvRegMSIXIrqStatus); in nv_nic_irq_optimized()
3725 if (!(np->events & np->irqmask)) in nv_nic_irq_optimized()
3728 nv_msi_workaround(np); in nv_nic_irq_optimized()
3730 if (napi_schedule_prep(&np->napi)) { in nv_nic_irq_optimized()
3735 __napi_schedule(&np->napi); in nv_nic_irq_optimized()
3744 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_tx() local
3754 if (!(events & np->irqmask)) in nv_nic_irq_tx()
3757 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_tx()
3759 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_tx()
3762 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_tx()
3767 if (!np->in_shutdown) { in nv_nic_irq_tx()
3768 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; in nv_nic_irq_tx()
3769 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_tx()
3771 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_tx()
3784 struct fe_priv *np = container_of(napi, struct fe_priv, napi); in nv_napi_poll() local
3785 struct net_device *dev = np->dev; in nv_napi_poll()
3792 if (!nv_optimized(np)) { in nv_napi_poll()
3793 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3794 tx_work += nv_tx_done(dev, np->tx_ring_size); in nv_napi_poll()
3795 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3800 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3801 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); in nv_napi_poll()
3802 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3812 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3813 if (!np->in_shutdown) in nv_napi_poll()
3814 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_napi_poll()
3815 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3820 if (unlikely(np->events & NVREG_IRQ_LINK)) { in nv_napi_poll()
3821 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3823 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3825 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { in nv_napi_poll()
3826 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3828 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3829 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_napi_poll()
3831 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { in nv_napi_poll()
3832 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3833 if (!np->in_shutdown) { in nv_napi_poll()
3834 np->nic_poll_irq = np->irqmask; in nv_napi_poll()
3835 np->recover_error = 1; in nv_napi_poll()
3836 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_napi_poll()
3838 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3848 writel(np->irqmask, base + NvRegIrqMask); in nv_napi_poll()
3856 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_rx() local
3866 if (!(events & np->irqmask)) in nv_nic_irq_rx()
3871 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_rx()
3872 if (!np->in_shutdown) in nv_nic_irq_rx()
3873 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_nic_irq_rx()
3874 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_rx()
3879 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_rx()
3884 if (!np->in_shutdown) { in nv_nic_irq_rx()
3885 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; in nv_nic_irq_rx()
3886 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_rx()
3888 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_rx()
3901 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_other() local
3911 if (!(events & np->irqmask)) in nv_nic_irq_other()
3915 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3917 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3920 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3922 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3924 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { in nv_nic_irq_other()
3925 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3927 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3928 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_nic_irq_other()
3931 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3936 if (!np->in_shutdown) { in nv_nic_irq_other()
3937 np->nic_poll_irq |= NVREG_IRQ_OTHER; in nv_nic_irq_other()
3938 np->recover_error = 1; in nv_nic_irq_other()
3939 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_other()
3941 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3945 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3950 if (!np->in_shutdown) { in nv_nic_irq_other()
3951 np->nic_poll_irq |= NVREG_IRQ_OTHER; in nv_nic_irq_other()
3952 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_other()
3954 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3968 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_test() local
3972 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq_test()
3983 nv_msi_workaround(np); in nv_nic_irq_test()
3985 spin_lock(&np->lock); in nv_nic_irq_test()
3986 np->intr_test = 1; in nv_nic_irq_test()
3987 spin_unlock(&np->lock); in nv_nic_irq_test()
4018 struct fe_priv *np = get_nvpriv(dev); in nv_request_irq() local
4027 if (nv_optimized(np)) in nv_request_irq()
4033 if (np->msi_flags & NV_MSI_X_CAPABLE) { in nv_request_irq()
4034 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) in nv_request_irq()
4035 np->msi_x_entry[i].entry = i; in nv_request_irq()
4036 ret = pci_enable_msix_range(np->pci_dev, in nv_request_irq()
4037 np->msi_x_entry, in nv_request_irq()
4038 np->msi_flags & NV_MSI_X_VECTORS_MASK, in nv_request_irq()
4039 np->msi_flags & NV_MSI_X_VECTORS_MASK); in nv_request_irq()
4041 np->msi_flags |= NV_MSI_X_ENABLED; in nv_request_irq()
4044 sprintf(np->name_rx, "%s-rx", dev->name); in nv_request_irq()
4045 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, in nv_request_irq()
4046 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev); in nv_request_irq()
4051 pci_disable_msix(np->pci_dev); in nv_request_irq()
4052 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4056 sprintf(np->name_tx, "%s-tx", dev->name); in nv_request_irq()
4057 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, in nv_request_irq()
4058 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev); in nv_request_irq()
4063 pci_disable_msix(np->pci_dev); in nv_request_irq()
4064 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4068 sprintf(np->name_other, "%s-other", dev->name); in nv_request_irq()
4069 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, in nv_request_irq()
4070 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev); in nv_request_irq()
4075 pci_disable_msix(np->pci_dev); in nv_request_irq()
4076 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4087 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, in nv_request_irq()
4093 pci_disable_msix(np->pci_dev); in nv_request_irq()
4094 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4106 if (np->msi_flags & NV_MSI_CAPABLE) { in nv_request_irq()
4107 ret = pci_enable_msi(np->pci_dev); in nv_request_irq()
4109 np->msi_flags |= NV_MSI_ENABLED; in nv_request_irq()
4110 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev); in nv_request_irq()
4114 pci_disable_msi(np->pci_dev); in nv_request_irq()
4115 np->msi_flags &= ~NV_MSI_ENABLED; in nv_request_irq()
4129 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) in nv_request_irq()
4134 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); in nv_request_irq()
4136 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); in nv_request_irq()
4143 struct fe_priv *np = get_nvpriv(dev); in nv_free_irq() local
4146 if (np->msi_flags & NV_MSI_X_ENABLED) { in nv_free_irq()
4147 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) in nv_free_irq()
4148 free_irq(np->msi_x_entry[i].vector, dev); in nv_free_irq()
4149 pci_disable_msix(np->pci_dev); in nv_free_irq()
4150 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_free_irq()
4152 free_irq(np->pci_dev->irq, dev); in nv_free_irq()
4153 if (np->msi_flags & NV_MSI_ENABLED) { in nv_free_irq()
4154 pci_disable_msi(np->pci_dev); in nv_free_irq()
4155 np->msi_flags &= ~NV_MSI_ENABLED; in nv_free_irq()
4162 struct fe_priv *np = from_timer(np, t, nic_poll); in nv_do_nic_poll() local
4163 struct net_device *dev = np->dev; in nv_do_nic_poll()
4176 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_do_nic_poll()
4177 irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector; in nv_do_nic_poll()
4179 irq = np->pci_dev->irq; in nv_do_nic_poll()
4180 mask = np->irqmask; in nv_do_nic_poll()
4182 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { in nv_do_nic_poll()
4183 irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector; in nv_do_nic_poll()
4186 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { in nv_do_nic_poll()
4187 irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector; in nv_do_nic_poll()
4190 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { in nv_do_nic_poll()
4191 irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector; in nv_do_nic_poll()
4199 if (np->recover_error) { in nv_do_nic_poll()
4200 np->recover_error = 0; in nv_do_nic_poll()
4205 spin_lock(&np->lock); in nv_do_nic_poll()
4208 if (np->driver_data & DEV_HAS_POWER_CNTRL) in nv_do_nic_poll()
4216 if (!np->in_shutdown) in nv_do_nic_poll()
4217 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_do_nic_poll()
4220 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_do_nic_poll()
4222 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_do_nic_poll()
4225 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_do_nic_poll()
4228 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_do_nic_poll()
4235 spin_unlock(&np->lock); in nv_do_nic_poll()
4245 np->nic_poll_irq = 0; in nv_do_nic_poll()
4246 if (nv_optimized(np)) in nv_do_nic_poll()
4251 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { in nv_do_nic_poll()
4252 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; in nv_do_nic_poll()
4255 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { in nv_do_nic_poll()
4256 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; in nv_do_nic_poll()
4259 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { in nv_do_nic_poll()
4260 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; in nv_do_nic_poll()
4271 struct fe_priv *np = netdev_priv(dev); in nv_poll_controller() local
4273 nv_do_nic_poll(&np->nic_poll); in nv_poll_controller()
4281 struct fe_priv *np = from_timer(np, t, stats_poll); in nv_do_stats_poll() local
4282 struct net_device *dev = np->dev; in nv_do_stats_poll()
4286 if (spin_trylock(&np->hwstats_lock)) { in nv_do_stats_poll()
4288 spin_unlock(&np->hwstats_lock); in nv_do_stats_poll()
4291 if (!np->in_shutdown) in nv_do_stats_poll()
4292 mod_timer(&np->stats_poll, in nv_do_stats_poll()
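The nv_do_stats_poll() fragments show the usual pattern for a periodic statistics timer: recover the private structure with from_timer(), take the stats lock opportunistically with spin_trylock() so the softirq never spins, and re-arm the timer unless the device is shutting down. A small sketch of that pattern follows; my_priv, my_update_stats and MY_STATS_INTERVAL are hypothetical names.

	/* Sketch of an opportunistic, self-rearming stats timer
	 * (illustrative; identifiers are hypothetical). */
	#include <linux/timer.h>
	#include <linux/spinlock.h>
	#include <linux/jiffies.h>

	#define MY_STATS_INTERVAL (10 * HZ)

	struct my_priv {
		spinlock_t hwstats_lock;	/* initialized at probe time */
		struct timer_list stats_poll;
		bool in_shutdown;
	};

	static void my_update_stats(struct my_priv *priv)
	{
		/* read hardware counters into software copies */
	}

	static void my_do_stats_poll(struct timer_list *t)
	{
		struct my_priv *priv = from_timer(priv, t, stats_poll);

		/* Skip this round rather than spin in timer context. */
		if (spin_trylock(&priv->hwstats_lock)) {
			my_update_stats(priv);
			spin_unlock(&priv->hwstats_lock);
		}

		if (!priv->in_shutdown)
			mod_timer(&priv->stats_poll, jiffies + MY_STATS_INTERVAL);
	}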
4298 struct fe_priv *np = netdev_priv(dev); in nv_get_drvinfo() local
4301 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in nv_get_drvinfo()
4306 struct fe_priv *np = netdev_priv(dev); in nv_get_wol() local
4309 spin_lock_irq(&np->lock); in nv_get_wol()
4310 if (np->wolenabled) in nv_get_wol()
4312 spin_unlock_irq(&np->lock); in nv_get_wol()
4317 struct fe_priv *np = netdev_priv(dev); in nv_set_wol() local
4322 np->wolenabled = 0; in nv_set_wol()
4324 np->wolenabled = 1; in nv_set_wol()
4328 spin_lock_irq(&np->lock); in nv_set_wol()
4330 spin_unlock_irq(&np->lock); in nv_set_wol()
4332 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); in nv_set_wol()
4339 struct fe_priv *np = netdev_priv(dev); in nv_get_link_ksettings() local
4343 spin_lock_irq(&np->lock); in nv_get_link_ksettings()
4356 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { in nv_get_link_ksettings()
4371 if (np->duplex) in nv_get_link_ksettings()
4378 cmd->base.autoneg = np->autoneg; in nv_get_link_ksettings()
4381 if (np->autoneg) { in nv_get_link_ksettings()
4383 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_get_link_ksettings()
4392 if (np->gigabit == PHY_GIGABIT) { in nv_get_link_ksettings()
4393 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_get_link_ksettings()
4402 if (np->gigabit == PHY_GIGABIT) in nv_get_link_ksettings()
4405 cmd->base.phy_address = np->phyaddr; in nv_get_link_ksettings()
4413 spin_unlock_irq(&np->lock); in nv_get_link_ksettings()
4420 struct fe_priv *np = netdev_priv(dev); in nv_set_link_ksettings() local
4429 if (cmd->base.phy_address != np->phyaddr) { in nv_set_link_ksettings()
4439 if (np->gigabit == PHY_GIGABIT) in nv_set_link_ksettings()
4466 spin_lock_irqsave(&np->lock, flags); in nv_set_link_ksettings()
4477 spin_unlock_irqrestore(&np->lock, flags); in nv_set_link_ksettings()
4485 np->autoneg = 1; in nv_set_link_ksettings()
4488 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_link_ksettings()
4498 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx p… in nv_set_link_ksettings()
4500 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_set_link_ksettings()
4502 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_link_ksettings()
4504 if (np->gigabit == PHY_GIGABIT) { in nv_set_link_ksettings()
4505 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_set_link_ksettings()
4509 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); in nv_set_link_ksettings()
4514 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_link_ksettings()
4515 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in nv_set_link_ksettings()
4525 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_link_ksettings()
4530 np->autoneg = 0; in nv_set_link_ksettings()
4532 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_link_ksettings()
4542 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); in nv_set_link_ksettings()
4543 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx p… in nv_set_link_ksettings()
4545 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_set_link_ksettings()
4547 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { in nv_set_link_ksettings()
4549 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_set_link_ksettings()
4551 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_link_ksettings()
4552 np->fixed_mode = adv; in nv_set_link_ksettings()
4554 if (np->gigabit == PHY_GIGABIT) { in nv_set_link_ksettings()
4555 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_set_link_ksettings()
4557 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); in nv_set_link_ksettings()
4560 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_link_ksettings()
4562 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) in nv_set_link_ksettings()
4564 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) in nv_set_link_ksettings()
4566 if (np->phy_oui == PHY_OUI_MARVELL) { in nv_set_link_ksettings()
4573 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_link_ksettings()
4594 struct fe_priv *np = netdev_priv(dev); in nv_get_regs_len() local
4595 return np->register_size; in nv_get_regs_len()
4600 struct fe_priv *np = netdev_priv(dev); in nv_get_regs() local
4606 spin_lock_irq(&np->lock); in nv_get_regs()
4607 for (i = 0; i < np->register_size/sizeof(u32); i++) in nv_get_regs()
4609 spin_unlock_irq(&np->lock); in nv_get_regs()
4614 struct fe_priv *np = netdev_priv(dev); in nv_nway_reset() local
4617 if (np->autoneg) { in nv_nway_reset()
4625 spin_lock(&np->lock); in nv_nway_reset()
4628 spin_unlock(&np->lock); in nv_nway_reset()
4634 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_nway_reset()
4635 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in nv_nway_reset()
4644 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_nway_reset()
4661 struct fe_priv *np = netdev_priv(dev); in nv_get_ringparam() local
4663 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; in nv_get_ringparam()
4664 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; in nv_get_ringparam()
4666 ring->rx_pending = np->rx_ring_size; in nv_get_ringparam()
4667 ring->tx_pending = np->tx_ring_size; in nv_get_ringparam()
4672 struct fe_priv *np = netdev_priv(dev); in nv_set_ringparam() local
4681 (np->desc_ver == DESC_VER_1 && in nv_set_ringparam()
4684 (np->desc_ver != DESC_VER_1 && in nv_set_ringparam()
4691 if (!nv_optimized(np)) { in nv_set_ringparam()
4692 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4698 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4710 if (!nv_optimized(np)) { in nv_set_ringparam()
4712 dma_free_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4719 dma_free_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4736 spin_lock(&np->lock); in nv_set_ringparam()
4747 np->rx_ring_size = ring->rx_pending; in nv_set_ringparam()
4748 np->tx_ring_size = ring->tx_pending; in nv_set_ringparam()
4750 if (!nv_optimized(np)) { in nv_set_ringparam()
4751 np->rx_ring.orig = (struct ring_desc *)rxtx_ring; in nv_set_ringparam()
4752 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; in nv_set_ringparam()
4754 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring; in nv_set_ringparam()
4755 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; in nv_set_ringparam()
4757 np->rx_skb = (struct nv_skb_map *)rx_skbuff; in nv_set_ringparam()
4758 np->tx_skb = (struct nv_skb_map *)tx_skbuff; in nv_set_ringparam()
4759 np->ring_addr = ring_addr; in nv_set_ringparam()
4761 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); in nv_set_ringparam()
4762 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); in nv_set_ringparam()
4768 if (!np->in_shutdown) in nv_set_ringparam()
4769 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_set_ringparam()
4773 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_set_ringparam()
4775 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_set_ringparam()
4778 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_set_ringparam()
4783 spin_unlock(&np->lock); in nv_set_ringparam()
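The nv_set_ringparam() fragments illustrate an allocate-before-swap strategy: the new descriptor ring (one coherent DMA block holding rx then tx descriptors) and the skb bookkeeping arrays are allocated first, and only after all allocations succeed does the driver stop traffic, swap the pointers and restart. The sketch below shows just the allocation half under that assumption; my_desc, my_alloc_rings and the parameter names are hypothetical.

	/* Sketch of allocating a combined rx+tx descriptor ring before
	 * swapping it in (illustrative; identifiers are hypothetical). */
	#include <linux/dma-mapping.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	struct my_desc { __le32 buf; __le32 flaglen; };

	static int my_alloc_rings(struct device *dma_dev, int rx_count, int tx_count,
				  struct my_desc **ring, dma_addr_t *ring_dma,
				  void **rx_skbs, void **tx_skbs)
	{
		size_t ring_bytes = sizeof(struct my_desc) * (rx_count + tx_count);

		*ring = dma_alloc_coherent(dma_dev, ring_bytes, ring_dma, GFP_KERNEL);
		*rx_skbs = kcalloc(rx_count, sizeof(void *), GFP_KERNEL);
		*tx_skbs = kcalloc(tx_count, sizeof(void *), GFP_KERNEL);

		if (!*ring || !*rx_skbs || !*tx_skbs) {
			/* Failure leaves the caller's old rings untouched. */
			if (*ring)
				dma_free_coherent(dma_dev, ring_bytes, *ring, *ring_dma);
			kfree(*rx_skbs);
			kfree(*tx_skbs);
			return -ENOMEM;
		}
		return 0;
	}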
4796 struct fe_priv *np = netdev_priv(dev); in nv_get_pauseparam() local
4798 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; in nv_get_pauseparam()
4799 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; in nv_get_pauseparam()
4800 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; in nv_get_pauseparam()
4805 struct fe_priv *np = netdev_priv(dev); in nv_set_pauseparam() local
4808 if ((!np->autoneg && np->duplex == 0) || in nv_set_pauseparam()
4809 (np->autoneg && !pause->autoneg && np->duplex == 0)) { in nv_set_pauseparam()
4813 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { in nv_set_pauseparam()
4823 spin_lock(&np->lock); in nv_set_pauseparam()
4826 spin_unlock(&np->lock); in nv_set_pauseparam()
4831 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); in nv_set_pauseparam()
4833 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; in nv_set_pauseparam()
4835 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; in nv_set_pauseparam()
4837 if (np->autoneg && pause->autoneg) { in nv_set_pauseparam()
4838 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; in nv_set_pauseparam()
4840 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_pauseparam()
4842 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pa… in nv_set_pauseparam()
4844 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_set_pauseparam()
4846 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_pauseparam()
4850 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_pauseparam()
4852 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_pauseparam()
4854 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); in nv_set_pauseparam()
4856 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_set_pauseparam()
4858 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_set_pauseparam()
4863 nv_update_pause(dev, np->pause_flags); in nv_set_pauseparam()
4875 struct fe_priv *np = netdev_priv(dev); in nv_set_loopback() local
4880 spin_lock_irqsave(&np->lock, flags); in nv_set_loopback()
4881 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_loopback()
4884 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4891 err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol); in nv_set_loopback()
4894 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4904 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4910 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4916 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4921 spin_lock_irqsave(&np->lock, flags); in nv_set_loopback()
4923 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4940 struct fe_priv *np = get_nvpriv(dev); in nv_vlan_mode() local
4942 spin_lock_irq(&np->lock); in nv_vlan_mode()
4945 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; in nv_vlan_mode()
4947 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; in nv_vlan_mode()
4950 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; in nv_vlan_mode()
4952 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; in nv_vlan_mode()
4954 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_vlan_mode()
4956 spin_unlock_irq(&np->lock); in nv_vlan_mode()
4961 struct fe_priv *np = netdev_priv(dev); in nv_set_features() local
4973 spin_lock_irq(&np->lock); in nv_set_features()
4976 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; in nv_set_features()
4978 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; in nv_set_features()
4981 writel(np->txrxctl_bits, base + NvRegTxRxControl); in nv_set_features()
4983 spin_unlock_irq(&np->lock); in nv_set_features()
4994 struct fe_priv *np = netdev_priv(dev); in nv_get_sset_count() local
4998 if (np->driver_data & DEV_HAS_TEST_EXTENDED) in nv_get_sset_count()
5003 if (np->driver_data & DEV_HAS_STATISTICS_V3) in nv_get_sset_count()
5005 else if (np->driver_data & DEV_HAS_STATISTICS_V2) in nv_get_sset_count()
5007 else if (np->driver_data & DEV_HAS_STATISTICS_V1) in nv_get_sset_count()
5021 struct fe_priv *np = netdev_priv(dev); in nv_get_ethtool_stats() local
5023 spin_lock_bh(&np->hwstats_lock); in nv_get_ethtool_stats()
5025 memcpy(buffer, &np->estats, in nv_get_ethtool_stats()
5027 spin_unlock_bh(&np->hwstats_lock); in nv_get_ethtool_stats()
5032 struct fe_priv *np = netdev_priv(dev); in nv_link_test() local
5035 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_link_test()
5036 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_link_test()
5075 struct fe_priv *np = netdev_priv(dev); in nv_interrupt_test() local
5088 np->intr_test = 0; in nv_interrupt_test()
5091 save_msi_flags = np->msi_flags; in nv_interrupt_test()
5092 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; in nv_interrupt_test()
5093 np->msi_flags |= 0x001; /* setup 1 vector */ in nv_interrupt_test()
5106 spin_lock_irq(&np->lock); in nv_interrupt_test()
5109 testcnt = np->intr_test; in nv_interrupt_test()
5114 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_interrupt_test()
5119 spin_unlock_irq(&np->lock); in nv_interrupt_test()
5123 np->msi_flags = save_msi_flags; in nv_interrupt_test()
5138 struct fe_priv *np = netdev_priv(dev); in nv_loopback_test() local
5142 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); in nv_loopback_test()
5167 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_loopback_test()
5169 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_loopback_test()
5183 test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data, in nv_loopback_test()
5186 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_loopback_test()
5195 if (!nv_optimized(np)) { in nv_loopback_test()
5196 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); in nv_loopback_test()
5197 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); in nv_loopback_test()
5199 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); in nv_loopback_test()
5200 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); in nv_loopback_test()
5201 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); in nv_loopback_test()
5203 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_loopback_test()
5209 if (!nv_optimized(np)) { in nv_loopback_test()
5210 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); in nv_loopback_test()
5211 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); in nv_loopback_test()
5214 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); in nv_loopback_test()
5215 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); in nv_loopback_test()
5220 } else if (np->desc_ver == DESC_VER_1) { in nv_loopback_test()
5232 rx_skb = np->rx_skb[0].skb; in nv_loopback_test()
5242 dma_unmap_single(&np->pci_dev->dev, test_dma_addr, in nv_loopback_test()
5264 struct fe_priv *np = netdev_priv(dev); in nv_self_test() local
5282 spin_lock_irq(&np->lock); in nv_self_test()
5283 nv_disable_hw_interrupts(dev, np->irqmask); in nv_self_test()
5284 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_self_test()
5293 spin_unlock_irq(&np->lock); in nv_self_test()
5322 if (!np->in_shutdown) in nv_self_test()
5323 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_self_test()
5326 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_self_test()
5328 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_self_test()
5331 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_self_test()
5337 nv_enable_hw_interrupts(dev, np->irqmask); in nv_self_test()
5378 struct fe_priv *np = netdev_priv(dev); in nv_mgmt_acquire_sema() local
5402 np->mgmt_sema = 1; in nv_mgmt_acquire_sema()
5413 struct fe_priv *np = netdev_priv(dev); in nv_mgmt_release_sema() local
5417 if (np->driver_data & DEV_HAS_MGMT_UNIT) { in nv_mgmt_release_sema()
5418 if (np->mgmt_sema) { in nv_mgmt_release_sema()
5429 struct fe_priv *np = netdev_priv(dev); in nv_mgmt_get_version() local
5451 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; in nv_mgmt_get_version()
5458 struct fe_priv *np = netdev_priv(dev); in nv_open() local
5465 mii_rw(dev, np->phyaddr, MII_BMCR, in nv_open()
5466 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); in nv_open()
5470 if (np->driver_data & DEV_HAS_POWER_CNTRL) in nv_open()
5483 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) in nv_open()
5495 np->in_shutdown = 0; in nv_open()
5499 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_open()
5502 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_open()
5503 if (np->desc_ver == DESC_VER_1) in nv_open()
5507 writel(np->txrxctl_bits, base + NvRegTxRxControl); in nv_open()
5508 writel(np->vlanctl_bits, base + NvRegVlanControl); in nv_open()
5510 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); in nv_open()
5524 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_open()
5530 if (np->desc_ver == DESC_VER_1) { in nv_open()
5533 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { in nv_open()
5551 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, in nv_open()
5555 if (np->wolenabled) in nv_open()
5566 nv_disable_hw_interrupts(dev, np->irqmask); in nv_open()
5576 nv_enable_hw_interrupts(dev, np->irqmask); in nv_open()
5578 spin_lock_irq(&np->lock); in nv_open()
5592 np->linkspeed = 0; in nv_open()
5605 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_open()
5608 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) in nv_open()
5609 mod_timer(&np->stats_poll, in nv_open()
5612 spin_unlock_irq(&np->lock); in nv_open()
5628 struct fe_priv *np = netdev_priv(dev); in nv_close() local
5631 spin_lock_irq(&np->lock); in nv_close()
5632 np->in_shutdown = 1; in nv_close()
5633 spin_unlock_irq(&np->lock); in nv_close()
5635 synchronize_irq(np->pci_dev->irq); in nv_close()
5637 del_timer_sync(&np->oom_kick); in nv_close()
5638 del_timer_sync(&np->nic_poll); in nv_close()
5639 del_timer_sync(&np->stats_poll); in nv_close()
5642 spin_lock_irq(&np->lock); in nv_close()
5649 nv_disable_hw_interrupts(dev, np->irqmask); in nv_close()
5652 spin_unlock_irq(&np->lock); in nv_close()
5658 if (np->wolenabled || !phy_power_down) { in nv_close()
5664 mii_rw(dev, np->phyaddr, MII_BMCR, in nv_close()
5665 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN); in nv_close()
5711 struct fe_priv *np; in nv_probe() local
5729 np = netdev_priv(dev); in nv_probe()
5730 np->dev = dev; in nv_probe()
5731 np->pci_dev = pci_dev; in nv_probe()
5732 spin_lock_init(&np->lock); in nv_probe()
5733 spin_lock_init(&np->hwstats_lock); in nv_probe()
5735 u64_stats_init(&np->swstats_rx_syncp); in nv_probe()
5736 u64_stats_init(&np->swstats_tx_syncp); in nv_probe()
5737 np->txrx_stats = alloc_percpu(struct nv_txrx_stats); in nv_probe()
5738 if (!np->txrx_stats) { in nv_probe()
5744 timer_setup(&np->oom_kick, nv_do_rx_refill, 0); in nv_probe()
5745 timer_setup(&np->nic_poll, nv_do_nic_poll, 0); in nv_probe()
5746 timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE); in nv_probe()
5759 np->register_size = NV_PCI_REGSZ_VER3; in nv_probe()
5761 np->register_size = NV_PCI_REGSZ_VER2; in nv_probe()
5763 np->register_size = NV_PCI_REGSZ_VER1; in nv_probe()
5769 pci_resource_len(pci_dev, i) >= np->register_size) { in nv_probe()
5780 np->driver_data = id->driver_data; in nv_probe()
5782 np->device_id = id->device; in nv_probe()
5787 np->desc_ver = DESC_VER_3; in nv_probe()
5788 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; in nv_probe()
5802 np->desc_ver = DESC_VER_2; in nv_probe()
5803 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; in nv_probe()
5806 np->desc_ver = DESC_VER_1; in nv_probe()
5807 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; in nv_probe()
5810 np->pkt_limit = NV_PKTLIMIT_1; in nv_probe()
5812 np->pkt_limit = NV_PKTLIMIT_2; in nv_probe()
5815 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; in nv_probe()
5820 np->vlanctl_bits = 0; in nv_probe()
5822 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; in nv_probe()
5834 dev->max_mtu = np->pkt_limit; in nv_probe()
5836 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; in nv_probe()
5840 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; in nv_probe()
5844 np->base = ioremap(addr, np->register_size); in nv_probe()
5845 if (!np->base) in nv_probe()
5848 np->rx_ring_size = RX_RING_DEFAULT; in nv_probe()
5849 np->tx_ring_size = TX_RING_DEFAULT; in nv_probe()
5851 if (!nv_optimized(np)) { in nv_probe()
5852 np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev, in nv_probe()
5854 (np->rx_ring_size + in nv_probe()
5855 np->tx_ring_size), in nv_probe()
5856 &np->ring_addr, in nv_probe()
5858 if (!np->rx_ring.orig) in nv_probe()
5860 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; in nv_probe()
5862 np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev, in nv_probe()
5864 (np->rx_ring_size + in nv_probe()
5865 np->tx_ring_size), in nv_probe()
5866 &np->ring_addr, GFP_KERNEL); in nv_probe()
5867 if (!np->rx_ring.ex) in nv_probe()
5869 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; in nv_probe()
5871 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); in nv_probe()
5872 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); in nv_probe()
5873 if (!np->rx_skb || !np->tx_skb) in nv_probe()
5876 if (!nv_optimized(np)) in nv_probe()
5881 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); in nv_probe()
5889 np->orig_mac[0] = readl(base + NvRegMacAddrA); in nv_probe()
5890 np->orig_mac[1] = readl(base + NvRegMacAddrB); in nv_probe()
5896 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5897 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5898 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5899 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5900 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5901 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5904 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5905 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5906 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5907 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5908 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5909 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5915 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) + in nv_probe()
5917 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8); in nv_probe()
5920 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5921 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5922 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5923 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5924 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5925 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5950 np->wolenabled = 0; in nv_probe()
5964 if (np->desc_ver == DESC_VER_1) in nv_probe()
5965 np->tx_flags = NV_TX_VALID; in nv_probe()
5967 np->tx_flags = NV_TX2_VALID; in nv_probe()
5969 np->msi_flags = 0; in nv_probe()
5971 np->msi_flags |= NV_MSI_CAPABLE; in nv_probe()
5978 np->msi_flags |= NV_MSI_X_CAPABLE; in nv_probe()
5983 np->irqmask = NVREG_IRQMASK_CPU; in nv_probe()
5984 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ in nv_probe()
5985 np->msi_flags |= 0x0001; in nv_probe()
5989 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_probe()
5991 np->msi_flags &= ~NV_MSI_X_CAPABLE; in nv_probe()
5994 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_probe()
5995 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ in nv_probe()
5996 np->msi_flags |= 0x0003; in nv_probe()
6000 np->irqmask |= NVREG_IRQ_TIMER; in nv_probe()
6002 np->need_linktimer = 1; in nv_probe()
6003 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_probe()
6005 np->need_linktimer = 0; in nv_probe()
6010 np->tx_limit = 1; in nv_probe()
6013 np->tx_limit = 0; in nv_probe()
6032 np->mac_in_use = 1; in nv_probe()
6033 if (np->mgmt_version > 0) in nv_probe()
6034 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; in nv_probe()
6036 if (np->mac_in_use && in nv_probe()
6052 spin_lock_irq(&np->lock); in nv_probe()
6054 spin_unlock_irq(&np->lock); in nv_probe()
6057 spin_lock_irq(&np->lock); in nv_probe()
6059 spin_unlock_irq(&np->lock); in nv_probe()
6063 np->phy_model = id2 & PHYID2_MODEL_MASK; in nv_probe()
6066 np->phyaddr = phyaddr; in nv_probe()
6067 np->phy_oui = id1 | id2; in nv_probe()
6070 if (np->phy_oui == PHY_OUI_REALTEK2) in nv_probe()
6071 np->phy_oui = PHY_OUI_REALTEK; in nv_probe()
6073 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211) in nv_probe()
6074 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK; in nv_probe()
6088 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_probe()
6090 np->gigabit = PHY_GIGABIT; in nv_probe()
6094 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; in nv_probe()
6095 np->duplex = 0; in nv_probe()
6096 np->autoneg = 1; in nv_probe()
6118 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); in nv_probe()
6132 np->gigabit == PHY_GIGABIT ? "gbit " : "", in nv_probe()
6133 np->need_linktimer ? "lnktim " : "", in nv_probe()
6134 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", in nv_probe()
6135 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", in nv_probe()
6136 np->desc_ver); in nv_probe()
6152 free_percpu(np->txrx_stats); in nv_probe()
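The nv_probe() fragments allocate the per-CPU tx/rx counters with alloc_percpu() and release them with free_percpu() on the error path (and again in nv_remove()). A minimal sketch of that per-CPU counter lifecycle follows; my_stats, my_priv and the helper names are hypothetical, not this driver's identifiers.

	/* Sketch of per-CPU statistics counters: allocate at probe, bump
	 * locklessly on the hot path, free at teardown (illustrative). */
	#include <linux/percpu.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	struct my_stats {
		u64 tx_packets;
		u64 rx_packets;
	};

	struct my_priv {
		struct my_stats __percpu *txrx_stats;
	};

	static int my_probe_stats(struct my_priv *priv)
	{
		priv->txrx_stats = alloc_percpu(struct my_stats);
		if (!priv->txrx_stats)
			return -ENOMEM;
		return 0;
	}

	static void my_count_tx(struct my_priv *priv)
	{
		/* No lock needed: each CPU updates only its own counter. */
		this_cpu_inc(priv->txrx_stats->tx_packets);
	}

	static void my_remove_stats(struct my_priv *priv)
	{
		free_percpu(priv->txrx_stats);
	}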
6161 struct fe_priv *np = netdev_priv(dev); in nv_restore_phy() local
6164 if (np->phy_oui == PHY_OUI_REALTEK && in nv_restore_phy()
6165 np->phy_model == PHY_MODEL_REALTEK_8201 && in nv_restore_phy()
6167 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); in nv_restore_phy()
6168 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); in nv_restore_phy()
6171 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); in nv_restore_phy()
6172 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); in nv_restore_phy()
6175 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_restore_phy()
6177 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); in nv_restore_phy()
6184 struct fe_priv *np = netdev_priv(dev); in nv_restore_mac_addr() local
6190 writel(np->orig_mac[0], base + NvRegMacAddrA); in nv_restore_mac_addr()
6191 writel(np->orig_mac[1], base + NvRegMacAddrB); in nv_restore_mac_addr()
6199 struct fe_priv *np = netdev_priv(dev); in nv_remove() local
6201 free_percpu(np->txrx_stats); in nv_remove()
6224 struct fe_priv *np = netdev_priv(dev); in nv_suspend() local
6235 for (i = 0; i <= np->register_size/sizeof(u32); i++) in nv_suspend()
6236 np->saved_config_space[i] = readl(base + i*sizeof(u32)); in nv_suspend()
6245 struct fe_priv *np = netdev_priv(dev); in nv_resume() local
6250 for (i = 0; i <= np->register_size/sizeof(u32); i++) in nv_resume()
6251 writel(np->saved_config_space[i], base+i*sizeof(u32)); in nv_resume()
6253 if (np->driver_data & DEV_NEED_MSI_FIX) in nv_resume()
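The nv_suspend()/nv_resume() fragments mirror the device's MMIO register window into a driver-private array before sleeping and write it back on wake-up. A hedged sketch of that save/restore loop is shown below; my_priv, MY_REGS_WORDS and the function names are hypothetical, and the bound is written as a plain word count rather than the driver's own register_size arithmetic.

	/* Sketch of saving and restoring an MMIO register window across
	 * suspend/resume (illustrative; identifiers are hypothetical). */
	#include <linux/io.h>
	#include <linux/types.h>

	#define MY_REGS_WORDS 256

	struct my_priv {
		u32 saved_regs[MY_REGS_WORDS];
	};

	static void my_save_regs(struct my_priv *priv, void __iomem *base)
	{
		int i;

		for (i = 0; i < MY_REGS_WORDS; i++)
			priv->saved_regs[i] = readl(base + i * sizeof(u32));
	}

	static void my_restore_regs(struct my_priv *priv, void __iomem *base)
	{
		int i;

		for (i = 0; i < MY_REGS_WORDS; i++)
			writel(priv->saved_regs[i], base + i * sizeof(u32));
	}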
6278 struct fe_priv *np = netdev_priv(dev); in nv_shutdown() local
6297 pci_wake_from_d3(pdev, np->wolenabled); in nv_shutdown()