
Lines Matching refs:np

592 struct netdev_private *np = netdev_priv(dev); in netdev_vlan_rx_add_vid() local
594 spin_lock(&np->lock); in netdev_vlan_rx_add_vid()
597 set_bit(vid, np->active_vlans); in netdev_vlan_rx_add_vid()
599 spin_unlock(&np->lock); in netdev_vlan_rx_add_vid()
607 struct netdev_private *np = netdev_priv(dev); in netdev_vlan_rx_kill_vid() local
609 spin_lock(&np->lock); in netdev_vlan_rx_kill_vid()
612 clear_bit(vid, np->active_vlans); in netdev_vlan_rx_kill_vid()
614 spin_unlock(&np->lock); in netdev_vlan_rx_kill_vid()
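
The two VLAN callbacks above follow one pattern: the driver keeps its set of active VLAN IDs in a bitmap inside the private struct and updates it under the same spinlock that guards the rest of the per-device state. A minimal sketch of that pattern, using hypothetical names (my_priv, my_vlan_add, my_vlan_kill) rather than the driver's real callbacks:

    #include <linux/netdevice.h>
    #include <linux/if_vlan.h>
    #include <linux/spinlock.h>

    struct my_priv {
            spinlock_t lock;
            unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
    };

    /* ndo_vlan_rx_add_vid-style helper: record the VID under the lock. */
    static int my_vlan_add(struct net_device *dev, __be16 proto, u16 vid)
    {
            struct my_priv *priv = netdev_priv(dev);

            spin_lock(&priv->lock);
            set_bit(vid, priv->active_vlans);   /* remember this VLAN for the RX filter */
            spin_unlock(&priv->lock);
            return 0;
    }

    /* Matching kill helper: clear the bit so the filter can be rebuilt without it. */
    static int my_vlan_kill(struct net_device *dev, __be16 proto, u16 vid)
    {
            struct my_priv *priv = netdev_priv(dev);

            spin_lock(&priv->lock);
            clear_bit(vid, priv->active_vlans);
            spin_unlock(&priv->lock);
            return 0;
    }

The bitmap is what set_vlan_mode() later walks with for_each_set_bit() to program the hardware VLAN filter.
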
641 struct netdev_private *np; in starfire_init_one() local
659 dev = alloc_etherdev(sizeof(*np)); in starfire_init_one()
727 np = netdev_priv(dev); in starfire_init_one()
728 np->dev = dev; in starfire_init_one()
729 np->base = base; in starfire_init_one()
730 spin_lock_init(&np->lock); in starfire_init_one()
733 np->pci_dev = pdev; in starfire_init_one()
735 np->mii_if.dev = dev; in starfire_init_one()
736 np->mii_if.mdio_read = mdio_read; in starfire_init_one()
737 np->mii_if.mdio_write = mdio_write; in starfire_init_one()
738 np->mii_if.phy_id_mask = 0x1f; in starfire_init_one()
739 np->mii_if.reg_num_mask = 0x1f; in starfire_init_one()
743 np->speed100 = 1; in starfire_init_one()
746 np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) | in starfire_init_one()
750 np->intr_timer_ctrl |= SmallFrameBypass; in starfire_init_one()
753 np->intr_timer_ctrl |= SmallFrame64; in starfire_init_one()
756 np->intr_timer_ctrl |= SmallFrame128; in starfire_init_one()
759 np->intr_timer_ctrl |= SmallFrame256; in starfire_init_one()
762 np->intr_timer_ctrl |= SmallFrame512; in starfire_init_one()
773 netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work); in starfire_init_one()
801 np->phys[phy_idx++] = phy; in starfire_init_one()
802 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); in starfire_init_one()
805 dev->name, phy, mii_status, np->mii_if.advertising); in starfire_init_one()
810 np->phy_cnt = phy_idx; in starfire_init_one()
811 if (np->phy_cnt > 0) in starfire_init_one()
812 np->mii_if.phy_id = np->phys[0]; in starfire_init_one()
814 memset(&np->mii_if, 0, sizeof(np->mii_if)); in starfire_init_one()
834 struct netdev_private *np = netdev_priv(dev); in mdio_read() local
835 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2); in mdio_read()
851 struct netdev_private *np = netdev_priv(dev); in mdio_write() local
852 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2); in mdio_write()
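
mdio_read() and mdio_write() compute the MMIO address of a PHY register directly: each PHY gets a 128-byte window (phy_id << 7) and each MII register a 4-byte slot (location << 2) inside the MIICtrl region. A small sketch of that address arithmetic; MY_MIICTRL is a placeholder for the chip's real MIICtrl offset:

    #include <linux/io.h>

    #define MY_MIICTRL      0x52000         /* placeholder register-window offset */

    /* 128 bytes of address space per PHY, one 32-bit slot per MII register. */
    static void __iomem *my_mdio_addr(void __iomem *base, int phy_id, int location)
    {
            return base + MY_MIICTRL + (phy_id << 7) + (location << 2);
    }

    static int my_mdio_read(void __iomem *base, int phy_id, int location)
    {
            return readl(my_mdio_addr(base, phy_id, location)) & 0xffff;  /* MII regs are 16 bit */
    }
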
862 struct netdev_private *np = netdev_priv(dev); in netdev_open() local
863 void __iomem *ioaddr = np->base; in netdev_open()
864 const int irq = np->pci_dev->irq; in netdev_open()
883 if (!np->queue_mem) { in netdev_open()
888 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; in netdev_open()
889 np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev, in netdev_open()
890 np->queue_mem_size, in netdev_open()
891 &np->queue_mem_dma, GFP_ATOMIC); in netdev_open()
892 if (np->queue_mem == NULL) { in netdev_open()
897 np->tx_done_q = np->queue_mem; in netdev_open()
898 np->tx_done_q_dma = np->queue_mem_dma; in netdev_open()
899 np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size; in netdev_open()
900 np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size; in netdev_open()
901 np->tx_ring = (void *) np->rx_done_q + rx_done_q_size; in netdev_open()
902 np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size; in netdev_open()
903 np->rx_ring = (void *) np->tx_ring + tx_ring_size; in netdev_open()
904 np->rx_ring_dma = np->tx_ring_dma + tx_ring_size; in netdev_open()
911 writel((np->rx_buf_sz << RxBufferLenShift) | in netdev_open()
934 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr); in netdev_open()
935 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr); in netdev_open()
936 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr); in netdev_open()
937 writel(np->rx_ring_dma, ioaddr + RxDescQAddr); in netdev_open()
938 writel(np->tx_ring_dma, ioaddr + TxRingPtr); in netdev_open()
940 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr); in netdev_open()
941 writel(np->rx_done_q_dma | in netdev_open()
967 np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */ in netdev_open()
968 writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode); in netdev_open()
970 writel(np->tx_mode, ioaddr + TxMode); in netdev_open()
971 np->tx_threshold = 4; in netdev_open()
972 writel(np->tx_threshold, ioaddr + TxThreshold); in netdev_open()
974 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl); in netdev_open()
976 napi_enable(&np->napi); in netdev_open()
984 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE); in netdev_open()
1004 retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev); in netdev_open()
1016 retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev); in netdev_open()
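
In netdev_open() the driver sizes one dma_alloc_coherent() block for all four queues and then carves the TX-done, RX-done, TX and RX rings out of it, advancing the CPU pointer and the DMA address together. (The `(dma >> 16) >> 16` writes above are the usual way to extract the upper 32 bits of a dma_addr_t without a shift-width warning when dma_addr_t is only 32 bits wide.) A rough sketch of the carve-up, assuming illustrative names and GFP_KERNEL context:

    #include <linux/dma-mapping.h>

    struct my_queues {
            void            *mem;           /* CPU address of the whole block */
            dma_addr_t      mem_dma;        /* matching bus address */
            void            *tx_done_q, *rx_done_q, *tx_ring, *rx_ring;
            dma_addr_t      tx_done_dma, rx_done_dma, tx_ring_dma, rx_ring_dma;
    };

    /* Allocate one coherent block and slice it; the sizes are placeholders. */
    static int my_alloc_queues(struct device *dev, struct my_queues *q,
                               size_t tx_done_sz, size_t rx_done_sz,
                               size_t tx_ring_sz, size_t rx_ring_sz)
    {
            size_t total = tx_done_sz + rx_done_sz + tx_ring_sz + rx_ring_sz;

            q->mem = dma_alloc_coherent(dev, total, &q->mem_dma, GFP_KERNEL);
            if (!q->mem)
                    return -ENOMEM;

            /* CPU pointer and DMA address stay in lock-step through the block. */
            q->tx_done_q   = q->mem;
            q->tx_done_dma = q->mem_dma;
            q->rx_done_q   = q->tx_done_q + tx_done_sz;
            q->rx_done_dma = q->tx_done_dma + tx_done_sz;
            q->tx_ring     = q->rx_done_q + rx_done_sz;
            q->tx_ring_dma = q->rx_done_dma + rx_done_sz;
            q->rx_ring     = q->tx_ring + tx_ring_sz;
            q->rx_ring_dma = q->tx_ring_dma + tx_ring_sz;
            return 0;
    }
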
1062 struct netdev_private *np = netdev_priv(dev); in check_duplex() local
1066 mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising); in check_duplex()
1067 mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET); in check_duplex()
1069 while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET) in check_duplex()
1076 reg0 = mdio_read(dev, np->phys[0], MII_BMCR); in check_duplex()
1078 if (!np->mii_if.force_media) { in check_duplex()
1082 if (np->speed100) in check_duplex()
1084 if (np->mii_if.full_duplex) in check_duplex()
1088 np->speed100 ? "100" : "10", in check_duplex()
1089 np->mii_if.full_duplex ? "full" : "half"); in check_duplex()
1091 mdio_write(dev, np->phys[0], MII_BMCR, reg0); in check_duplex()
1097 struct netdev_private *np = netdev_priv(dev); in tx_timeout() local
1098 void __iomem *ioaddr = np->base; in tx_timeout()
1127 struct netdev_private *np = netdev_priv(dev); in init_ring() local
1130 np->cur_rx = np->cur_tx = np->reap_tx = 0; in init_ring()
1131 np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0; in init_ring()
1133 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in init_ring()
1137 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz); in init_ring()
1138 np->rx_info[i].skb = skb; in init_ring()
1141 np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev, in init_ring()
1143 np->rx_buf_sz, in init_ring()
1145 if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) { in init_ring()
1147 np->rx_info[i].skb = NULL; in init_ring()
1151 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); in init_ring()
1153 writew(i - 1, np->base + RxDescQIdx); in init_ring()
1154 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in init_ring()
1158 np->rx_ring[i].rxaddr = 0; in init_ring()
1159 np->rx_info[i].skb = NULL; in init_ring()
1160 np->rx_info[i].mapping = 0; in init_ring()
1163 np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing); in init_ring()
1167 np->rx_done_q[i].status = 0; in init_ring()
1168 np->tx_done_q[i].status = 0; in init_ring()
1172 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i])); in init_ring()
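
init_ring() populates the RX ring by allocating an skb per slot, mapping it DMA_FROM_DEVICE, checking the mapping, and only then writing the bus address (plus a valid flag) into the descriptor; a failed allocation simply leaves the tail of the ring empty. A condensed sketch of that loop, using stand-in names (my_rx_slot, MY_RX_RING_SIZE) and a plain little-endian descriptor word instead of the driver's cpu_to_dma()/RxDescValid encoding:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    #define MY_RX_RING_SIZE 256

    struct my_rx_slot {
            struct sk_buff  *skb;
            dma_addr_t      mapping;
    };

    /* Fill as many RX slots as allocation allows; returns the number in use. */
    static int my_fill_rx_ring(struct net_device *dev, struct device *dmadev,
                               struct my_rx_slot *slots, __le64 *ring,
                               unsigned int buf_sz)
    {
            int i;

            for (i = 0; i < MY_RX_RING_SIZE; i++) {
                    struct sk_buff *skb = netdev_alloc_skb(dev, buf_sz);

                    if (!skb)
                            break;          /* run with a partially filled ring */
                    slots[i].skb = skb;
                    slots[i].mapping = dma_map_single(dmadev, skb->data, buf_sz,
                                                      DMA_FROM_DEVICE);
                    if (dma_mapping_error(dmadev, slots[i].mapping)) {
                            dev_kfree_skb(skb);
                            slots[i].skb = NULL;
                            break;
                    }
                    /* Publish the bus address only after the mapping is known good. */
                    ring[i] = cpu_to_le64(slots[i].mapping);
            }
            return i;
    }
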
1178 struct netdev_private *np = netdev_priv(dev); in start_tx() local
1188 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) { in start_tx()
1200 prev_tx = np->cur_tx; in start_tx()
1201 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
1207 np->tx_info[entry].skb = skb; in start_tx()
1213 if (np->reap_tx) { in start_tx()
1215 np->reap_tx = 0; in start_tx()
1223 np->tx_info[entry].mapping = in start_tx()
1224 dma_map_single(&np->pci_dev->dev, skb->data, in start_tx()
1230 np->tx_info[entry].mapping = in start_tx()
1231 dma_map_single(&np->pci_dev->dev, in start_tx()
1236 if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) { in start_tx()
1241 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); in start_tx()
1242 np->tx_ring[entry].status = cpu_to_le32(status); in start_tx()
1245 dev->name, np->cur_tx, np->dirty_tx, in start_tx()
1248 np->tx_info[entry].used_slots = TX_RING_SIZE - entry; in start_tx()
1249 np->cur_tx += np->tx_info[entry].used_slots; in start_tx()
1252 np->tx_info[entry].used_slots = 1; in start_tx()
1253 np->cur_tx += np->tx_info[entry].used_slots; in start_tx()
1257 if (np->cur_tx % (TX_RING_SIZE / 2) == 0) in start_tx()
1258 np->reap_tx = 1; in start_tx()
1267 writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx); in start_tx()
1270 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE) in start_tx()
1277 np->tx_info[entry].skb = NULL; in start_tx()
1279 dma_unmap_single(&np->pci_dev->dev, in start_tx()
1280 np->tx_info[entry].mapping, in start_tx()
1282 np->tx_info[entry].mapping = 0; in start_tx()
1283 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; in start_tx()
1285 dma_unmap_single(&np->pci_dev->dev, in start_tx()
1286 np->tx_info[entry].mapping, in start_tx()
1293 np->cur_tx = prev_tx; in start_tx()
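
The start_tx() lines show the usual unwind-on-error discipline for a fragmented skb: the head and each fragment are mapped in turn, and if any mapping fails, everything mapped so far is unmapped and the producer index (cur_tx) is rolled back to its value before the packet, so nothing half-mapped is ever handed to the hardware. A simplified sketch of just the map-and-unwind step, with hypothetical names:

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    /* Map the skb head and all fragments; on failure undo whatever was mapped. */
    static int my_map_skb(struct device *dmadev, struct sk_buff *skb,
                          dma_addr_t *maps, unsigned int *lens)
    {
            int i, nfrags = skb_shinfo(skb)->nr_frags;

            lens[0] = skb_headlen(skb);
            maps[0] = dma_map_single(dmadev, skb->data, lens[0], DMA_TO_DEVICE);
            if (dma_mapping_error(dmadev, maps[0]))
                    return -ENOMEM;

            for (i = 0; i < nfrags; i++) {
                    const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                    lens[i + 1] = skb_frag_size(frag);
                    maps[i + 1] = skb_frag_dma_map(dmadev, frag, 0, lens[i + 1],
                                                   DMA_TO_DEVICE);
                    if (dma_mapping_error(dmadev, maps[i + 1]))
                            goto unwind;
            }
            return 0;

    unwind:
            /* Unmap the head and every fragment mapped before the failure. */
            dma_unmap_single(dmadev, maps[0], lens[0], DMA_TO_DEVICE);
            while (--i >= 0)
                    dma_unmap_page(dmadev, maps[i + 1], lens[i + 1], DMA_TO_DEVICE);
            return -ENOMEM;
    }

On success the caller would fill the ring descriptors from maps[]/lens[] and only then advance the hardware producer index, as the listed TxProducerIdx write does.
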
1302 struct netdev_private *np = netdev_priv(dev); in intr_handler() local
1303 void __iomem *ioaddr = np->base; in intr_handler()
1324 if (likely(napi_schedule_prep(&np->napi))) { in intr_handler()
1325 __napi_schedule(&np->napi); in intr_handler()
1352 while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) { in intr_handler()
1355 dev->name, np->dirty_tx, np->tx_done, tx_status); in intr_handler()
1360 struct sk_buff *skb = np->tx_info[entry].skb; in intr_handler()
1361 np->tx_info[entry].skb = NULL; in intr_handler()
1362 dma_unmap_single(&np->pci_dev->dev, in intr_handler()
1363 np->tx_info[entry].mapping, in intr_handler()
1366 np->tx_info[entry].mapping = 0; in intr_handler()
1367 np->dirty_tx += np->tx_info[entry].used_slots; in intr_handler()
1368 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; in intr_handler()
1372 dma_unmap_single(&np->pci_dev->dev, in intr_handler()
1373 np->tx_info[entry].mapping, in intr_handler()
1376 np->dirty_tx++; in intr_handler()
1383 np->tx_done_q[np->tx_done].status = 0; in intr_handler()
1384 np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE; in intr_handler()
1386 writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2); in intr_handler()
1389 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) { in intr_handler()
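
The intr_handler() lines walk the TX completion queue: every nonzero status word names a finished descriptor, whose skb is unmapped and freed, the completion slot is zeroed so it reads as unused on the next pass, and the updated consumer index is written back so the NIC can reuse the queue. A stripped-down sketch of that reap loop; the slot-index extraction from the status word is a placeholder, not the chip's real encoding:

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>
    #include <linux/io.h>

    #define MY_DONE_Q_SIZE  1024

    struct my_tx_done { __le32 status; };
    struct my_tx_slot { struct sk_buff *skb; dma_addr_t mapping; unsigned int len; };

    /* Reap completed transmits; returns the updated consumer index. */
    static unsigned int my_reap_tx(struct device *dmadev, struct my_tx_done *doneq,
                                   struct my_tx_slot *slots, unsigned int tx_done,
                                   void __iomem *consumer_reg)
    {
            u32 status;

            while ((status = le32_to_cpu(doneq[tx_done].status)) != 0) {
                    unsigned int entry = status & 0xff;     /* placeholder index encoding */
                    struct my_tx_slot *slot = &slots[entry];

                    dma_unmap_single(dmadev, slot->mapping, slot->len, DMA_TO_DEVICE);
                    dev_consume_skb_any(slot->skb);
                    slot->skb = NULL;

                    doneq[tx_done].status = 0;              /* mark the completion slot free */
                    tx_done = (tx_done + 1) % MY_DONE_Q_SIZE;
            }
            writew(tx_done, consumer_reg);                  /* tell the NIC how far we got */
            return tx_done;
    }

After reaping, the listed code wakes the TX queue once cur_tx - dirty_tx leaves enough free ring slots for another worst-case packet.
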
1428 struct netdev_private *np = netdev_priv(dev); in __netdev_rx() local
1433 while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) { in __netdev_rx()
1437 rx_done_desc *desc = &np->rx_done_q[np->rx_done]; in __netdev_rx()
1440 printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status); in __netdev_rx()
1467 dma_sync_single_for_cpu(&np->pci_dev->dev, in __netdev_rx()
1468 np->rx_info[entry].mapping, in __netdev_rx()
1470 skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len); in __netdev_rx()
1471 dma_sync_single_for_device(&np->pci_dev->dev, in __netdev_rx()
1472 np->rx_info[entry].mapping, in __netdev_rx()
1476 dma_unmap_single(&np->pci_dev->dev, in __netdev_rx()
1477 np->rx_info[entry].mapping, in __netdev_rx()
1478 np->rx_buf_sz, DMA_FROM_DEVICE); in __netdev_rx()
1479 skb = np->rx_info[entry].skb; in __netdev_rx()
1481 np->rx_info[entry].skb = NULL; in __netdev_rx()
1482 np->rx_info[entry].mapping = 0; in __netdev_rx()
1496 …printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->stat… in __netdev_rx()
1531 np->cur_rx++; in __netdev_rx()
1533 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE; in __netdev_rx()
1540 writew(np->rx_done, np->base + CompletionQConsumerIdx); in __netdev_rx()
1546 retcode, np->rx_done, desc_status); in __netdev_rx()
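
__netdev_rx() uses the classic copybreak split: a short frame is copied out of the still-mapped RX buffer, bracketed by dma_sync_single_for_cpu()/dma_sync_single_for_device(), so the buffer can stay in the ring; a long frame is unmapped and its skb handed up as-is, leaving the slot empty for refill_rx_ring() to repopulate. A compact sketch, with rx_copybreak as the (tunable) threshold and a local slot struct shaped like the rx_info[] entries above:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    static int rx_copybreak = 256;  /* copy frames shorter than this */

    struct my_rx_slot { struct sk_buff *skb; dma_addr_t mapping; };

    /* Turn one completed RX descriptor into an skb ready for the stack. */
    static struct sk_buff *my_rx_one(struct net_device *dev, struct device *dmadev,
                                     struct my_rx_slot *slot, unsigned int pkt_len,
                                     unsigned int buf_sz)
    {
            struct sk_buff *skb;

            if (pkt_len < rx_copybreak &&
                (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                    skb_reserve(skb, 2);    /* align the IP header */
                    dma_sync_single_for_cpu(dmadev, slot->mapping, pkt_len,
                                            DMA_FROM_DEVICE);
                    skb_copy_to_linear_data(skb, slot->skb->data, pkt_len);
                    dma_sync_single_for_device(dmadev, slot->mapping, pkt_len,
                                               DMA_FROM_DEVICE);
                    skb_put(skb, pkt_len);
                    /* original buffer stays mapped and stays in the ring */
            } else {
                    dma_unmap_single(dmadev, slot->mapping, buf_sz, DMA_FROM_DEVICE);
                    skb = slot->skb;
                    skb_put(skb, pkt_len);
                    slot->skb = NULL;       /* force the refill path to replace it */
                    slot->mapping = 0;
            }
            return skb;
    }
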
1552 struct netdev_private *np = container_of(napi, struct netdev_private, napi); in netdev_poll() local
1553 struct net_device *dev = np->dev; in netdev_poll()
1555 void __iomem *ioaddr = np->base; in netdev_poll()
1583 struct netdev_private *np = netdev_priv(dev); in refill_rx_ring() local
1588 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { in refill_rx_ring()
1589 entry = np->dirty_rx % RX_RING_SIZE; in refill_rx_ring()
1590 if (np->rx_info[entry].skb == NULL) { in refill_rx_ring()
1591 skb = netdev_alloc_skb(dev, np->rx_buf_sz); in refill_rx_ring()
1592 np->rx_info[entry].skb = skb; in refill_rx_ring()
1595 np->rx_info[entry].mapping = in refill_rx_ring()
1596 dma_map_single(&np->pci_dev->dev, skb->data, in refill_rx_ring()
1597 np->rx_buf_sz, DMA_FROM_DEVICE); in refill_rx_ring()
1598 if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) { in refill_rx_ring()
1600 np->rx_info[entry].skb = NULL; in refill_rx_ring()
1603 np->rx_ring[entry].rxaddr = in refill_rx_ring()
1604 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); in refill_rx_ring()
1607 np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing); in refill_rx_ring()
1610 writew(entry, np->base + RxDescQIdx); in refill_rx_ring()
1616 struct netdev_private *np = netdev_priv(dev); in netdev_media_change() local
1617 void __iomem *ioaddr = np->base; in netdev_media_change()
1623 mdio_read(dev, np->phys[0], MII_BMCR); in netdev_media_change()
1624 mdio_read(dev, np->phys[0], MII_BMSR); in netdev_media_change()
1626 reg0 = mdio_read(dev, np->phys[0], MII_BMCR); in netdev_media_change()
1627 reg1 = mdio_read(dev, np->phys[0], MII_BMSR); in netdev_media_change()
1633 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE); in netdev_media_change()
1634 reg5 = mdio_read(dev, np->phys[0], MII_LPA); in netdev_media_change()
1636 np->speed100 = 1; in netdev_media_change()
1637 np->mii_if.full_duplex = 1; in netdev_media_change()
1639 np->speed100 = 1; in netdev_media_change()
1640 np->mii_if.full_duplex = 0; in netdev_media_change()
1642 np->speed100 = 0; in netdev_media_change()
1643 np->mii_if.full_duplex = 1; in netdev_media_change()
1645 np->speed100 = 0; in netdev_media_change()
1646 np->mii_if.full_duplex = 0; in netdev_media_change()
1651 np->speed100 = 1; in netdev_media_change()
1653 np->speed100 = 0; in netdev_media_change()
1655 np->mii_if.full_duplex = 1; in netdev_media_change()
1657 np->mii_if.full_duplex = 0; in netdev_media_change()
1662 np->speed100 ? "100" : "10", in netdev_media_change()
1663 np->mii_if.full_duplex ? "full" : "half"); in netdev_media_change()
1665 new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */ in netdev_media_change()
1666 if (np->mii_if.full_duplex) in netdev_media_change()
1668 if (np->tx_mode != new_tx_mode) { in netdev_media_change()
1669 np->tx_mode = new_tx_mode; in netdev_media_change()
1670 writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode); in netdev_media_change()
1672 writel(np->tx_mode, ioaddr + TxMode); in netdev_media_change()
1675 new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X; in netdev_media_change()
1676 if (np->speed100) in netdev_media_change()
1678 if (np->intr_timer_ctrl != new_intr_timer_ctrl) { in netdev_media_change()
1679 np->intr_timer_ctrl = new_intr_timer_ctrl; in netdev_media_change()
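
The media-change lines resolve the negotiated link the standard way: AND the local advertisement (MII_ADVERTISE) with the partner's ability word (MII_LPA) and take the best common mode in the order 100/full, 100/half, 10/full, 10/half, then rewrite TxMode and the interrupt timer only if the result actually changed. A tiny sketch of just the resolution step:

    #include <linux/mii.h>

    /* Pick speed/duplex from the intersection of our advertisement and the LPA word. */
    static void my_resolve_link(u16 advertise, u16 lpa, int *speed100, int *full_duplex)
    {
            u16 common = advertise & lpa;

            if (common & LPA_100FULL) {
                    *speed100 = 1;  *full_duplex = 1;
            } else if (common & LPA_100HALF) {
                    *speed100 = 1;  *full_duplex = 0;
            } else if (common & LPA_10FULL) {
                    *speed100 = 0;  *full_duplex = 1;
            } else {
                    *speed100 = 0;  *full_duplex = 0;
            }
    }
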
1691 struct netdev_private *np = netdev_priv(dev); in netdev_error() local
1695 if (np->tx_threshold <= PKT_BUF_SZ / 16) { in netdev_error()
1696 writel(++np->tx_threshold, np->base + TxThreshold); in netdev_error()
1698 dev->name, np->tx_threshold * 16); in netdev_error()
1718 struct netdev_private *np = netdev_priv(dev); in get_stats() local
1719 void __iomem *ioaddr = np->base; in get_stats()
1743 static u32 set_vlan_mode(struct netdev_private *np) in set_vlan_mode() argument
1747 void __iomem *filter_addr = np->base + HashTable + 8; in set_vlan_mode()
1750 for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) { in set_vlan_mode()
1771 struct netdev_private *np = netdev_priv(dev); in set_rx_mode() local
1772 void __iomem *ioaddr = np->base; in set_rx_mode()
1778 rx_mode |= set_vlan_mode(np); in set_rx_mode()
1844 struct netdev_private *np = netdev_priv(dev); in get_drvinfo() local
1846 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in get_drvinfo()
1852 struct netdev_private *np = netdev_priv(dev); in get_link_ksettings() local
1853 spin_lock_irq(&np->lock); in get_link_ksettings()
1854 mii_ethtool_get_link_ksettings(&np->mii_if, cmd); in get_link_ksettings()
1855 spin_unlock_irq(&np->lock); in get_link_ksettings()
1862 struct netdev_private *np = netdev_priv(dev); in set_link_ksettings() local
1864 spin_lock_irq(&np->lock); in set_link_ksettings()
1865 res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); in set_link_ksettings()
1866 spin_unlock_irq(&np->lock); in set_link_ksettings()
1873 struct netdev_private *np = netdev_priv(dev); in nway_reset() local
1874 return mii_nway_restart(&np->mii_if); in nway_reset()
1879 struct netdev_private *np = netdev_priv(dev); in get_link() local
1880 return mii_link_ok(&np->mii_if); in get_link()
1906 struct netdev_private *np = netdev_priv(dev); in netdev_ioctl() local
1913 spin_lock_irq(&np->lock); in netdev_ioctl()
1914 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); in netdev_ioctl()
1915 spin_unlock_irq(&np->lock); in netdev_ioctl()
1917 if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0])) in netdev_ioctl()
1925 struct netdev_private *np = netdev_priv(dev); in netdev_close() local
1926 void __iomem *ioaddr = np->base; in netdev_close()
1931 napi_disable(&np->napi); in netdev_close()
1937 dev->name, np->cur_tx, np->dirty_tx, in netdev_close()
1938 np->cur_rx, np->dirty_rx); in netdev_close()
1950 (long long) np->tx_ring_dma); in netdev_close()
1953 i, le32_to_cpu(np->tx_ring[i].status), in netdev_close()
1954 (long long) dma_to_cpu(np->tx_ring[i].addr), in netdev_close()
1955 le32_to_cpu(np->tx_done_q[i].status)); in netdev_close()
1957 (long long) np->rx_ring_dma, np->rx_done_q); in netdev_close()
1958 if (np->rx_done_q) in netdev_close()
1961 i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status)); in netdev_close()
1965 free_irq(np->pci_dev->irq, dev); in netdev_close()
1969 np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */ in netdev_close()
1970 if (np->rx_info[i].skb != NULL) { in netdev_close()
1971 dma_unmap_single(&np->pci_dev->dev, in netdev_close()
1972 np->rx_info[i].mapping, in netdev_close()
1973 np->rx_buf_sz, DMA_FROM_DEVICE); in netdev_close()
1974 dev_kfree_skb(np->rx_info[i].skb); in netdev_close()
1976 np->rx_info[i].skb = NULL; in netdev_close()
1977 np->rx_info[i].mapping = 0; in netdev_close()
1980 struct sk_buff *skb = np->tx_info[i].skb; in netdev_close()
1983 dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping, in netdev_close()
1985 np->tx_info[i].mapping = 0; in netdev_close()
1987 np->tx_info[i].skb = NULL; in netdev_close()
2020 struct netdev_private *np = netdev_priv(dev); in starfire_remove_one() local
2026 if (np->queue_mem) in starfire_remove_one()
2027 dma_free_coherent(&pdev->dev, np->queue_mem_size, in starfire_remove_one()
2028 np->queue_mem, np->queue_mem_dma); in starfire_remove_one()
2035 iounmap(np->base); in starfire_remove_one()