Lines matching refs: np — identifier cross-reference for drivers/net/ethernet/dlink/sundance.c (Sundance Alta / D-Link Ethernet driver)
448 struct netdev_private *np = netdev_priv(dev); in sundance_reset() local
449 void __iomem *ioaddr = np->base + ASICCtrl; in sundance_reset()
468 struct netdev_private *np = netdev_priv(dev); in sundance_poll_controller() local
470 disable_irq(np->pci_dev->irq); in sundance_poll_controller()
471 intr_handler(np->pci_dev->irq, dev); in sundance_poll_controller()
472 enable_irq(np->pci_dev->irq); in sundance_poll_controller()
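
A reassembled sketch of the netpoll shape these three lines come from: mask the device IRQ, call the driver's own ISR synchronously, then unmask. intr_handler and the np fields are the driver's, per the listing; the function name example_poll_controller is illustrative.

        /* netpoll: run the ISR by hand with the device IRQ masked */
        static void example_poll_controller(struct net_device *dev)
        {
                struct netdev_private *np = netdev_priv(dev);

                disable_irq(np->pci_dev->irq);        /* mask the line    */
                intr_handler(np->pci_dev->irq, dev);  /* synchronous poll */
                enable_irq(np->pci_dev->irq);         /* unmask           */
        }
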
496 struct netdev_private *np; in sundance_probe1() local
518 dev = alloc_etherdev(sizeof(*np)); in sundance_probe1()
534 np = netdev_priv(dev); in sundance_probe1()
535 np->ndev = dev; in sundance_probe1()
536 np->base = ioaddr; in sundance_probe1()
537 np->pci_dev = pdev; in sundance_probe1()
538 np->chip_id = chip_idx; in sundance_probe1()
539 np->msg_enable = (1 << debug) - 1; in sundance_probe1()
540 spin_lock_init(&np->lock); in sundance_probe1()
541 spin_lock_init(&np->statlock); in sundance_probe1()
542 tasklet_setup(&np->rx_tasklet, rx_poll); in sundance_probe1()
543 tasklet_setup(&np->tx_tasklet, tx_poll); in sundance_probe1()
549 np->tx_ring = (struct netdev_desc *)ring_space; in sundance_probe1()
550 np->tx_ring_dma = ring_dma; in sundance_probe1()
556 np->rx_ring = (struct netdev_desc *)ring_space; in sundance_probe1()
557 np->rx_ring_dma = ring_dma; in sundance_probe1()
559 np->mii_if.dev = dev; in sundance_probe1()
560 np->mii_if.mdio_read = mdio_read; in sundance_probe1()
561 np->mii_if.mdio_write = mdio_write; in sundance_probe1()
562 np->mii_if.phy_id_mask = 0x1f; in sundance_probe1()
563 np->mii_if.reg_num_mask = 0x1f; in sundance_probe1()
584 np->phys[0] = 1; /* Default setting */ in sundance_probe1()
585 np->mii_preamble_required++; in sundance_probe1()
591 if (sundance_pci_tbl[np->chip_id].device == 0x0200) { in sundance_probe1()
602 np->phys[phy_idx++] = phyx; in sundance_probe1()
603 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); in sundance_probe1()
605 np->mii_preamble_required++; in sundance_probe1()
608 dev->name, phyx, mii_status, np->mii_if.advertising); in sundance_probe1()
611 np->mii_preamble_required--; in sundance_probe1()
619 np->mii_if.phy_id = np->phys[0]; in sundance_probe1()
622 np->an_enable = 1; in sundance_probe1()
625 np->an_enable = 0; in sundance_probe1()
628 np->speed = 100; in sundance_probe1()
629 np->mii_if.full_duplex = 1; in sundance_probe1()
632 np->speed = 100; in sundance_probe1()
633 np->mii_if.full_duplex = 0; in sundance_probe1()
636 np->speed = 10; in sundance_probe1()
637 np->mii_if.full_duplex = 1; in sundance_probe1()
640 np->speed = 10; in sundance_probe1()
641 np->mii_if.full_duplex = 0; in sundance_probe1()
643 np->an_enable = 1; in sundance_probe1()
647 np->flowctrl = 1; in sundance_probe1()
653 if (np->an_enable) { in sundance_probe1()
654 np->speed = 100; in sundance_probe1()
655 np->mii_if.full_duplex = 1; in sundance_probe1()
656 np->an_enable = 0; in sundance_probe1()
660 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET); in sundance_probe1()
663 if (np->flowctrl) in sundance_probe1()
664 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400); in sundance_probe1()
665 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); in sundance_probe1()
667 if (!np->an_enable) { in sundance_probe1()
669 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0; in sundance_probe1()
670 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0; in sundance_probe1()
671 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl); in sundance_probe1()
673 np->speed, np->mii_if.full_duplex ? "Full" : "Half"); in sundance_probe1()
679 if (netif_msg_hw(np)) in sundance_probe1()
682 if (netif_msg_hw(np)) in sundance_probe1()
692 np->rx_ring, np->rx_ring_dma); in sundance_probe1()
695 np->tx_ring, np->tx_ring_dma); in sundance_probe1()
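
The probe fragment above is the standard private-data idiom: alloc_etherdev(sizeof(*np)) reserves the driver's private struct behind struct net_device, netdev_priv() retrieves it, and per-device state (locks, tasklets, DMA rings) is initialized there. A condensed sketch with error handling elided; the dma_alloc_coherent() size expression is illustrative (the driver goes through an intermediate ring_space/ring_dma pair, per the listing).

        dev = alloc_etherdev(sizeof(*np));        /* net_device + private area */
        if (!dev)
                return -ENOMEM;
        np = netdev_priv(dev);                    /* the reserved private area */
        np->ndev = dev;
        np->pci_dev = pdev;
        spin_lock_init(&np->lock);
        spin_lock_init(&np->statlock);
        tasklet_setup(&np->rx_tasklet, rx_poll);  /* callbacks take a tasklet_struct * */
        tasklet_setup(&np->tx_tasklet, tx_poll);

        /* coherent descriptor ring; the bus address is what the chip sees */
        np->tx_ring = dma_alloc_coherent(&pdev->dev,
                        TX_RING_SIZE * sizeof(struct netdev_desc),
                        &np->tx_ring_dma, GFP_KERNEL);
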
761 struct netdev_private *np = netdev_priv(dev); in mdio_read() local
762 void __iomem *mdio_addr = np->base + MIICtrl; in mdio_read()
766 if (np->mii_preamble_required) in mdio_read()
791 struct netdev_private *np = netdev_priv(dev); in mdio_write() local
792 void __iomem *mdio_addr = np->base + MIICtrl; in mdio_write()
796 if (np->mii_preamble_required) in mdio_write()
821 struct netdev_private *np; in mdio_wait_link() local
823 np = netdev_priv(dev); in mdio_wait_link()
824 phy_id = np->phys[0]; in mdio_wait_link()
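
mdio_wait_link() polls the first PHY's basic-status register until the link bit comes up or the wait budget expires. A reassembled sketch: the poll loop body (BMSR_LSTATUS test, 1 ms delay) is filled in here as an assumption, since the listing only shows the np accesses.

        static int example_mdio_wait_link(struct net_device *dev, int wait)
        {
                struct netdev_private *np = netdev_priv(dev);
                int phy_id = np->phys[0];
                unsigned int bmsr;

                do {
                        bmsr = mdio_read(dev, phy_id, MII_BMSR);
                        if (bmsr & BMSR_LSTATUS)   /* link is up */
                                return 0;
                        mdelay(1);
                } while (--wait > 0);
                return -1;                         /* timed out */
        }
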
837 struct netdev_private *np = netdev_priv(dev); in netdev_open() local
838 void __iomem *ioaddr = np->base; in netdev_open()
839 const int irq = np->pci_dev->irq; in netdev_open()
849 if (netif_msg_ifup(np)) in netdev_open()
854 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); in netdev_open()
870 dev->if_port = np->default_port; in netdev_open()
872 spin_lock_init(&np->mcastlock); in netdev_open()
881 if (np->pci_dev->revision >= 0x14) in netdev_open()
885 spin_lock_irqsave(&np->lock, flags); in netdev_open()
887 spin_unlock_irqrestore(&np->lock, flags); in netdev_open()
893 np->wol_enabled = 0; in netdev_open()
895 if (netif_msg_ifup(np)) in netdev_open()
903 timer_setup(&np->timer, netdev_timer, 0); in netdev_open()
904 np->timer.expires = jiffies + 3*HZ; in netdev_open()
905 add_timer(&np->timer); in netdev_open()
915 struct netdev_private *np = netdev_priv(dev); in check_duplex() local
916 void __iomem *ioaddr = np->base; in check_duplex()
917 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); in check_duplex()
918 int negotiated = mii_lpa & np->mii_if.advertising; in check_duplex()
922 if (!np->an_enable || mii_lpa == 0xffff) { in check_duplex()
923 if (np->mii_if.full_duplex) in check_duplex()
931 if (np->mii_if.full_duplex != duplex) { in check_duplex()
932 np->mii_if.full_duplex = duplex; in check_duplex()
933 if (netif_msg_link(np)) in check_duplex()
936 duplex ? "full" : "half", np->phys[0], negotiated); in check_duplex()
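
check_duplex() resolves duplex the way most MII drivers do: AND the partner's ability word (MII_LPA) with our advertisement, then full duplex holds if 100BASE-TX-FD was agreed, or if 10BASE-T-FD is the best common mode. A minimal sketch of that computation:

        int mii_lpa    = mdio_read(dev, np->phys[0], MII_LPA);
        int negotiated = mii_lpa & np->mii_if.advertising;
        /* LPA_100FULL = 0x0100; 0x01C0 covers 100FULL|100HALF|10FULL */
        int duplex     = (negotiated & LPA_100FULL) ||
                         (negotiated & 0x01C0) == LPA_10FULL;
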
943 struct netdev_private *np = from_timer(np, t, timer); in netdev_timer() local
944 struct net_device *dev = np->mii_if.dev; in netdev_timer()
945 void __iomem *ioaddr = np->base; in netdev_timer()
948 if (netif_msg_timer(np)) { in netdev_timer()
955 np->timer.expires = jiffies + next_tick; in netdev_timer()
956 add_timer(&np->timer); in netdev_timer()
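
netdev_timer() shows the timer_setup()/from_timer() pairing armed in netdev_open(): the callback receives the struct timer_list pointer and container_of()s back to the private struct, which keeps the net_device reachable through np->mii_if.dev. Sketch; the 10 s re-arm interval is illustrative.

        static void example_timer(struct timer_list *t)
        {
                struct netdev_private *np = from_timer(np, t, timer);
                struct net_device *dev = np->mii_if.dev;  /* back-pointer */

                check_duplex(dev);                        /* periodic link work */
                np->timer.expires = jiffies + 10 * HZ;    /* re-arm */
                add_timer(&np->timer);
        }
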
961 struct netdev_private *np = netdev_priv(dev); in tx_timeout() local
962 void __iomem *ioaddr = np->base; in tx_timeout()
966 tasklet_disable(&np->tx_tasklet); in tx_timeout()
977 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), in tx_timeout()
978 le32_to_cpu(np->tx_ring[i].next_desc), in tx_timeout()
979 le32_to_cpu(np->tx_ring[i].status), in tx_timeout()
980 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff, in tx_timeout()
981 le32_to_cpu(np->tx_ring[i].frag[0].addr), in tx_timeout()
982 le32_to_cpu(np->tx_ring[i].frag[0].length)); in tx_timeout()
985 ioread32(np->base + TxListPtr), in tx_timeout()
988 np->cur_tx, np->cur_tx % TX_RING_SIZE, in tx_timeout()
989 np->dirty_tx, np->dirty_tx % TX_RING_SIZE); in tx_timeout()
990 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx); in tx_timeout()
991 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task); in tx_timeout()
993 spin_lock_irqsave(&np->lock, flag); in tx_timeout()
997 spin_unlock_irqrestore(&np->lock, flag); in tx_timeout()
1003 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in tx_timeout()
1007 tasklet_enable(&np->tx_tasklet); in tx_timeout()
1014 struct netdev_private *np = netdev_priv(dev); in init_ring() local
1017 np->cur_rx = np->cur_tx = 0; in init_ring()
1018 np->dirty_rx = np->dirty_tx = 0; in init_ring()
1019 np->cur_task = 0; in init_ring()
1021 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16); in init_ring()
1025 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma + in init_ring()
1026 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); in init_ring()
1027 np->rx_ring[i].status = 0; in init_ring()
1028 np->rx_ring[i].frag[0].length = 0; in init_ring()
1029 np->rx_skbuff[i] = NULL; in init_ring()
1035 netdev_alloc_skb(dev, np->rx_buf_sz + 2); in init_ring()
1036 np->rx_skbuff[i] = skb; in init_ring()
1040 np->rx_ring[i].frag[0].addr = cpu_to_le32( in init_ring()
1041 dma_map_single(&np->pci_dev->dev, skb->data, in init_ring()
1042 np->rx_buf_sz, DMA_FROM_DEVICE)); in init_ring()
1043 if (dma_mapping_error(&np->pci_dev->dev, in init_ring()
1044 np->rx_ring[i].frag[0].addr)) { in init_ring()
1046 np->rx_skbuff[i] = NULL; in init_ring()
1049 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); in init_ring()
1051 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in init_ring()
1054 np->tx_skbuff[i] = NULL; in init_ring()
1055 np->tx_ring[i].status = 0; in init_ring()
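
The allocation half of init_ring() (shared with refill_rx()) follows the usual receive-buffer recipe: over-allocate by 2 bytes so skb_reserve(skb, 2) can align the IP header, DMA-map the data area, and only publish the mapping into the descriptor once dma_mapping_error() clears it. Reassembled sketch; the local `mapping` variable and the skb_reserve() call are assumptions filled in around the listed lines.

        struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
        dma_addr_t mapping;

        if (!skb)
                break;                             /* leave a hole; refill later */
        skb_reserve(skb, 2);                       /* 16-byte-align the IP header */
        mapping = dma_map_single(&np->pci_dev->dev, skb->data,
                                 np->rx_buf_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(&np->pci_dev->dev, mapping)) {
                dev_kfree_skb(skb);
                np->rx_skbuff[i] = NULL;
                break;
        }
        np->rx_skbuff[i] = skb;
        np->rx_ring[i].frag[0].addr   = cpu_to_le32(mapping);
        np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
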
1061 struct netdev_private *np = from_tasklet(np, t, tx_tasklet); in tx_poll() local
1062 unsigned head = np->cur_task % TX_RING_SIZE; in tx_poll()
1064 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; in tx_poll()
1067 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { in tx_poll()
1068 int entry = np->cur_task % TX_RING_SIZE; in tx_poll()
1069 txdesc = &np->tx_ring[entry]; in tx_poll()
1070 if (np->last_tx) { in tx_poll()
1071 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma + in tx_poll()
1074 np->last_tx = txdesc; in tx_poll()
1079 if (ioread32 (np->base + TxListPtr) == 0) in tx_poll()
1080 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), in tx_poll()
1081 np->base + TxListPtr); in tx_poll()
1087 struct netdev_private *np = netdev_priv(dev); in start_tx() local
1092 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
1093 np->tx_skbuff[entry] = skb; in start_tx()
1094 txdesc = &np->tx_ring[entry]; in start_tx()
1098 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev, in start_tx()
1100 if (dma_mapping_error(&np->pci_dev->dev, in start_tx()
1106 np->cur_tx++; in start_tx()
1109 tasklet_schedule(&np->tx_tasklet); in start_tx()
1112 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && in start_tx()
1118 if (netif_msg_tx_queued(np)) { in start_tx()
1121 dev->name, np->cur_tx, entry); in start_tx()
1127 np->tx_skbuff[entry] = NULL; in start_tx()
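
start_tx(), tx_poll() and the reclaim loop in intr_handler() share one accounting scheme: np->cur_tx (producer), np->cur_task (descriptors already chained for the NIC) and np->dirty_tx (consumer) are free-running counters mapped to ring slots with % TX_RING_SIZE, so cur_tx - dirty_tx is the in-flight depth even across wraparound. A condensed sketch of the enqueue side (descriptor fill elided):

        unsigned entry = np->cur_tx % TX_RING_SIZE;    /* slot for this frame */

        np->tx_skbuff[entry] = skb;
        /* ...fill np->tx_ring[entry], DMA-map skb->data... */
        np->cur_tx++;                                  /* publish the new frame */
        tasklet_schedule(&np->tx_tasklet);             /* tx_poll() chains it to the NIC */

        if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
                netif_stop_queue(dev);                 /* ring nearly full */
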
1136 struct netdev_private *np = netdev_priv(dev); in reset_tx() local
1137 void __iomem *ioaddr = np->base; in reset_tx()
1147 np->tx_ring[i].next_desc = 0; in reset_tx()
1149 skb = np->tx_skbuff[i]; in reset_tx()
1151 dma_unmap_single(&np->pci_dev->dev, in reset_tx()
1152 le32_to_cpu(np->tx_ring[i].frag[0].addr), in reset_tx()
1155 np->tx_skbuff[i] = NULL; in reset_tx()
1159 np->cur_tx = np->dirty_tx = 0; in reset_tx()
1160 np->cur_task = 0; in reset_tx()
1162 np->last_tx = NULL; in reset_tx()
1174 struct netdev_private *np = netdev_priv(dev); in intr_handler() local
1175 void __iomem *ioaddr = np->base; in intr_handler()
1186 if (netif_msg_intr(np)) in intr_handler()
1198 if (np->budget < 0) in intr_handler()
1199 np->budget = RX_BUDGET; in intr_handler()
1200 tasklet_schedule(&np->rx_tasklet); in intr_handler()
1205 if (netif_msg_tx_done(np)) in intr_handler()
1210 if (netif_msg_tx_err(np)) in intr_handler()
1254 if (np->pci_dev->revision >= 0x14) { in intr_handler()
1255 spin_lock(&np->lock); in intr_handler()
1256 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in intr_handler()
1257 int entry = np->dirty_tx % TX_RING_SIZE; in intr_handler()
1261 np->tx_ring[entry].status) >> 2) & 0xff; in intr_handler()
1263 !(le32_to_cpu(np->tx_ring[entry].status) in intr_handler()
1269 skb = np->tx_skbuff[entry]; in intr_handler()
1271 dma_unmap_single(&np->pci_dev->dev, in intr_handler()
1272 le32_to_cpu(np->tx_ring[entry].frag[0].addr), in intr_handler()
1274 dev_consume_skb_irq(np->tx_skbuff[entry]); in intr_handler()
1275 np->tx_skbuff[entry] = NULL; in intr_handler()
1276 np->tx_ring[entry].frag[0].addr = 0; in intr_handler()
1277 np->tx_ring[entry].frag[0].length = 0; in intr_handler()
1279 spin_unlock(&np->lock); in intr_handler()
1281 spin_lock(&np->lock); in intr_handler()
1282 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in intr_handler()
1283 int entry = np->dirty_tx % TX_RING_SIZE; in intr_handler()
1285 if (!(le32_to_cpu(np->tx_ring[entry].status) in intr_handler()
1288 skb = np->tx_skbuff[entry]; in intr_handler()
1290 dma_unmap_single(&np->pci_dev->dev, in intr_handler()
1291 le32_to_cpu(np->tx_ring[entry].frag[0].addr), in intr_handler()
1293 dev_consume_skb_irq(np->tx_skbuff[entry]); in intr_handler()
1294 np->tx_skbuff[entry] = NULL; in intr_handler()
1295 np->tx_ring[entry].frag[0].addr = 0; in intr_handler()
1296 np->tx_ring[entry].frag[0].length = 0; in intr_handler()
1298 spin_unlock(&np->lock); in intr_handler()
1302 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in intr_handler()
1310 if (netif_msg_intr(np)) in intr_handler()
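
Both branches of the reclaim loop (the revision >= 0x14 branch additionally inspects a per-descriptor transmit status byte) reduce to the same pattern. A condensed sketch, where TX_DONE_BIT is a placeholder for the chip's descriptor done flag, which is not visible in this listing:

        spin_lock(&np->lock);
        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                struct sk_buff *skb;

                if (!(le32_to_cpu(np->tx_ring[entry].status) & TX_DONE_BIT))
                        break;                         /* NIC still owns it */
                skb = np->tx_skbuff[entry];
                dma_unmap_single(&np->pci_dev->dev,
                                 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
                                 skb->len, DMA_TO_DEVICE);
                dev_consume_skb_irq(skb);              /* completed TX, IRQ context */
                np->tx_skbuff[entry] = NULL;
                np->tx_ring[entry].frag[0].addr = 0;
                np->tx_ring[entry].frag[0].length = 0;
        }
        spin_unlock(&np->lock);
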
1318 struct netdev_private *np = from_tasklet(np, t, rx_tasklet); in rx_poll() local
1319 struct net_device *dev = np->ndev; in rx_poll()
1320 int entry = np->cur_rx % RX_RING_SIZE; in rx_poll()
1321 int boguscnt = np->budget; in rx_poll()
1322 void __iomem *ioaddr = np->base; in rx_poll()
1327 struct netdev_desc *desc = &(np->rx_ring[entry]); in rx_poll()
1337 if (netif_msg_rx_status(np)) in rx_poll()
1342 if (netif_msg_rx_err(np)) in rx_poll()
1362 if (netif_msg_rx_status(np)) in rx_poll()
1372 dma_sync_single_for_cpu(&np->pci_dev->dev, in rx_poll()
1374 np->rx_buf_sz, DMA_FROM_DEVICE); in rx_poll()
1375 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); in rx_poll()
1376 dma_sync_single_for_device(&np->pci_dev->dev, in rx_poll()
1378 np->rx_buf_sz, DMA_FROM_DEVICE); in rx_poll()
1381 dma_unmap_single(&np->pci_dev->dev, in rx_poll()
1383 np->rx_buf_sz, DMA_FROM_DEVICE); in rx_poll()
1384 skb_put(skb = np->rx_skbuff[entry], pkt_len); in rx_poll()
1385 np->rx_skbuff[entry] = NULL; in rx_poll()
1394 np->cur_rx = entry; in rx_poll()
1396 np->budget -= received; in rx_poll()
1401 np->cur_rx = entry; in rx_poll()
1405 np->budget -= received; in rx_poll()
1406 if (np->budget <= 0) in rx_poll()
1407 np->budget = RX_BUDGET; in rx_poll()
1408 tasklet_schedule(&np->rx_tasklet); in rx_poll()
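
The copy-versus-flip decision inside rx_poll() is the classic rx_copybreak optimization: short frames are copied out of the still-mapped ring buffer, bracketed by dma_sync_single_for_cpu()/_for_device(), while long frames unmap and surrender the ring skb itself, leaving a hole for refill_rx(). Sketch, with desc_addr standing in for le32_to_cpu(desc->frag[0].addr):

        if (pkt_len < rx_copybreak &&
            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                skb_reserve(skb, 2);                   /* align IP header */
                dma_sync_single_for_cpu(&np->pci_dev->dev, desc_addr,
                                        np->rx_buf_sz, DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
                dma_sync_single_for_device(&np->pci_dev->dev, desc_addr,
                                           np->rx_buf_sz, DMA_FROM_DEVICE);
                skb_put(skb, pkt_len);                 /* ring buffer stays mapped */
        } else {
                dma_unmap_single(&np->pci_dev->dev, desc_addr,
                                 np->rx_buf_sz, DMA_FROM_DEVICE);
                skb = np->rx_skbuff[entry];
                skb_put(skb, pkt_len);
                np->rx_skbuff[entry] = NULL;           /* hole; refill_rx() fills it */
        }
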
1413 struct netdev_private *np = netdev_priv(dev); in refill_rx() local
1418 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; in refill_rx()
1419 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) { in refill_rx()
1421 entry = np->dirty_rx % RX_RING_SIZE; in refill_rx()
1422 if (np->rx_skbuff[entry] == NULL) { in refill_rx()
1423 skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2); in refill_rx()
1424 np->rx_skbuff[entry] = skb; in refill_rx()
1428 np->rx_ring[entry].frag[0].addr = cpu_to_le32( in refill_rx()
1429 dma_map_single(&np->pci_dev->dev, skb->data, in refill_rx()
1430 np->rx_buf_sz, DMA_FROM_DEVICE)); in refill_rx()
1431 if (dma_mapping_error(&np->pci_dev->dev, in refill_rx()
1432 np->rx_ring[entry].frag[0].addr)) { in refill_rx()
1434 np->rx_skbuff[entry] = NULL; in refill_rx()
1439 np->rx_ring[entry].frag[0].length = in refill_rx()
1440 cpu_to_le32(np->rx_buf_sz | LastFrag); in refill_rx()
1441 np->rx_ring[entry].status = 0; in refill_rx()
1447 struct netdev_private *np = netdev_priv(dev); in netdev_error() local
1448 void __iomem *ioaddr = np->base; in netdev_error()
1455 if (np->an_enable) { in netdev_error()
1456 mii_advertise = mdio_read(dev, np->phys[0], in netdev_error()
1458 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); in netdev_error()
1463 np->speed = 100; in netdev_error()
1466 np->speed = 100; in netdev_error()
1469 np->speed = 10; in netdev_error()
1472 np->speed = 10; in netdev_error()
1478 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR); in netdev_error()
1480 np->speed = speed; in netdev_error()
1488 if (np->flowctrl && np->mii_if.full_duplex) { in netdev_error()
1512 struct netdev_private *np = netdev_priv(dev); in get_stats() local
1513 void __iomem *ioaddr = np->base; in get_stats()
1517 spin_lock_irqsave(&np->statlock, flags); in get_stats()
1525 np->xstats.tx_multiple_collisions += mult_coll; in get_stats()
1527 np->xstats.tx_single_collisions += single_coll; in get_stats()
1529 np->xstats.tx_late_collisions += late_coll; in get_stats()
1534 np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer); in get_stats()
1535 np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer); in get_stats()
1536 np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort); in get_stats()
1537 np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx); in get_stats()
1538 np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx); in get_stats()
1539 np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx); in get_stats()
1540 np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx); in get_stats()
1547 spin_unlock_irqrestore(&np->statlock, flags); in get_stats()
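
get_stats() reads the chip's narrow statistics registers and folds them into np->xstats with +=, all under np->statlock so a concurrent reader cannot split one accumulation interval. A minimal sketch of the shape, register names taken from the listing:

        unsigned long flags;

        spin_lock_irqsave(&np->statlock, flags);
        np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
        np->xstats.tx_aborted  += ioread8(ioaddr + StatsTxAbort);
        np->xstats.tx_bcasts   += ioread8(ioaddr + StatsBcastTx);
        np->xstats.rx_bcasts   += ioread8(ioaddr + StatsBcastRx);
        spin_unlock_irqrestore(&np->statlock, flags);
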
1554 struct netdev_private *np = netdev_priv(dev); in set_rx_mode() local
1555 void __iomem *ioaddr = np->base; in set_rx_mode()
1585 if (np->mii_if.full_duplex && np->flowctrl) in set_rx_mode()
1595 struct netdev_private *np = netdev_priv(dev); in __set_mac_addr() local
1599 iowrite16(addr16, np->base + StationAddr); in __set_mac_addr()
1601 iowrite16(addr16, np->base + StationAddr+2); in __set_mac_addr()
1603 iowrite16(addr16, np->base + StationAddr+4); in __set_mac_addr()
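
__set_mac_addr() programs the 6-byte station address as three little-endian 16-bit writes at consecutive even offsets. Reassembled sketch; the byte pairing is inferred from the iowrite16 offsets in the listing:

        u16 addr16;

        addr16 = dev->dev_addr[0] | (dev->dev_addr[1] << 8);
        iowrite16(addr16, np->base + StationAddr);
        addr16 = dev->dev_addr[2] | (dev->dev_addr[3] << 8);
        iowrite16(addr16, np->base + StationAddr + 2);
        addr16 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
        iowrite16(addr16, np->base + StationAddr + 4);
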
1644 struct netdev_private *np = netdev_priv(dev); in get_drvinfo() local
1646 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in get_drvinfo()
1652 struct netdev_private *np = netdev_priv(dev); in get_link_ksettings() local
1653 spin_lock_irq(&np->lock); in get_link_ksettings()
1654 mii_ethtool_get_link_ksettings(&np->mii_if, cmd); in get_link_ksettings()
1655 spin_unlock_irq(&np->lock); in get_link_ksettings()
1662 struct netdev_private *np = netdev_priv(dev); in set_link_ksettings() local
1664 spin_lock_irq(&np->lock); in set_link_ksettings()
1665 res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); in set_link_ksettings()
1666 spin_unlock_irq(&np->lock); in set_link_ksettings()
1672 struct netdev_private *np = netdev_priv(dev); in nway_reset() local
1673 return mii_nway_restart(&np->mii_if); in nway_reset()
1678 struct netdev_private *np = netdev_priv(dev); in get_link() local
1679 return mii_link_ok(&np->mii_if); in get_link()
1684 struct netdev_private *np = netdev_priv(dev); in get_msglevel() local
1685 return np->msg_enable; in get_msglevel()
1690 struct netdev_private *np = netdev_priv(dev); in set_msglevel() local
1691 np->msg_enable = val; in set_msglevel()
1714 struct netdev_private *np = netdev_priv(dev); in get_ethtool_stats() local
1718 data[i++] = np->xstats.tx_multiple_collisions; in get_ethtool_stats()
1719 data[i++] = np->xstats.tx_single_collisions; in get_ethtool_stats()
1720 data[i++] = np->xstats.tx_late_collisions; in get_ethtool_stats()
1721 data[i++] = np->xstats.tx_deferred; in get_ethtool_stats()
1722 data[i++] = np->xstats.tx_deferred_excessive; in get_ethtool_stats()
1723 data[i++] = np->xstats.tx_aborted; in get_ethtool_stats()
1724 data[i++] = np->xstats.tx_bcasts; in get_ethtool_stats()
1725 data[i++] = np->xstats.rx_bcasts; in get_ethtool_stats()
1726 data[i++] = np->xstats.tx_mcasts; in get_ethtool_stats()
1727 data[i++] = np->xstats.rx_mcasts; in get_ethtool_stats()
1735 struct netdev_private *np = netdev_priv(dev); in sundance_get_wol() local
1736 void __iomem *ioaddr = np->base; in sundance_get_wol()
1742 if (!np->wol_enabled) in sundance_get_wol()
1755 struct netdev_private *np = netdev_priv(dev); in sundance_set_wol() local
1756 void __iomem *ioaddr = np->base; in sundance_set_wol()
1759 if (!device_can_wakeup(&np->pci_dev->dev)) in sundance_set_wol()
1762 np->wol_enabled = !!(wol->wolopts); in sundance_set_wol()
1767 if (np->wol_enabled) { in sundance_set_wol()
1775 device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled); in sundance_set_wol()
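
sundance_set_wol() follows the standard ethtool WOL handshake: refuse if the PCI device cannot wake the system, latch the request in np->wol_enabled, program the chip, and mirror the state to the PM core so the suspend path knows to leave wakeup armed. Condensed sketch; the chip-programming step is elided:

        if (!device_can_wakeup(&np->pci_dev->dev))
                return -EOPNOTSUPP;
        np->wol_enabled = !!(wol->wolopts);
        /* ...program the chip's wake-event/magic-packet bits here... */
        device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
        return 0;
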
1802 struct netdev_private *np = netdev_priv(dev); in netdev_ioctl() local
1808 spin_lock_irq(&np->lock); in netdev_ioctl()
1809 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL); in netdev_ioctl()
1810 spin_unlock_irq(&np->lock); in netdev_ioctl()
1817 struct netdev_private *np = netdev_priv(dev); in netdev_close() local
1818 void __iomem *ioaddr = np->base; in netdev_close()
1823 tasklet_kill(&np->rx_tasklet); in netdev_close()
1824 tasklet_kill(&np->tx_tasklet); in netdev_close()
1825 np->cur_tx = 0; in netdev_close()
1826 np->dirty_tx = 0; in netdev_close()
1827 np->cur_task = 0; in netdev_close()
1828 np->last_tx = NULL; in netdev_close()
1832 if (netif_msg_ifdown(np)) { in netdev_close()
1838 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); in netdev_close()
1866 if (netif_msg_hw(np)) { in netdev_close()
1868 (int)(np->tx_ring_dma)); in netdev_close()
1871 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr, in netdev_close()
1872 np->tx_ring[i].frag[0].length); in netdev_close()
1874 (int)(np->rx_ring_dma)); in netdev_close()
1877 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr, in netdev_close()
1878 np->rx_ring[i].frag[0].length); in netdev_close()
1883 free_irq(np->pci_dev->irq, dev); in netdev_close()
1885 del_timer_sync(&np->timer); in netdev_close()
1889 np->rx_ring[i].status = 0; in netdev_close()
1890 skb = np->rx_skbuff[i]; in netdev_close()
1892 dma_unmap_single(&np->pci_dev->dev, in netdev_close()
1893 le32_to_cpu(np->rx_ring[i].frag[0].addr), in netdev_close()
1894 np->rx_buf_sz, DMA_FROM_DEVICE); in netdev_close()
1896 np->rx_skbuff[i] = NULL; in netdev_close()
1898 np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */ in netdev_close()
1901 np->tx_ring[i].next_desc = 0; in netdev_close()
1902 skb = np->tx_skbuff[i]; in netdev_close()
1904 dma_unmap_single(&np->pci_dev->dev, in netdev_close()
1905 le32_to_cpu(np->tx_ring[i].frag[0].addr), in netdev_close()
1908 np->tx_skbuff[i] = NULL; in netdev_close()
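
The close path is init_ring() in reverse: kill the tasklets, then for every RX slot unmap the buffer before freeing the skb, and poison the descriptor address so a late DMA write is noticeable. Condensed sketch of the RX half, assembled from the listed lines:

        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].status = 0;
                skb = np->rx_skbuff[i];
                if (skb) {
                        dma_unmap_single(&np->pci_dev->dev,
                                         le32_to_cpu(np->rx_ring[i].frag[0].addr),
                                         np->rx_buf_sz, DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                        np->rx_skbuff[i] = NULL;
                }
                np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
        }
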
1920 struct netdev_private *np = netdev_priv(dev); in sundance_remove1() local
1923 np->rx_ring, np->rx_ring_dma); in sundance_remove1()
1925 np->tx_ring, np->tx_ring_dma); in sundance_remove1()
1926 pci_iounmap(pdev, np->base); in sundance_remove1()
1935 struct netdev_private *np = netdev_priv(dev); in sundance_suspend() local
1936 void __iomem *ioaddr = np->base; in sundance_suspend()
1944 if (np->wol_enabled) { in sundance_suspend()
1949 device_set_wakeup_enable(dev_d, np->wol_enabled); in sundance_suspend()