Lines matching refs: np (fealnx.c, Myson MTD-8xx Ethernet driver)
475 struct netdev_private *np; in fealnx_init_one() local
534 np = netdev_priv(dev); in fealnx_init_one()
535 np->mem = ioaddr; in fealnx_init_one()
536 spin_lock_init(&np->lock); in fealnx_init_one()
537 np->pci_dev = pdev; in fealnx_init_one()
538 np->flags = skel_netdrv_tbl[chip_id].flags; in fealnx_init_one()
540 np->mii.dev = dev; in fealnx_init_one()
541 np->mii.mdio_read = mdio_read; in fealnx_init_one()
542 np->mii.mdio_write = mdio_write; in fealnx_init_one()
543 np->mii.phy_id_mask = 0x1f; in fealnx_init_one()
544 np->mii.reg_num_mask = 0x1f; in fealnx_init_one()
552 np->rx_ring = ring_space; in fealnx_init_one()
553 np->rx_ring_dma = ring_dma; in fealnx_init_one()
561 np->tx_ring = ring_space; in fealnx_init_one()
562 np->tx_ring_dma = ring_dma; in fealnx_init_one()
565 if (np->flags == HAS_MII_XCVR) { in fealnx_init_one()
568 for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys); in fealnx_init_one()
573 np->phys[phy_idx++] = phy; in fealnx_init_one()
581 data = mdio_read(dev, np->phys[0], 2); in fealnx_init_one()
583 np->PHYType = SeeqPHY; in fealnx_init_one()
585 np->PHYType = AhdocPHY; in fealnx_init_one()
587 np->PHYType = MarvellPHY; in fealnx_init_one()
589 np->PHYType = Myson981; in fealnx_init_one()
591 np->PHYType = LevelOnePHY; in fealnx_init_one()
593 np->PHYType = OtherPHY; in fealnx_init_one()
598 np->mii_cnt = phy_idx; in fealnx_init_one()
604 np->phys[0] = 32; in fealnx_init_one()
608 np->PHYType = MysonPHY; in fealnx_init_one()
610 np->PHYType = OtherPHY; in fealnx_init_one()
612 np->mii.phy_id = np->phys[0]; in fealnx_init_one()
620 np->mii.full_duplex = 1; in fealnx_init_one()
621 np->default_port = option & 15; in fealnx_init_one()
625 np->mii.full_duplex = full_duplex[card_idx]; in fealnx_init_one()
627 if (np->mii.full_duplex) { in fealnx_init_one()
631 if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) { in fealnx_init_one()
634 data = mdio_read(dev, np->phys[0], 9); in fealnx_init_one()
636 mdio_write(dev, np->phys[0], 9, data); in fealnx_init_one()
639 if (np->flags == HAS_MII_XCVR) in fealnx_init_one()
640 mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL); in fealnx_init_one()
643 np->mii.force_media = 1; in fealnx_init_one()
661 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, in fealnx_init_one()
662 np->tx_ring_dma); in fealnx_init_one()
664 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, in fealnx_init_one()
665 np->rx_ring_dma); in fealnx_init_one()
681 struct netdev_private *np = netdev_priv(dev); in fealnx_remove_one() local
683 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, in fealnx_remove_one()
684 np->tx_ring_dma); in fealnx_remove_one()
685 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, in fealnx_remove_one()
686 np->rx_ring_dma); in fealnx_remove_one()
688 pci_iounmap(pdev, np->mem); in fealnx_remove_one()
747 struct netdev_private *np = netdev_priv(dev); in mdio_read() local
748 void __iomem *miiport = np->mem + MANAGEMENT; in mdio_read()
786 struct netdev_private *np = netdev_priv(dev); in mdio_write() local
787 void __iomem *miiport = np->mem + MANAGEMENT; in mdio_write()
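mdio_read() and mdio_write() bit-bang IEEE 802.3 clause-22 management frames through the chip's MANAGEMENT register. A sketch of the read-frame layout, with hypothetical mdio_send_bit()/mdio_get_bit() helpers standing in for the MDC/MDIO register toggling the real driver performs:

	/* Sketch: clause-22 MDIO read frame. The helpers are hypothetical
	 * stand-ins for toggling MDC and driving/sampling the MDIO pin. */
	static u16 mdio_read_frame(void __iomem *miiport, int phy_id, int reg)
	{
		u32 frame = (1 << 30) |		/* start: 01 */
			    (2 << 28) |		/* opcode: 10 = read */
			    (phy_id << 23) |	/* 5-bit PHY address */
			    (reg << 18);	/* 5-bit register address */
		u16 data = 0;
		int i;

		for (i = 0; i < 32; i++)	/* preamble: 32 ones */
			mdio_send_bit(miiport, 1);
		for (i = 31; i >= 18; i--)	/* ST, OP, PHYAD, REGAD */
			mdio_send_bit(miiport, (frame >> i) & 1);
		mdio_get_bit(miiport);		/* turnaround (simplified) */
		for (i = 0; i < 16; i++)	/* 16 data bits, MSB first */
			data = (data << 1) | mdio_get_bit(miiport);
		return data;
	}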
818 struct netdev_private *np = netdev_priv(dev); in netdev_open() local
819 void __iomem *ioaddr = np->mem; in netdev_open()
820 const int irq = np->pci_dev->irq; in netdev_open()
835 iowrite32(np->rx_ring_dma, ioaddr + RXLBA); in netdev_open()
836 iowrite32(np->tx_ring_dma, ioaddr + TXLBA); in netdev_open()
855 np->bcrvalue = 0x10; /* little-endian, 8 burst length */ in netdev_open()
857 np->bcrvalue |= 0x04; /* big-endian */ in netdev_open()
862 np->crvalue = 0xa00; in netdev_open()
865 np->crvalue = 0xe00; /* rx 128 burst length */ in netdev_open()
871 np->imrvalue = TUNF | CNTOVF | RBU | TI | RI; in netdev_open()
872 if (np->pci_dev->device == 0x891) { in netdev_open()
873 np->bcrvalue |= 0x200; /* set PROG bit */ in netdev_open()
874 np->crvalue |= CR_W_ENH; /* set enhanced bit */ in netdev_open()
875 np->imrvalue |= ETI; in netdev_open()
877 iowrite32(np->bcrvalue, ioaddr + BCR); in netdev_open()
880 dev->if_port = np->default_port; in netdev_open()
885 np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */ in netdev_open()
886 np->mii.full_duplex = np->mii.force_media; in netdev_open()
888 if (np->linkok) in netdev_open()
896 iowrite32(np->imrvalue, ioaddr + IMR); in netdev_open()
902 timer_setup(&np->timer, netdev_timer, 0); in netdev_open()
903 np->timer.expires = RUN_AT(3 * HZ); in netdev_open()
906 add_timer(&np->timer); in netdev_open()
908 timer_setup(&np->reset_timer, reset_timer, 0); in netdev_open()
909 np->reset_timer_armed = 0; in netdev_open()
919 struct netdev_private *np = netdev_priv(dev); in getlinkstatus() local
922 np->linkok = 0; in getlinkstatus()
924 if (np->PHYType == MysonPHY) { in getlinkstatus()
926 if (ioread32(np->mem + BMCRSR) & LinkIsUp2) { in getlinkstatus()
927 np->linkok = 1; in getlinkstatus()
934 if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) { in getlinkstatus()
935 np->linkok = 1; in getlinkstatus()
946 struct netdev_private *np = netdev_priv(dev); in getlinktype() local
948 if (np->PHYType == MysonPHY) { /* 3-in-1 case */ in getlinktype()
949 if (ioread32(np->mem + TCRRCR) & CR_R_FD) in getlinktype()
950 np->duplexmode = 2; /* full duplex */ in getlinktype()
952 np->duplexmode = 1; /* half duplex */ in getlinktype()
953 if (ioread32(np->mem + TCRRCR) & CR_R_PS10) in getlinktype()
954 np->line_speed = 1; /* 10M */ in getlinktype()
956 np->line_speed = 2; /* 100M */ in getlinktype()
958 if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */ in getlinktype()
961 data = mdio_read(dev, np->phys[0], MIIRegister18); in getlinktype()
963 np->line_speed = 2; /* 100M */ in getlinktype()
965 np->line_speed = 1; /* 10M */ in getlinktype()
967 np->duplexmode = 2; /* full duplex mode */ in getlinktype()
969 np->duplexmode = 1; /* half duplex mode */ in getlinktype()
970 } else if (np->PHYType == AhdocPHY) { in getlinktype()
973 data = mdio_read(dev, np->phys[0], DiagnosticReg); in getlinktype()
975 np->line_speed = 2; /* 100M */ in getlinktype()
977 np->line_speed = 1; /* 10M */ in getlinktype()
979 np->duplexmode = 2; /* full duplex mode */ in getlinktype()
981 np->duplexmode = 1; /* half duplex mode */ in getlinktype()
984 else if (np->PHYType == MarvellPHY) { in getlinktype()
987 data = mdio_read(dev, np->phys[0], SpecificReg); in getlinktype()
989 np->duplexmode = 2; /* full duplex mode */ in getlinktype()
991 np->duplexmode = 1; /* half duplex mode */ in getlinktype()
994 np->line_speed = 3; /* 1000M */ in getlinktype()
996 np->line_speed = 2; /* 100M */ in getlinktype()
998 np->line_speed = 1; /* 10M */ in getlinktype()
1002 else if (np->PHYType == Myson981) { in getlinktype()
1005 data = mdio_read(dev, np->phys[0], StatusRegister); in getlinktype()
1008 np->line_speed = 2; in getlinktype()
1010 np->line_speed = 1; in getlinktype()
1013 np->duplexmode = 2; in getlinktype()
1015 np->duplexmode = 1; in getlinktype()
1019 else if (np->PHYType == LevelOnePHY) { in getlinktype()
1022 data = mdio_read(dev, np->phys[0], SpecificReg); in getlinktype()
1024 np->duplexmode = 2; /* full duplex mode */ in getlinktype()
1026 np->duplexmode = 1; /* half duplex mode */ in getlinktype()
1029 np->line_speed = 3; /* 1000M */ in getlinktype()
1031 np->line_speed = 2; /* 100M */ in getlinktype()
1033 np->line_speed = 1; /* 10M */ in getlinktype()
1035 np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000); in getlinktype()
1036 if (np->line_speed == 1) in getlinktype()
1037 np->crvalue |= CR_W_PS10; in getlinktype()
1038 else if (np->line_speed == 3) in getlinktype()
1039 np->crvalue |= CR_W_PS1000; in getlinktype()
1040 if (np->duplexmode == 2) in getlinktype()
1041 np->crvalue |= CR_W_FD; in getlinktype()
1049 struct netdev_private *np = netdev_priv(dev); in allocate_rx_buffers() local
1052 while (np->really_rx_count != RX_RING_SIZE) { in allocate_rx_buffers()
1055 skb = netdev_alloc_skb(dev, np->rx_buf_sz); in allocate_rx_buffers()
1059 while (np->lack_rxbuf->skbuff) in allocate_rx_buffers()
1060 np->lack_rxbuf = np->lack_rxbuf->next_desc_logical; in allocate_rx_buffers()
1062 np->lack_rxbuf->skbuff = skb; in allocate_rx_buffers()
1063 np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev, in allocate_rx_buffers()
1065 np->rx_buf_sz, in allocate_rx_buffers()
1067 np->lack_rxbuf->status = RXOWN; in allocate_rx_buffers()
1068 ++np->really_rx_count; in allocate_rx_buffers()
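allocate_rx_buffers() tops the ring back up after earlier allocation failures: it advances lack_rxbuf past descriptors that still hold an skb, attaches a fresh buffer, and returns ownership to the NIC via RXOWN. The loop, condensed from the lines above with the alloc-failure exit restored for completeness:

	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);

		if (!skb)
			break;	/* ring stays short; retried on a later pass */

		while (np->lack_rxbuf->skbuff)
			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

		np->lack_rxbuf->skbuff = skb;
		np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
							skb->data, np->rx_buf_sz,
							DMA_FROM_DEVICE);
		np->lack_rxbuf->status = RXOWN;
		++np->really_rx_count;
	}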
1075 struct netdev_private *np = from_timer(np, t, timer); in netdev_timer() local
1076 struct net_device *dev = np->mii.dev; in netdev_timer()
1077 void __iomem *ioaddr = np->mem; in netdev_timer()
1078 int old_crvalue = np->crvalue; in netdev_timer()
1079 unsigned int old_linkok = np->linkok; in netdev_timer()
1087 spin_lock_irqsave(&np->lock, flags); in netdev_timer()
1089 if (np->flags == HAS_MII_XCVR) { in netdev_timer()
1091 if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */ in netdev_timer()
1093 if (np->crvalue != old_crvalue) { in netdev_timer()
1094 stop_nic_rxtx(ioaddr, np->crvalue); in netdev_timer()
1095 iowrite32(np->crvalue, ioaddr + TCRRCR); in netdev_timer()
1102 spin_unlock_irqrestore(&np->lock, flags); in netdev_timer()
1104 np->timer.expires = RUN_AT(10 * HZ); in netdev_timer()
1105 add_timer(&np->timer); in netdev_timer()
1113 struct netdev_private *np = netdev_priv(dev); in reset_and_disable_rxtx() local
1114 void __iomem *ioaddr = np->mem; in reset_and_disable_rxtx()
1139 struct netdev_private *np = netdev_priv(dev); in enable_rxtx() local
1140 void __iomem *ioaddr = np->mem; in enable_rxtx()
1144 iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring), in enable_rxtx()
1146 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), in enable_rxtx()
1149 iowrite32(np->bcrvalue, ioaddr + BCR); in enable_rxtx()
1156 iowrite32(np->imrvalue, ioaddr + IMR); in enable_rxtx()
1164 struct netdev_private *np = from_timer(np, t, reset_timer); in reset_timer() local
1165 struct net_device *dev = np->mii.dev; in reset_timer()
1170 spin_lock_irqsave(&np->lock, flags); in reset_timer()
1171 np->crvalue = np->crvalue_sv; in reset_timer()
1172 np->imrvalue = np->imrvalue_sv; in reset_timer()
1180 np->reset_timer_armed = 0; in reset_timer()
1182 spin_unlock_irqrestore(&np->lock, flags); in reset_timer()
1188 struct netdev_private *np = netdev_priv(dev); in fealnx_tx_timeout() local
1189 void __iomem *ioaddr = np->mem; in fealnx_tx_timeout()
1198 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); in fealnx_tx_timeout()
1201 (unsigned int) np->rx_ring[i].status); in fealnx_tx_timeout()
1203 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring); in fealnx_tx_timeout()
1205 printk(KERN_CONT " %4.4x", np->tx_ring[i].status); in fealnx_tx_timeout()
1209 spin_lock_irqsave(&np->lock, flags); in fealnx_tx_timeout()
1215 spin_unlock_irqrestore(&np->lock, flags); in fealnx_tx_timeout()
1226 struct netdev_private *np = netdev_priv(dev); in init_ring() local
1230 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in init_ring()
1231 np->cur_rx = &np->rx_ring[0]; in init_ring()
1232 np->lack_rxbuf = np->rx_ring; in init_ring()
1233 np->really_rx_count = 0; in init_ring()
1237 np->rx_ring[i].status = 0; in init_ring()
1238 np->rx_ring[i].control = np->rx_buf_sz << RBSShift; in init_ring()
1239 np->rx_ring[i].next_desc = np->rx_ring_dma + in init_ring()
1241 np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1]; in init_ring()
1242 np->rx_ring[i].skbuff = NULL; in init_ring()
1246 np->rx_ring[i - 1].next_desc = np->rx_ring_dma; in init_ring()
1247 np->rx_ring[i - 1].next_desc_logical = np->rx_ring; in init_ring()
1251 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz); in init_ring()
1254 np->lack_rxbuf = &np->rx_ring[i]; in init_ring()
1258 ++np->really_rx_count; in init_ring()
1259 np->rx_ring[i].skbuff = skb; in init_ring()
1260 np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev, in init_ring()
1262 np->rx_buf_sz, in init_ring()
1264 np->rx_ring[i].status = RXOWN; in init_ring()
1265 np->rx_ring[i].control |= RXIC; in init_ring()
1269 np->cur_tx = &np->tx_ring[0]; in init_ring()
1270 np->cur_tx_copy = &np->tx_ring[0]; in init_ring()
1271 np->really_tx_count = 0; in init_ring()
1272 np->free_tx_count = TX_RING_SIZE; in init_ring()
1275 np->tx_ring[i].status = 0; in init_ring()
1277 np->tx_ring[i].next_desc = np->tx_ring_dma + in init_ring()
1279 np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1]; in init_ring()
1280 np->tx_ring[i].skbuff = NULL; in init_ring()
1284 np->tx_ring[i - 1].next_desc = np->tx_ring_dma; in init_ring()
1285 np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0]; in init_ring()
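init_ring() links every descriptor twice: next_desc carries the bus address the NIC's DMA engine follows, while next_desc_logical carries the kernel virtual pointer that cur_rx/cur_tx walk, and the final entry wraps both links back to index 0. A condensed sketch of that double linkage (link_ring() is a hypothetical helper; the field layout is the struct fealnx_desc used throughout):

	static void link_ring(struct fealnx_desc *ring, dma_addr_t ring_dma,
			      int size)
	{
		int i;

		for (i = 0; i < size; i++) {
			/* bus-address link consumed by the NIC */
			ring[i].next_desc = ring_dma +
				(i + 1) * sizeof(struct fealnx_desc);
			/* virtual link walked by the driver */
			ring[i].next_desc_logical = &ring[i + 1];
		}
		/* close the circle: last descriptor points at the first */
		ring[size - 1].next_desc = ring_dma;
		ring[size - 1].next_desc_logical = &ring[0];
	}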
1291 struct netdev_private *np = netdev_priv(dev); in start_tx() local
1294 spin_lock_irqsave(&np->lock, flags); in start_tx()
1296 np->cur_tx_copy->skbuff = skb; in start_tx()
1301 np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data, in start_tx()
1303 np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; in start_tx()
1304 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ in start_tx()
1305 np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */ in start_tx()
1307 if (np->pci_dev->device == 0x891) in start_tx()
1308 np->cur_tx_copy->control |= ETIControl | RetryTxLC; in start_tx()
1309 np->cur_tx_copy->status = TXOWN; in start_tx()
1310 np->cur_tx_copy = np->cur_tx_copy->next_desc_logical; in start_tx()
1311 --np->free_tx_count; in start_tx()
1317 np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, in start_tx()
1320 np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable; in start_tx()
1321 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ in start_tx()
1322 np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */ in start_tx()
1325 next = np->cur_tx_copy->next_desc_logical; in start_tx()
1331 if (np->pci_dev->device == 0x891) in start_tx()
1332 np->cur_tx_copy->control |= ETIControl | RetryTxLC; in start_tx()
1338 np->cur_tx_copy->status = TXOWN; in start_tx()
1340 np->cur_tx_copy = next->next_desc_logical; in start_tx()
1341 np->free_tx_count -= 2; in start_tx()
1343 np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, in start_tx()
1346 np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; in start_tx()
1347 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ in start_tx()
1348 np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */ in start_tx()
1350 if (np->pci_dev->device == 0x891) in start_tx()
1351 np->cur_tx_copy->control |= ETIControl | RetryTxLC; in start_tx()
1352 np->cur_tx_copy->status = TXOWN; in start_tx()
1353 np->cur_tx_copy = np->cur_tx_copy->next_desc_logical; in start_tx()
1354 --np->free_tx_count; in start_tx()
1358 if (np->free_tx_count < 2) in start_tx()
1360 ++np->really_tx_count; in start_tx()
1361 iowrite32(0, np->mem + TXPDR); in start_tx()
1363 spin_unlock_irqrestore(&np->lock, flags); in start_tx()
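start_tx() posts one descriptor per frame unless the frame is longer than the BPT buffer threshold, in which case the mapping is split: the first descriptor carries TXFD and BPT bytes, the second carries TXLD and the remainder, with TXOWN set on the first descriptor only after both are built. A sketch of the control-word decision (fill_tx_control() is a hypothetical helper; the bit macros and BPT are fealnx.c's own):

	static void fill_tx_control(struct fealnx_desc *desc,
				    struct fealnx_desc *next,
				    struct sk_buff *skb)
	{
		if (skb->len > BPT) {	/* two descriptors */
			desc->control = TXIC | TXFD | CRCEnable | PADEnable |
					(skb->len << PKTSShift) |
					(BPT << TBSShift);
			next->control = TXIC | TXLD | CRCEnable | PADEnable |
					(skb->len << PKTSShift) |
					((skb->len - BPT) << TBSShift);
		} else {		/* one descriptor, both flags */
			desc->control = TXIC | TXLD | TXFD | CRCEnable |
					PADEnable |
					(skb->len << PKTSShift) |
					(skb->len << TBSShift);
		}
	}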
1372 struct netdev_private *np = netdev_priv(dev); in reset_tx_descriptors() local
1377 np->cur_tx = &np->tx_ring[0]; in reset_tx_descriptors()
1378 np->cur_tx_copy = &np->tx_ring[0]; in reset_tx_descriptors()
1379 np->really_tx_count = 0; in reset_tx_descriptors()
1380 np->free_tx_count = TX_RING_SIZE; in reset_tx_descriptors()
1383 cur = &np->tx_ring[i]; in reset_tx_descriptors()
1385 dma_unmap_single(&np->pci_dev->dev, cur->buffer, in reset_tx_descriptors()
1393 cur->next_desc = np->tx_ring_dma + in reset_tx_descriptors()
1395 cur->next_desc_logical = &np->tx_ring[i + 1]; in reset_tx_descriptors()
1398 np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma; in reset_tx_descriptors()
1399 np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0]; in reset_tx_descriptors()
1406 struct netdev_private *np = netdev_priv(dev); in reset_rx_descriptors() local
1407 struct fealnx_desc *cur = np->cur_rx; in reset_rx_descriptors()
1418 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), in reset_rx_descriptors()
1419 np->mem + RXLBA); in reset_rx_descriptors()
1428 struct netdev_private *np = netdev_priv(dev); in intr_handler() local
1429 void __iomem *ioaddr = np->mem; in intr_handler()
1434 spin_lock(&np->lock); in intr_handler()
1448 if (!(intr_status & np->imrvalue)) in intr_handler()
1479 stop_nic_rx(ioaddr, np->crvalue); in intr_handler()
1481 iowrite32(np->crvalue, ioaddr + TCRRCR); in intr_handler()
1485 while (np->really_tx_count) { in intr_handler()
1486 long tx_status = np->cur_tx->status; in intr_handler()
1487 long tx_control = np->cur_tx->control; in intr_handler()
1492 next = np->cur_tx->next_desc_logical; in intr_handler()
1500 if (!(np->crvalue & CR_W_ENH)) { in intr_handler()
1511 if ((tx_status & HF) && np->mii.full_duplex == 0) in intr_handler()
1529 dma_unmap_single(&np->pci_dev->dev, in intr_handler()
1530 np->cur_tx->buffer, in intr_handler()
1531 np->cur_tx->skbuff->len, in intr_handler()
1533 dev_consume_skb_irq(np->cur_tx->skbuff); in intr_handler()
1534 np->cur_tx->skbuff = NULL; in intr_handler()
1535 --np->really_tx_count; in intr_handler()
1536 if (np->cur_tx->control & TXLD) { in intr_handler()
1537 np->cur_tx = np->cur_tx->next_desc_logical; in intr_handler()
1538 ++np->free_tx_count; in intr_handler()
1540 np->cur_tx = np->cur_tx->next_desc_logical; in intr_handler()
1541 np->cur_tx = np->cur_tx->next_desc_logical; in intr_handler()
1542 np->free_tx_count += 2; in intr_handler()
1547 if (num_tx && np->free_tx_count >= 2) in intr_handler()
1551 if (np->crvalue & CR_W_ENH) { in intr_handler()
1566 if (!np->reset_timer_armed) { in intr_handler()
1567 np->reset_timer_armed = 1; in intr_handler()
1568 np->reset_timer.expires = RUN_AT(HZ/2); in intr_handler()
1569 add_timer(&np->reset_timer); in intr_handler()
1574 np->crvalue_sv = np->crvalue; in intr_handler()
1575 np->imrvalue_sv = np->imrvalue; in intr_handler()
1576 np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */ in intr_handler()
1577 np->imrvalue = 0; in intr_handler()
1596 iowrite32(np->imrvalue, ioaddr + IMR); in intr_handler()
1598 spin_unlock(&np->lock); in intr_handler()
1608 struct netdev_private *np = netdev_priv(dev); in netdev_rx() local
1609 void __iomem *ioaddr = np->mem; in netdev_rx()
1612 while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) { in netdev_rx()
1613 s32 rx_status = np->cur_rx->status; in netdev_rx()
1615 if (np->really_rx_count == 0) in netdev_rx()
1644 cur = np->cur_rx; in netdev_rx()
1645 while (desno <= np->really_rx_count) { in netdev_rx()
1653 if (desno > np->really_rx_count) in netdev_rx()
1665 if (!np->cur_rx->skbuff) { in netdev_rx()
1670 np->cur_rx->status = RXOWN; in netdev_rx()
1671 np->cur_rx = np->cur_rx->next_desc_logical; in netdev_rx()
1675 stop_nic_rx(ioaddr, np->crvalue); in netdev_rx()
1677 iowrite32(np->crvalue, ioaddr + TCRRCR); in netdev_rx()
1698 dma_sync_single_for_cpu(&np->pci_dev->dev, in netdev_rx()
1699 np->cur_rx->buffer, in netdev_rx()
1700 np->rx_buf_sz, in netdev_rx()
1706 np->cur_rx->skbuff->data, pkt_len); in netdev_rx()
1709 skb_put_data(skb, np->cur_rx->skbuff->data, in netdev_rx()
1712 dma_sync_single_for_device(&np->pci_dev->dev, in netdev_rx()
1713 np->cur_rx->buffer, in netdev_rx()
1714 np->rx_buf_sz, in netdev_rx()
1717 dma_unmap_single(&np->pci_dev->dev, in netdev_rx()
1718 np->cur_rx->buffer, in netdev_rx()
1719 np->rx_buf_sz, in netdev_rx()
1721 skb_put(skb = np->cur_rx->skbuff, pkt_len); in netdev_rx()
1722 np->cur_rx->skbuff = NULL; in netdev_rx()
1723 --np->really_rx_count; in netdev_rx()
1731 np->cur_rx = np->cur_rx->next_desc_logical; in netdev_rx()
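The receive path implements the classic copybreak trade-off: frames shorter than rx_copybreak are copied into a freshly allocated skb so the DMA-mapped ring buffer stays in place, while larger frames are unmapped and handed up directly, surrendering their buffer to be refilled later. Condensed from the lines above:

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16-byte align the IP header */
		dma_sync_single_for_cpu(&np->pci_dev->dev, np->cur_rx->buffer,
					np->rx_buf_sz, DMA_FROM_DEVICE);
		skb_put_data(skb, np->cur_rx->skbuff->data, pkt_len);
		dma_sync_single_for_device(&np->pci_dev->dev,
					   np->cur_rx->buffer,
					   np->rx_buf_sz, DMA_FROM_DEVICE);
	} else {
		dma_unmap_single(&np->pci_dev->dev, np->cur_rx->buffer,
				 np->rx_buf_sz, DMA_FROM_DEVICE);
		skb = np->cur_rx->skbuff;
		skb_put(skb, pkt_len);
		np->cur_rx->skbuff = NULL;
		--np->really_rx_count;
	}
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);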
1743 struct netdev_private *np = netdev_priv(dev); in get_stats() local
1744 void __iomem *ioaddr = np->mem; in get_stats()
1772 struct netdev_private *np = netdev_priv(dev); in __set_rx_mode() local
1773 void __iomem *ioaddr = np->mem; in __set_rx_mode()
1797 stop_nic_rxtx(ioaddr, np->crvalue); in __set_rx_mode()
1801 np->crvalue &= ~CR_W_RXMODEMASK; in __set_rx_mode()
1802 np->crvalue |= rx_mode; in __set_rx_mode()
1803 iowrite32(np->crvalue, ioaddr + TCRRCR); in __set_rx_mode()
1808 struct netdev_private *np = netdev_priv(dev); in netdev_get_drvinfo() local
1811 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in netdev_get_drvinfo()
1817 struct netdev_private *np = netdev_priv(dev); in netdev_get_link_ksettings() local
1819 spin_lock_irq(&np->lock); in netdev_get_link_ksettings()
1820 mii_ethtool_get_link_ksettings(&np->mii, cmd); in netdev_get_link_ksettings()
1821 spin_unlock_irq(&np->lock); in netdev_get_link_ksettings()
1829 struct netdev_private *np = netdev_priv(dev); in netdev_set_link_ksettings() local
1832 spin_lock_irq(&np->lock); in netdev_set_link_ksettings()
1833 rc = mii_ethtool_set_link_ksettings(&np->mii, cmd); in netdev_set_link_ksettings()
1834 spin_unlock_irq(&np->lock); in netdev_set_link_ksettings()
1841 struct netdev_private *np = netdev_priv(dev); in netdev_nway_reset() local
1842 return mii_nway_restart(&np->mii); in netdev_nway_reset()
1847 struct netdev_private *np = netdev_priv(dev); in netdev_get_link() local
1848 return mii_link_ok(&np->mii); in netdev_get_link()
1873 struct netdev_private *np = netdev_priv(dev); in mii_ioctl() local
1879 spin_lock_irq(&np->lock); in mii_ioctl()
1880 rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL); in mii_ioctl()
1881 spin_unlock_irq(&np->lock); in mii_ioctl()
1889 struct netdev_private *np = netdev_priv(dev); in netdev_close() local
1890 void __iomem *ioaddr = np->mem; in netdev_close()
1901 del_timer_sync(&np->timer); in netdev_close()
1902 del_timer_sync(&np->reset_timer); in netdev_close()
1904 free_irq(np->pci_dev->irq, dev); in netdev_close()
1908 struct sk_buff *skb = np->rx_ring[i].skbuff; in netdev_close()
1910 np->rx_ring[i].status = 0; in netdev_close()
1912 dma_unmap_single(&np->pci_dev->dev, in netdev_close()
1913 np->rx_ring[i].buffer, np->rx_buf_sz, in netdev_close()
1916 np->rx_ring[i].skbuff = NULL; in netdev_close()
1921 struct sk_buff *skb = np->tx_ring[i].skbuff; in netdev_close()
1924 dma_unmap_single(&np->pci_dev->dev, in netdev_close()
1925 np->tx_ring[i].buffer, skb->len, in netdev_close()
1928 np->tx_ring[i].skbuff = NULL; in netdev_close()