Lines Matching full:lp in drivers/net/ethernet/amd/pcnet32.c (AMD PCnet32 PCI Ethernet driver). Each entry gives the source line number of the match, the matching code, and the enclosing function; "local" and "argument" mark the lines where lp is declared.
448 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_netif_stop() local
451 napi_disable(&lp->napi); in pcnet32_netif_stop()
457 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_netif_start() local
462 val = lp->a->read_csr(ioaddr, CSR3); in pcnet32_netif_start()
464 lp->a->write_csr(ioaddr, CSR3, val); in pcnet32_netif_start()
465 napi_enable(&lp->napi); in pcnet32_netif_start()
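The two helpers above bracket any operation that needs the device quiet: pcnet32_netif_stop() parks NAPI and the transmit queue, while pcnet32_netif_start() re-enables both and clears the interrupt-mask bits in CSR3 so receive/transmit interrupts reach CSR0 again. A minimal sketch assembled from the matched fragments (448-465); the netif_trans_update()/netif_tx_disable()/netif_wake_queue() calls and the 0x00ff mask value are assumptions not shown in the listing.

static void pcnet32_netif_stop(struct net_device *dev)
{
        struct pcnet32_private *lp = netdev_priv(dev);

        netif_trans_update(dev);                /* assumed: refresh trans_start */
        napi_disable(&lp->napi);
        netif_tx_disable(dev);                  /* assumed: stop the TX queue */
}

static void pcnet32_netif_start(struct net_device *dev)
{
        struct pcnet32_private *lp = netdev_priv(dev);
        ulong ioaddr = dev->base_addr;
        u16 val;

        netif_wake_queue(dev);                  /* assumed */
        val = lp->a->read_csr(ioaddr, CSR3);
        val &= 0x00ff;                          /* assumed: clear interrupt masks */
        lp->a->write_csr(ioaddr, CSR3, val);
        napi_enable(&lp->napi);
}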
473 * Must be called with lp->lock held.
476 struct pcnet32_private *lp, in pcnet32_realloc_tx_ring() argument
488 dma_alloc_coherent(&lp->pci_dev->dev, in pcnet32_realloc_tx_ring()
502 kfree(lp->tx_skbuff); in pcnet32_realloc_tx_ring()
503 kfree(lp->tx_dma_addr); in pcnet32_realloc_tx_ring()
504 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_realloc_tx_ring()
505 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, in pcnet32_realloc_tx_ring()
506 lp->tx_ring, lp->tx_ring_dma_addr); in pcnet32_realloc_tx_ring()
508 lp->tx_ring_size = entries; in pcnet32_realloc_tx_ring()
509 lp->tx_mod_mask = lp->tx_ring_size - 1; in pcnet32_realloc_tx_ring()
510 lp->tx_len_bits = (size << 12); in pcnet32_realloc_tx_ring()
511 lp->tx_ring = new_tx_ring; in pcnet32_realloc_tx_ring()
512 lp->tx_ring_dma_addr = new_ring_dma_addr; in pcnet32_realloc_tx_ring()
513 lp->tx_dma_addr = new_dma_addr_list; in pcnet32_realloc_tx_ring()
514 lp->tx_skbuff = new_skb_list; in pcnet32_realloc_tx_ring()
520 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_realloc_tx_ring()
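Fragments 473-520 outline the TX ring resize: a new coherent descriptor ring and fresh bookkeeping arrays are allocated first, and only when everything has succeeded are the old ones freed and the lp fields switched over, so a failed resize leaves the current ring untouched. A condensed sketch under that reading; the kcalloc calls, the GFP_ATOMIC flags and the cleanup label are assumptions, since those lines do not match lp and are not shown above.

static void pcnet32_realloc_tx_ring(struct net_device *dev,
                                    struct pcnet32_private *lp,
                                    unsigned int size)
{
        dma_addr_t new_ring_dma_addr, *new_dma_addr_list;
        struct pcnet32_tx_head *new_tx_ring;
        struct sk_buff **new_skb_list;
        unsigned int entries = BIT(size);       /* ring sizes are powers of two */

        new_tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
                                         sizeof(struct pcnet32_tx_head) * entries,
                                         &new_ring_dma_addr, GFP_ATOMIC);
        if (!new_tx_ring)
                return;

        new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
        new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
        if (!new_dma_addr_list || !new_skb_list)
                goto free_new;                  /* assumed cleanup path */

        /* the caller stopped the chip and drained the ring, so just swap */
        kfree(lp->tx_skbuff);
        kfree(lp->tx_dma_addr);
        dma_free_coherent(&lp->pci_dev->dev,
                          sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
                          lp->tx_ring, lp->tx_ring_dma_addr);

        lp->tx_ring_size = entries;
        lp->tx_mod_mask = lp->tx_ring_size - 1;
        lp->tx_len_bits = (size << 12);         /* log2 size, as the init block wants it */
        lp->tx_ring = new_tx_ring;
        lp->tx_ring_dma_addr = new_ring_dma_addr;
        lp->tx_dma_addr = new_dma_addr_list;
        lp->tx_skbuff = new_skb_list;
        return;

free_new:
        kfree(new_skb_list);
        kfree(new_dma_addr_list);
        dma_free_coherent(&lp->pci_dev->dev,
                          sizeof(struct pcnet32_tx_head) * entries,
                          new_tx_ring, new_ring_dma_addr);
}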
533 * Must be called with lp->lock held.
536 struct pcnet32_private *lp, in pcnet32_realloc_rx_ring() argument
547 dma_alloc_coherent(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
562 overlap = min(entries, lp->rx_ring_size); in pcnet32_realloc_rx_ring()
564 new_rx_ring[new] = lp->rx_ring[new]; in pcnet32_realloc_rx_ring()
565 new_dma_addr_list[new] = lp->rx_dma_addr[new]; in pcnet32_realloc_rx_ring()
566 new_skb_list[new] = lp->rx_skbuff[new]; in pcnet32_realloc_rx_ring()
575 netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", in pcnet32_realloc_rx_ring()
582 dma_map_single(&lp->pci_dev->dev, rx_skbuff->data, in pcnet32_realloc_rx_ring()
584 if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) { in pcnet32_realloc_rx_ring()
585 netif_err(lp, drv, dev, "%s dma mapping failed\n", in pcnet32_realloc_rx_ring()
595 for (; new < lp->rx_ring_size; new++) { in pcnet32_realloc_rx_ring()
596 if (lp->rx_skbuff[new]) { in pcnet32_realloc_rx_ring()
597 if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new])) in pcnet32_realloc_rx_ring()
598 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
599 lp->rx_dma_addr[new], in pcnet32_realloc_rx_ring()
602 dev_kfree_skb(lp->rx_skbuff[new]); in pcnet32_realloc_rx_ring()
606 kfree(lp->rx_skbuff); in pcnet32_realloc_rx_ring()
607 kfree(lp->rx_dma_addr); in pcnet32_realloc_rx_ring()
608 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
609 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, in pcnet32_realloc_rx_ring()
610 lp->rx_ring, lp->rx_ring_dma_addr); in pcnet32_realloc_rx_ring()
612 lp->rx_ring_size = entries; in pcnet32_realloc_rx_ring()
613 lp->rx_mod_mask = lp->rx_ring_size - 1; in pcnet32_realloc_rx_ring()
614 lp->rx_len_bits = (size << 4); in pcnet32_realloc_rx_ring()
615 lp->rx_ring = new_rx_ring; in pcnet32_realloc_rx_ring()
616 lp->rx_ring_dma_addr = new_ring_dma_addr; in pcnet32_realloc_rx_ring()
617 lp->rx_dma_addr = new_dma_addr_list; in pcnet32_realloc_rx_ring()
618 lp->rx_skbuff = new_skb_list; in pcnet32_realloc_rx_ring()
622 while (--new >= lp->rx_ring_size) { in pcnet32_realloc_rx_ring()
624 if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) in pcnet32_realloc_rx_ring()
625 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
636 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
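The RX variant (533-636) has to carry live receive buffers across the resize: descriptors up to min(new, old) are copied as-is, the extra slots get freshly allocated and DMA-mapped skbs, surplus old buffers are unmapped and freed, and on any allocation or mapping failure the loop at 622-636 walks back and releases only what was newly added, leaving the original ring usable. A sketch of the copy and fill loops; PKT_BUF_SKB and NEG_BUF_SIZE appear elsewhere in the listing, while PKT_BUF_SIZE, the skb_reserve() step and the error label are assumptions.

        /* inside pcnet32_realloc_rx_ring(); entries, new, overlap are locals (sketch) */
        overlap = min(entries, lp->rx_ring_size);
        for (new = 0; new < overlap; new++) {
                new_rx_ring[new] = lp->rx_ring[new];
                new_dma_addr_list[new] = lp->rx_dma_addr[new];
                new_skb_list[new] = lp->rx_skbuff[new];
        }
        /* populate the remaining slots with fresh, DMA-mapped skbs */
        for (; new < entries; new++) {
                struct sk_buff *rx_skbuff = netdev_alloc_skb(dev, PKT_BUF_SKB);

                if (!rx_skbuff)
                        goto free_all_new;              /* assumed error label */
                new_skb_list[new] = rx_skbuff;
                skb_reserve(rx_skbuff, NET_IP_ALIGN);   /* assumed alignment step */

                new_dma_addr_list[new] =
                        dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
                                       PKT_BUF_SIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) {
                        dev_kfree_skb(rx_skbuff);
                        goto free_all_new;
                }
                new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
                new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
                new_rx_ring[new].status = cpu_to_le16(0x8000);  /* chip owns it */
        }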
643 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_purge_rx_ring() local
647 for (i = 0; i < lp->rx_ring_size; i++) { in pcnet32_purge_rx_ring()
648 lp->rx_ring[i].status = 0; /* CPU owns buffer */ in pcnet32_purge_rx_ring()
650 if (lp->rx_skbuff[i]) { in pcnet32_purge_rx_ring()
651 if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) in pcnet32_purge_rx_ring()
652 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_purge_rx_ring()
653 lp->rx_dma_addr[i], in pcnet32_purge_rx_ring()
656 dev_kfree_skb_any(lp->rx_skbuff[i]); in pcnet32_purge_rx_ring()
658 lp->rx_skbuff[i] = NULL; in pcnet32_purge_rx_ring()
659 lp->rx_dma_addr[i] = 0; in pcnet32_purge_rx_ring()
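pcnet32_purge_rx_ring (643-659) gives every RX descriptor back to the CPU and releases its buffer: the status word is cleared first so the chip no longer owns the slot, then the mapping is torn down and the skb freed. A sketch of the loop body; the wmb() barrier and the PKT_BUF_SIZE length are assumptions.

        /* body of pcnet32_purge_rx_ring() (sketch) */
        for (i = 0; i < lp->rx_ring_size; i++) {
                lp->rx_ring[i].status = 0;      /* CPU owns buffer */
                wmb();                          /* assumed: make the ownership change visible */
                if (lp->rx_skbuff[i]) {
                        if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i]))
                                dma_unmap_single(&lp->pci_dev->dev,
                                                 lp->rx_dma_addr[i],
                                                 PKT_BUF_SIZE, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(lp->rx_skbuff[i]);
                }
                lp->rx_skbuff[i] = NULL;
                lp->rx_dma_addr[i] = 0;
        }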
673 * lp->lock must be held.
679 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_suspend() local
680 const struct pcnet32_access *a = lp->a; in pcnet32_suspend()
685 if (lp->chip_version < PCNET32_79C970A) in pcnet32_suspend()
695 spin_unlock_irqrestore(&lp->lock, *flags); in pcnet32_suspend()
700 spin_lock_irqsave(&lp->lock, *flags); in pcnet32_suspend()
703 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_suspend()
711 static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr) in pcnet32_clr_suspend() argument
713 int csr5 = lp->a->read_csr(ioaddr, CSR5); in pcnet32_clr_suspend()
715 lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND); in pcnet32_clr_suspend()
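pcnet32_suspend (673-703) asks the chip to pause by setting the SUSPEND bit in CSR5 and then polls CSR5 until the chip confirms it, dropping lp->lock around each delay so interrupts are not held off for the whole wait; pcnet32_clr_suspend (711-715) simply clears the bit again. A sketch of the pair; the can_sleep parameter, the 200-iteration bound and the failure handling are assumptions.

static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr)
{
        int csr5 = lp->a->read_csr(ioaddr, CSR5);

        lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND);
}

        /* core of pcnet32_suspend() (sketch): request suspend, then poll for it */
        csr5 = lp->a->read_csr(ioaddr, CSR5);
        lp->a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
        ticks = 0;
        while (!(lp->a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
                spin_unlock_irqrestore(&lp->lock, *flags);
                if (can_sleep)                  /* assumed parameter */
                        msleep(1);
                else
                        mdelay(1);
                spin_lock_irqsave(&lp->lock, *flags);
                if (++ticks > 200) {            /* assumed bound */
                        netif_printk(lp, hw, KERN_DEBUG, dev,
                                     "Error getting into suspend!\n");
                        return 0;
                }
        }
        return 1;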
721 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_link_ksettings() local
724 spin_lock_irqsave(&lp->lock, flags); in pcnet32_get_link_ksettings()
725 if (lp->mii) { in pcnet32_get_link_ksettings()
726 mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); in pcnet32_get_link_ksettings()
727 } else if (lp->chip_version == PCNET32_79C970A) { in pcnet32_get_link_ksettings()
728 if (lp->autoneg) { in pcnet32_get_link_ksettings()
730 if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0) in pcnet32_get_link_ksettings()
736 cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI; in pcnet32_get_link_ksettings()
738 cmd->base.duplex = lp->fdx ? DUPLEX_FULL : DUPLEX_HALF; in pcnet32_get_link_ksettings()
744 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_get_link_ksettings()
751 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_link_ksettings() local
757 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_link_ksettings()
758 if (lp->mii) { in pcnet32_set_link_ksettings()
759 r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd); in pcnet32_set_link_ksettings()
760 } else if (lp->chip_version == PCNET32_79C970A) { in pcnet32_set_link_ksettings()
763 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); in pcnet32_set_link_ksettings()
765 lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE; in pcnet32_set_link_ksettings()
766 bcr2 = lp->a->read_bcr(ioaddr, 2); in pcnet32_set_link_ksettings()
768 lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002); in pcnet32_set_link_ksettings()
770 lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002); in pcnet32_set_link_ksettings()
772 lp->port_tp = cmd->base.port == PORT_TP; in pcnet32_set_link_ksettings()
773 csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180; in pcnet32_set_link_ksettings()
776 lp->a->write_csr(ioaddr, CSR15, csr15); in pcnet32_set_link_ksettings()
777 lp->init_block->mode = cpu_to_le16(csr15); in pcnet32_set_link_ksettings()
779 lp->fdx = cmd->base.duplex == DUPLEX_FULL; in pcnet32_set_link_ksettings()
780 bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003; in pcnet32_set_link_ksettings()
783 lp->a->write_bcr(ioaddr, 9, bcr9); in pcnet32_set_link_ksettings()
786 pcnet32_clr_suspend(lp, ioaddr); in pcnet32_set_link_ksettings()
791 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_link_ksettings()
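On the 79C970A, which has no MII PHY, the ethtool set path (fragments 760-786) programs the link by hand: the chip is stopped, autoneg is toggled through bit 1 of BCR2, the TP/AUI port selection goes into the PORTSEL field of CSR15 (and is mirrored into the init block), and duplex goes into the low bits of BCR9 before the chip is resumed. A sketch of that branch; the 0x0080 and 0x0003 register values are assumptions based on the usual PCnet bit layout.

        /* inside pcnet32_set_link_ksettings(); bcr2, csr15, bcr9 are locals (sketch) */
        lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);      /* stop the chip */

        lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
        bcr2 = lp->a->read_bcr(ioaddr, 2);
        if (cmd->base.autoneg == AUTONEG_ENABLE)
                lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002);     /* ASEL on */
        else
                lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002);

        lp->port_tp = cmd->base.port == PORT_TP;
        csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180;       /* clear PORTSEL */
        if (cmd->base.port == PORT_TP)
                csr15 |= 0x0080;                        /* assumed: select 10BASE-T */
        lp->a->write_csr(ioaddr, CSR15, csr15);
        lp->init_block->mode = cpu_to_le16(csr15);

        lp->fdx = cmd->base.duplex == DUPLEX_FULL;
        bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003;
        if (cmd->base.duplex == DUPLEX_FULL)
                bcr9 |= 0x0003;                         /* assumed: force full duplex */
        lp->a->write_bcr(ioaddr, 9, bcr9);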
798 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_drvinfo() local
801 if (lp->pci_dev) in pcnet32_get_drvinfo()
802 strlcpy(info->bus_info, pci_name(lp->pci_dev), in pcnet32_get_drvinfo()
811 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_link() local
815 spin_lock_irqsave(&lp->lock, flags); in pcnet32_get_link()
816 if (lp->mii) { in pcnet32_get_link()
817 r = mii_link_ok(&lp->mii_if); in pcnet32_get_link()
818 } else if (lp->chip_version == PCNET32_79C970A) { in pcnet32_get_link()
821 if (!lp->autoneg && lp->port_tp) in pcnet32_get_link()
822 r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); in pcnet32_get_link()
825 } else if (lp->chip_version > PCNET32_79C970A) { in pcnet32_get_link()
827 r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); in pcnet32_get_link()
831 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_get_link()
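Link state (811-831) is derived in three ways: with an MII PHY the driver defers to mii_link_ok(); a 79C970A with autoneg off and the TP port selected reads BCR4 (the LED status register) and treats any value other than 0xc0 as link-up; later chips apply the same BCR4 test unconditionally, and chips too old for either are simply reported as up. A sketch of the decision chain; only the fallthrough return values are assumptions.

        /* inside pcnet32_get_link(); r is the result handed back to ethtool */
        spin_lock_irqsave(&lp->lock, flags);
        if (lp->mii) {
                r = mii_link_ok(&lp->mii_if);
        } else if (lp->chip_version == PCNET32_79C970A) {
                ulong ioaddr = dev->base_addr;
                /* BCR4 is only meaningful with autoneg off and TP selected */
                if (!lp->autoneg && lp->port_tp)
                        r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
                else
                        r = 1;          /* assumed: AUI / autoselect reports up */
        } else if (lp->chip_version > PCNET32_79C970A) {
                ulong ioaddr = dev->base_addr;
                r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
        } else {
                r = 1;                  /* assumed: no link detection on older chips */
        }
        spin_unlock_irqrestore(&lp->lock, flags);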
838 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_msglevel() local
839 return lp->msg_enable; in pcnet32_get_msglevel()
844 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_msglevel() local
845 lp->msg_enable = value; in pcnet32_set_msglevel()
850 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_nway_reset() local
854 if (lp->mii) { in pcnet32_nway_reset()
855 spin_lock_irqsave(&lp->lock, flags); in pcnet32_nway_reset()
856 r = mii_nway_restart(&lp->mii_if); in pcnet32_nway_reset()
857 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_nway_reset()
865 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_ringparam() local
868 ering->tx_pending = lp->tx_ring_size; in pcnet32_get_ringparam()
870 ering->rx_pending = lp->rx_ring_size; in pcnet32_get_ringparam()
876 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_ringparam() local
888 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_ringparam()
889 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ in pcnet32_set_ringparam()
900 if ((1 << i) != lp->tx_ring_size) in pcnet32_set_ringparam()
901 pcnet32_realloc_tx_ring(dev, lp, i); in pcnet32_set_ringparam()
908 if ((1 << i) != lp->rx_ring_size) in pcnet32_set_ringparam()
909 pcnet32_realloc_rx_ring(dev, lp, i); in pcnet32_set_ringparam()
911 lp->napi.weight = lp->rx_ring_size / 2; in pcnet32_set_ringparam()
918 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_ringparam()
920 netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n", in pcnet32_set_ringparam()
921 lp->rx_ring_size, lp->tx_ring_size); in pcnet32_set_ringparam()
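pcnet32_set_ringparam (876-921) stops the chip, rounds each requested count up to a power of two (the descriptor length fields only encode log2 sizes), reallocates whichever ring actually changed, and rescales the NAPI weight to half the RX ring before restarting. A sketch of the size selection; the *_MAX_RING_SIZE caps and PCNET32_LOG_MAX_*_BUFFERS bounds are assumptions, as those identifiers do not appear in the matched lines.

        /* inside pcnet32_set_ringparam(); size and i are locals (sketch) */
        size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); /* assumed cap */
        for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++)              /* assumed bounds */
                if (size <= (1 << i))
                        break;
        if ((1 << i) != lp->tx_ring_size)
                pcnet32_realloc_tx_ring(dev, lp, i);

        size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
        for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++)
                if (size <= (1 << i))
                        break;
        if ((1 << i) != lp->rx_ring_size)
                pcnet32_realloc_rx_ring(dev, lp, i);

        lp->napi.weight = lp->rx_ring_size / 2;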
945 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_ethtool_test() local
951 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_ethtool_test()
955 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_ethtool_test()
958 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_ethtool_test()
964 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_loopback_test() local
965 const struct pcnet32_access *a = lp->a; /* access to registers */ in pcnet32_loopback_test()
984 spin_lock_irqsave(&lp->lock, flags); in pcnet32_loopback_test()
985 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ in pcnet32_loopback_test()
987 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); in pcnet32_loopback_test()
990 lp->a->reset(ioaddr); in pcnet32_loopback_test()
991 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ in pcnet32_loopback_test()
994 lp->a->write_bcr(ioaddr, 20, 2); in pcnet32_loopback_test()
999 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ in pcnet32_loopback_test()
1006 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_loopback_test()
1013 lp->tx_skbuff[x] = skb; in pcnet32_loopback_test()
1014 lp->tx_ring[x].length = cpu_to_le16(-skb->len); in pcnet32_loopback_test()
1015 lp->tx_ring[x].misc = 0; in pcnet32_loopback_test()
1031 lp->tx_dma_addr[x] = in pcnet32_loopback_test()
1032 dma_map_single(&lp->pci_dev->dev, skb->data, skb->len, in pcnet32_loopback_test()
1034 if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) { in pcnet32_loopback_test()
1035 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_loopback_test()
1040 lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); in pcnet32_loopback_test()
1042 lp->tx_ring[x].status = cpu_to_le16(status); in pcnet32_loopback_test()
1050 lp->a->write_csr(ioaddr, CSR15, x | 0x0044); in pcnet32_loopback_test()
1053 lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ in pcnet32_loopback_test()
1059 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { in pcnet32_loopback_test()
1060 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_loopback_test()
1062 spin_lock_irqsave(&lp->lock, flags); in pcnet32_loopback_test()
1067 netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x); in pcnet32_loopback_test()
1072 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ in pcnet32_loopback_test()
1074 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { in pcnet32_loopback_test()
1079 skb = lp->rx_skbuff[x]; in pcnet32_loopback_test()
1089 skb = lp->rx_skbuff[x]; in pcnet32_loopback_test()
1090 packet = lp->tx_skbuff[x]->data; in pcnet32_loopback_test()
1093 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_loopback_test()
1118 lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ in pcnet32_loopback_test()
1120 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_loopback_test()
1128 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_phys_id() local
1129 const struct pcnet32_access *a = lp->a; in pcnet32_set_phys_id()
1137 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_phys_id()
1139 lp->save_regs[i - 4] = a->read_bcr(ioaddr, i); in pcnet32_set_phys_id()
1140 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_phys_id()
1146 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_phys_id()
1149 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_phys_id()
1154 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_phys_id()
1156 a->write_bcr(ioaddr, i, lp->save_regs[i - 4]); in pcnet32_set_phys_id()
1157 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_phys_id()
1167 struct pcnet32_private *lp, in pcnet32_rx_entry() argument
1200 netif_err(lp, drv, dev, "Impossible packet size %d!\n", in pcnet32_rx_entry()
1206 netif_err(lp, rx_err, dev, "Runt packet!\n"); in pcnet32_rx_entry()
1222 new_dma_addr = dma_map_single(&lp->pci_dev->dev, in pcnet32_rx_entry()
1226 if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) { in pcnet32_rx_entry()
1227 netif_err(lp, rx_err, dev, in pcnet32_rx_entry()
1232 skb = lp->rx_skbuff[entry]; in pcnet32_rx_entry()
1233 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_rx_entry()
1234 lp->rx_dma_addr[entry], in pcnet32_rx_entry()
1238 lp->rx_skbuff[entry] = newskb; in pcnet32_rx_entry()
1239 lp->rx_dma_addr[entry] = new_dma_addr; in pcnet32_rx_entry()
1255 dma_sync_single_for_cpu(&lp->pci_dev->dev, in pcnet32_rx_entry()
1256 lp->rx_dma_addr[entry], pkt_len, in pcnet32_rx_entry()
1259 (unsigned char *)(lp->rx_skbuff[entry]->data), in pcnet32_rx_entry()
1261 dma_sync_single_for_device(&lp->pci_dev->dev, in pcnet32_rx_entry()
1262 lp->rx_dma_addr[entry], pkt_len, in pcnet32_rx_entry()
1273 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_rx() local
1274 int entry = lp->cur_rx & lp->rx_mod_mask; in pcnet32_rx()
1275 struct pcnet32_rx_head *rxp = &lp->rx_ring[entry]; in pcnet32_rx()
1280 pcnet32_rx_entry(dev, lp, rxp, entry); in pcnet32_rx()
1289 entry = (++lp->cur_rx) & lp->rx_mod_mask; in pcnet32_rx()
1290 rxp = &lp->rx_ring[entry]; in pcnet32_rx()
1298 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_tx() local
1299 unsigned int dirty_tx = lp->dirty_tx; in pcnet32_tx()
1303 while (dirty_tx != lp->cur_tx) { in pcnet32_tx()
1304 int entry = dirty_tx & lp->tx_mod_mask; in pcnet32_tx()
1305 int status = (short)le16_to_cpu(lp->tx_ring[entry].status); in pcnet32_tx()
1310 lp->tx_ring[entry].base = 0; in pcnet32_tx()
1314 int err_status = le32_to_cpu(lp->tx_ring[entry].misc); in pcnet32_tx()
1316 netif_err(lp, tx_err, dev, in pcnet32_tx()
1330 netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); in pcnet32_tx()
1336 if (!lp->dxsuflo) { /* If controller doesn't recover ... */ in pcnet32_tx()
1339 netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); in pcnet32_tx()
1351 if (lp->tx_skbuff[entry]) { in pcnet32_tx()
1352 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_tx()
1353 lp->tx_dma_addr[entry], in pcnet32_tx()
1354 lp->tx_skbuff[entry]->len, in pcnet32_tx()
1356 dev_kfree_skb_any(lp->tx_skbuff[entry]); in pcnet32_tx()
1357 lp->tx_skbuff[entry] = NULL; in pcnet32_tx()
1358 lp->tx_dma_addr[entry] = 0; in pcnet32_tx()
1363 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); in pcnet32_tx()
1364 if (delta > lp->tx_ring_size) { in pcnet32_tx()
1365 netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n", in pcnet32_tx()
1366 dirty_tx, lp->cur_tx, lp->tx_full); in pcnet32_tx()
1367 dirty_tx += lp->tx_ring_size; in pcnet32_tx()
1368 delta -= lp->tx_ring_size; in pcnet32_tx()
1371 if (lp->tx_full && in pcnet32_tx()
1373 delta < lp->tx_ring_size - 2) { in pcnet32_tx()
1375 lp->tx_full = 0; in pcnet32_tx()
1378 lp->dirty_tx = dirty_tx; in pcnet32_tx()
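The transmit-reclaim loop (1298-1378) advances dirty_tx toward cur_tx, stopping at the first descriptor the chip still owns; completed slots have their error bits accounted from the misc word, their mappings torn down and their skbs freed, and the tail of the function sanity-checks that dirty_tx never trails by more than a full ring and reopens the queue once enough slots are free. A sketch of the skeleton; the negative-status OWN test comes from line 1305 above, while the 0x4000 error-summary bit and the netif_queue_stopped()/netif_wake_queue() calls are assumptions.

        /* core of pcnet32_tx(); dirty_tx and delta are locals declared earlier (sketch) */
        while (dirty_tx != lp->cur_tx) {
                int entry = dirty_tx & lp->tx_mod_mask;
                int status = (short)le16_to_cpu(lp->tx_ring[entry].status);

                if (status < 0)
                        break;                  /* chip still owns this descriptor */

                lp->tx_ring[entry].base = 0;

                if (status & 0x4000) {          /* assumed error-summary bit */
                        /* decode le32_to_cpu(lp->tx_ring[entry].misc) into
                         * dev->stats.tx_*_errors here (elided) */
                        dev->stats.tx_errors++;
                }

                if (lp->tx_skbuff[entry]) {
                        dma_unmap_single(&lp->pci_dev->dev,
                                         lp->tx_dma_addr[entry],
                                         lp->tx_skbuff[entry]->len,
                                         DMA_TO_DEVICE);
                        dev_kfree_skb_any(lp->tx_skbuff[entry]);
                        lp->tx_skbuff[entry] = NULL;
                        lp->tx_dma_addr[entry] = 0;
                }
                dirty_tx++;
        }

        delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
        if (delta > lp->tx_ring_size) {         /* should never happen */
                dirty_tx += lp->tx_ring_size;
                delta -= lp->tx_ring_size;
        }
        if (lp->tx_full && netif_queue_stopped(dev) &&
            delta < lp->tx_ring_size - 2) {
                lp->tx_full = 0;
                netif_wake_queue(dev);          /* assumed */
        }
        lp->dirty_tx = dirty_tx;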
1385 struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); in pcnet32_poll() local
1386 struct net_device *dev = lp->dev; in pcnet32_poll()
1394 spin_lock_irqsave(&lp->lock, flags); in pcnet32_poll()
1397 lp->a->reset(ioaddr); in pcnet32_poll()
1398 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ in pcnet32_poll()
1405 val = lp->a->read_csr(ioaddr, CSR3); in pcnet32_poll()
1407 lp->a->write_csr(ioaddr, CSR3, val); in pcnet32_poll()
1410 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN); in pcnet32_poll()
1413 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_poll()
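pcnet32_poll (1385-1413) is the NAPI handler: it receives at most budget packets, reclaims transmit descriptors under lp->lock (resetting and restarting the chip if the TX path reported a problem), and when it finishes below budget it completes NAPI and unmasks interrupts again through CSR3 and CSR0_INTEN. A sketch of the whole routine; the pcnet32_rx()/pcnet32_tx() return-value semantics, the napi_complete_done() call and the 0x00ff CSR3 mask are assumptions.

static int pcnet32_poll(struct napi_struct *napi, int budget)
{
        struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
        struct net_device *dev = lp->dev;
        unsigned long ioaddr = dev->base_addr;
        unsigned long flags;
        int work_done;
        u16 val;

        work_done = pcnet32_rx(dev, budget);    /* assumed: refill ring, pass skbs up */

        spin_lock_irqsave(&lp->lock, flags);
        if (pcnet32_tx(dev)) {                  /* assumed: nonzero means restart needed */
                lp->a->reset(ioaddr);
                lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
                pcnet32_restart(dev, CSR0_START);       /* assumed */
                netif_wake_queue(dev);
        }

        if (work_done < budget && napi_complete_done(napi, work_done)) {
                /* re-enable RX/TX interrupts, then set IENA */
                val = lp->a->read_csr(ioaddr, CSR3);
                val &= 0x00ff;                          /* assumed mask */
                lp->a->write_csr(ioaddr, CSR3, val);
                lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
        }
        spin_unlock_irqrestore(&lp->lock, flags);
        return work_done;
}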
1421 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_regs_len() local
1422 int j = lp->phycount * PCNET32_REGS_PER_PHY; in pcnet32_get_regs_len()
1432 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_regs() local
1433 const struct pcnet32_access *a = lp->a; in pcnet32_get_regs()
1437 spin_lock_irqsave(&lp->lock, flags); in pcnet32_get_regs()
1464 if (lp->mii) { in pcnet32_get_regs()
1467 if (lp->phymask & (1 << j)) { in pcnet32_get_regs()
1469 lp->a->write_bcr(ioaddr, 33, in pcnet32_get_regs()
1471 *buff++ = lp->a->read_bcr(ioaddr, 34); in pcnet32_get_regs()
1478 pcnet32_clr_suspend(lp, ioaddr); in pcnet32_get_regs()
1480 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_get_regs()
1590 struct pcnet32_private *lp; in pcnet32_probe1() local
1740 dev = alloc_etherdev(sizeof(*lp)); in pcnet32_probe1()
1828 lp = netdev_priv(dev); in pcnet32_probe1()
1830 lp->init_block = dma_alloc_coherent(&pdev->dev, in pcnet32_probe1()
1831 sizeof(*lp->init_block), in pcnet32_probe1()
1832 &lp->init_dma_addr, GFP_KERNEL); in pcnet32_probe1()
1833 if (!lp->init_block) { in pcnet32_probe1()
1839 lp->pci_dev = pdev; in pcnet32_probe1()
1841 lp->dev = dev; in pcnet32_probe1()
1843 spin_lock_init(&lp->lock); in pcnet32_probe1()
1845 lp->name = chipname; in pcnet32_probe1()
1846 lp->shared_irq = shared; in pcnet32_probe1()
1847 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ in pcnet32_probe1()
1848 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ in pcnet32_probe1()
1849 lp->tx_mod_mask = lp->tx_ring_size - 1; in pcnet32_probe1()
1850 lp->rx_mod_mask = lp->rx_ring_size - 1; in pcnet32_probe1()
1851 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); in pcnet32_probe1()
1852 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); in pcnet32_probe1()
1853 lp->mii_if.full_duplex = fdx; in pcnet32_probe1()
1854 lp->mii_if.phy_id_mask = 0x1f; in pcnet32_probe1()
1855 lp->mii_if.reg_num_mask = 0x1f; in pcnet32_probe1()
1856 lp->dxsuflo = dxsuflo; in pcnet32_probe1()
1857 lp->mii = mii; in pcnet32_probe1()
1858 lp->chip_version = chip_version; in pcnet32_probe1()
1859 lp->msg_enable = pcnet32_debug; in pcnet32_probe1()
1862 lp->options = PCNET32_PORT_ASEL; in pcnet32_probe1()
1864 lp->options = options_mapping[options[cards_found]]; in pcnet32_probe1()
1866 if (lp->chip_version == PCNET32_79C970A) in pcnet32_probe1()
1867 lp->options = PCNET32_PORT_10BT; in pcnet32_probe1()
1868 lp->mii_if.dev = dev; in pcnet32_probe1()
1869 lp->mii_if.mdio_read = mdio_read; in pcnet32_probe1()
1870 lp->mii_if.mdio_write = mdio_write; in pcnet32_probe1()
1873 lp->napi.weight = lp->rx_ring_size / 2; in pcnet32_probe1()
1875 netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); in pcnet32_probe1()
1877 if (fdx && !(lp->options & PCNET32_PORT_ASEL) && in pcnet32_probe1()
1879 lp->options |= PCNET32_PORT_FD; in pcnet32_probe1()
1881 lp->a = a; in pcnet32_probe1()
1884 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { in pcnet32_probe1()
1891 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; in pcnet32_probe1()
1893 lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */ in pcnet32_probe1()
1894 lp->init_block->tlen_rlen = in pcnet32_probe1()
1895 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); in pcnet32_probe1()
1897 lp->init_block->phys_addr[i] = dev->dev_addr[i]; in pcnet32_probe1()
1898 lp->init_block->filter[0] = 0x00000000; in pcnet32_probe1()
1899 lp->init_block->filter[1] = 0x00000000; in pcnet32_probe1()
1900 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); in pcnet32_probe1()
1901 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); in pcnet32_probe1()
1906 a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); in pcnet32_probe1()
1907 a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); in pcnet32_probe1()
1937 if (lp->mii) { in pcnet32_probe1()
1938 /* lp->phycount and lp->phymask are set to 0 by memset above */ in pcnet32_probe1()
1940 lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f; in pcnet32_probe1()
1953 lp->phycount++; in pcnet32_probe1()
1954 lp->phymask |= (1 << i); in pcnet32_probe1()
1955 lp->mii_if.phy_id = i; in pcnet32_probe1()
1960 lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); in pcnet32_probe1()
1961 if (lp->phycount > 1) in pcnet32_probe1()
1962 lp->options |= PCNET32_PORT_MII; in pcnet32_probe1()
1965 timer_setup(&lp->watchdog_timer, pcnet32_watchdog, 0); in pcnet32_probe1()
1979 lp->next = pcnet32_dev; in pcnet32_probe1()
1984 pr_info("%s: registered as %s\n", dev->name, lp->name); in pcnet32_probe1()
1994 dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block), in pcnet32_probe1()
1995 lp->init_block, lp->init_dma_addr); in pcnet32_probe1()
2006 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_alloc_ring() local
2008 lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev, in pcnet32_alloc_ring()
2009 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, in pcnet32_alloc_ring()
2010 &lp->tx_ring_dma_addr, GFP_KERNEL); in pcnet32_alloc_ring()
2011 if (lp->tx_ring == NULL) { in pcnet32_alloc_ring()
2012 netif_err(lp, drv, dev, "Coherent memory allocation failed\n"); in pcnet32_alloc_ring()
2016 lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev, in pcnet32_alloc_ring()
2017 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, in pcnet32_alloc_ring()
2018 &lp->rx_ring_dma_addr, GFP_KERNEL); in pcnet32_alloc_ring()
2019 if (lp->rx_ring == NULL) { in pcnet32_alloc_ring()
2020 netif_err(lp, drv, dev, "Coherent memory allocation failed\n"); in pcnet32_alloc_ring()
2024 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), in pcnet32_alloc_ring()
2026 if (!lp->tx_dma_addr) in pcnet32_alloc_ring()
2029 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), in pcnet32_alloc_ring()
2031 if (!lp->rx_dma_addr) in pcnet32_alloc_ring()
2034 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), in pcnet32_alloc_ring()
2036 if (!lp->tx_skbuff) in pcnet32_alloc_ring()
2039 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), in pcnet32_alloc_ring()
2041 if (!lp->rx_skbuff) in pcnet32_alloc_ring()
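pcnet32_alloc_ring (2006-2041) sets up everything the data path needs: one coherent DMA descriptor ring per direction plus kcalloc'd arrays holding a dma_addr_t and an sk_buff pointer per slot, with any failure falling back to pcnet32_free_ring(), which tolerates partially built state. A sketch of the TX half; the -ENOMEM return values are assumptions.

        /* inside pcnet32_alloc_ring() (sketch) */
        lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
                                         sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
                                         &lp->tx_ring_dma_addr, GFP_KERNEL);
        if (lp->tx_ring == NULL) {
                netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
                return -ENOMEM;                 /* assumed return value */
        }

        lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), GFP_KERNEL);
        if (!lp->tx_dma_addr)
                return -ENOMEM;

        lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), GFP_KERNEL);
        if (!lp->tx_skbuff)
                return -ENOMEM;

        /* ... the RX ring, rx_dma_addr[] and rx_skbuff[] follow the same pattern ... */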
2049 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_free_ring() local
2051 kfree(lp->tx_skbuff); in pcnet32_free_ring()
2052 lp->tx_skbuff = NULL; in pcnet32_free_ring()
2054 kfree(lp->rx_skbuff); in pcnet32_free_ring()
2055 lp->rx_skbuff = NULL; in pcnet32_free_ring()
2057 kfree(lp->tx_dma_addr); in pcnet32_free_ring()
2058 lp->tx_dma_addr = NULL; in pcnet32_free_ring()
2060 kfree(lp->rx_dma_addr); in pcnet32_free_ring()
2061 lp->rx_dma_addr = NULL; in pcnet32_free_ring()
2063 if (lp->tx_ring) { in pcnet32_free_ring()
2064 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_free_ring()
2065 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, in pcnet32_free_ring()
2066 lp->tx_ring, lp->tx_ring_dma_addr); in pcnet32_free_ring()
2067 lp->tx_ring = NULL; in pcnet32_free_ring()
2070 if (lp->rx_ring) { in pcnet32_free_ring()
2071 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_free_ring()
2072 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, in pcnet32_free_ring()
2073 lp->rx_ring, lp->rx_ring_dma_addr); in pcnet32_free_ring()
2074 lp->rx_ring = NULL; in pcnet32_free_ring()
2080 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_open() local
2081 struct pci_dev *pdev = lp->pci_dev; in pcnet32_open()
2089 lp->shared_irq ? IRQF_SHARED : 0, dev->name, in pcnet32_open()
2094 spin_lock_irqsave(&lp->lock, flags); in pcnet32_open()
2102 lp->a->reset(ioaddr); in pcnet32_open()
2105 lp->a->write_bcr(ioaddr, 20, 2); in pcnet32_open()
2107 netif_printk(lp, ifup, KERN_DEBUG, dev, in pcnet32_open()
2109 __func__, dev->irq, (u32) (lp->tx_ring_dma_addr), in pcnet32_open()
2110 (u32) (lp->rx_ring_dma_addr), in pcnet32_open()
2111 (u32) (lp->init_dma_addr)); in pcnet32_open()
2113 lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL); in pcnet32_open()
2114 lp->port_tp = !!(lp->options & PCNET32_PORT_10BT); in pcnet32_open()
2115 lp->fdx = !!(lp->options & PCNET32_PORT_FD); in pcnet32_open()
2118 val = lp->a->read_bcr(ioaddr, 2) & ~2; in pcnet32_open()
2119 if (lp->options & PCNET32_PORT_ASEL) in pcnet32_open()
2121 lp->a->write_bcr(ioaddr, 2, val); in pcnet32_open()
2124 if (lp->mii_if.full_duplex) { in pcnet32_open()
2125 val = lp->a->read_bcr(ioaddr, 9) & ~3; in pcnet32_open()
2126 if (lp->options & PCNET32_PORT_FD) { in pcnet32_open()
2128 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) in pcnet32_open()
2130 } else if (lp->options & PCNET32_PORT_ASEL) { in pcnet32_open()
2132 if (lp->chip_version == 0x2627) in pcnet32_open()
2135 lp->a->write_bcr(ioaddr, 9, val); in pcnet32_open()
2139 val = lp->a->read_csr(ioaddr, 124) & ~0x10; in pcnet32_open()
2140 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) in pcnet32_open()
2142 lp->a->write_csr(ioaddr, 124, val); in pcnet32_open()
2148 if (lp->options & PCNET32_PORT_ASEL) { in pcnet32_open()
2149 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; in pcnet32_open()
2150 netif_printk(lp, link, KERN_DEBUG, dev, in pcnet32_open()
2154 if (lp->phycount < 2) { in pcnet32_open()
2160 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { in pcnet32_open()
2161 lp->a->write_bcr(ioaddr, 32, in pcnet32_open()
2162 lp->a->read_bcr(ioaddr, 32) | 0x0080); in pcnet32_open()
2164 val = lp->a->read_bcr(ioaddr, 32) & ~0xb8; in pcnet32_open()
2165 if (lp->options & PCNET32_PORT_FD) in pcnet32_open()
2167 if (lp->options & PCNET32_PORT_100) in pcnet32_open()
2169 lp->a->write_bcr(ioaddr, 32, val); in pcnet32_open()
2171 if (lp->options & PCNET32_PORT_ASEL) { in pcnet32_open()
2172 lp->a->write_bcr(ioaddr, 32, in pcnet32_open()
2173 lp->a->read_bcr(ioaddr, in pcnet32_open()
2176 val = lp->a->read_bcr(ioaddr, 32) & ~0x98; in pcnet32_open()
2178 lp->a->write_bcr(ioaddr, 32, val); in pcnet32_open()
2191 val = lp->a->read_bcr(ioaddr, 2); in pcnet32_open()
2192 lp->a->write_bcr(ioaddr, 2, val & ~2); in pcnet32_open()
2193 val = lp->a->read_bcr(ioaddr, 32); in pcnet32_open()
2194 lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ in pcnet32_open()
2196 if (!(lp->options & PCNET32_PORT_ASEL)) { in pcnet32_open()
2202 (lp->options & PCNET32_PORT_100) ? in pcnet32_open()
2204 bcr9 = lp->a->read_bcr(ioaddr, 9); in pcnet32_open()
2206 if (lp->options & PCNET32_PORT_FD) { in pcnet32_open()
2213 lp->a->write_bcr(ioaddr, 9, bcr9); in pcnet32_open()
2217 if (lp->phymask & (1 << i)) { in pcnet32_open()
2229 lp->mii_if.phy_id = i; in pcnet32_open()
2231 if (lp->options & PCNET32_PORT_ASEL) { in pcnet32_open()
2232 mii_ethtool_gset(&lp->mii_if, &ecmd); in pcnet32_open()
2235 mii_ethtool_sset(&lp->mii_if, &ecmd); in pcnet32_open()
2238 lp->mii_if.phy_id = first_phy; in pcnet32_open()
2239 netif_info(lp, link, dev, "Using PHY number %d\n", first_phy); in pcnet32_open()
2243 if (lp->dxsuflo) { /* Disable transmit stop on underflow */ in pcnet32_open()
2244 val = lp->a->read_csr(ioaddr, CSR3); in pcnet32_open()
2246 lp->a->write_csr(ioaddr, CSR3, val); in pcnet32_open()
2250 lp->init_block->mode = in pcnet32_open()
2251 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); in pcnet32_open()
2259 napi_enable(&lp->napi); in pcnet32_open()
2262 lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); in pcnet32_open()
2263 lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); in pcnet32_open()
2265 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ in pcnet32_open()
2266 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); in pcnet32_open()
2270 if (lp->chip_version >= PCNET32_79C970A) { in pcnet32_open()
2273 mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT); in pcnet32_open()
2278 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) in pcnet32_open()
2284 lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL); in pcnet32_open()
2286 netif_printk(lp, ifup, KERN_DEBUG, dev, in pcnet32_open()
2289 (u32) (lp->init_dma_addr), in pcnet32_open()
2290 lp->a->read_csr(ioaddr, CSR0)); in pcnet32_open()
2292 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_open()
2304 lp->a->write_bcr(ioaddr, 20, 4); in pcnet32_open()
2307 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_open()
2327 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_purge_tx_ring() local
2330 for (i = 0; i < lp->tx_ring_size; i++) { in pcnet32_purge_tx_ring()
2331 lp->tx_ring[i].status = 0; /* CPU owns buffer */ in pcnet32_purge_tx_ring()
2333 if (lp->tx_skbuff[i]) { in pcnet32_purge_tx_ring()
2334 if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i])) in pcnet32_purge_tx_ring()
2335 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_purge_tx_ring()
2336 lp->tx_dma_addr[i], in pcnet32_purge_tx_ring()
2337 lp->tx_skbuff[i]->len, in pcnet32_purge_tx_ring()
2339 dev_kfree_skb_any(lp->tx_skbuff[i]); in pcnet32_purge_tx_ring()
2341 lp->tx_skbuff[i] = NULL; in pcnet32_purge_tx_ring()
2342 lp->tx_dma_addr[i] = 0; in pcnet32_purge_tx_ring()
2349 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_init_ring() local
2352 lp->tx_full = 0; in pcnet32_init_ring()
2353 lp->cur_rx = lp->cur_tx = 0; in pcnet32_init_ring()
2354 lp->dirty_rx = lp->dirty_tx = 0; in pcnet32_init_ring()
2356 for (i = 0; i < lp->rx_ring_size; i++) { in pcnet32_init_ring()
2357 struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; in pcnet32_init_ring()
2359 lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB); in pcnet32_init_ring()
2360 rx_skbuff = lp->rx_skbuff[i]; in pcnet32_init_ring()
2363 netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", in pcnet32_init_ring()
2371 if (lp->rx_dma_addr[i] == 0) { in pcnet32_init_ring()
2372 lp->rx_dma_addr[i] = in pcnet32_init_ring()
2373 dma_map_single(&lp->pci_dev->dev, rx_skbuff->data, in pcnet32_init_ring()
2375 if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) { in pcnet32_init_ring()
2377 netif_err(lp, drv, dev, in pcnet32_init_ring()
2383 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); in pcnet32_init_ring()
2384 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); in pcnet32_init_ring()
2386 lp->rx_ring[i].status = cpu_to_le16(0x8000); in pcnet32_init_ring()
2390 for (i = 0; i < lp->tx_ring_size; i++) { in pcnet32_init_ring()
2391 lp->tx_ring[i].status = 0; /* CPU owns buffer */ in pcnet32_init_ring()
2393 lp->tx_ring[i].base = 0; in pcnet32_init_ring()
2394 lp->tx_dma_addr[i] = 0; in pcnet32_init_ring()
2397 lp->init_block->tlen_rlen = in pcnet32_init_ring()
2398 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); in pcnet32_init_ring()
2400 lp->init_block->phys_addr[i] = dev->dev_addr[i]; in pcnet32_init_ring()
2401 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); in pcnet32_init_ring()
2402 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); in pcnet32_init_ring()
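pcnet32_init_ring (2349-2402) resets the ring indices, makes sure every RX slot has an allocated and DMA-mapped skb, hands each RX descriptor to the chip by writing 0x8000 (the OWN bit) into its status word, clears the TX descriptors, and rewrites the init block with the ring geometry, MAC address and ring base addresses. A sketch of the per-slot RX setup; the skb_reserve() call, the wmb() barrier, PKT_BUF_SIZE and the error return values are assumptions.

        /* RX-slot setup inside pcnet32_init_ring() (sketch) */
        for (i = 0; i < lp->rx_ring_size; i++) {
                struct sk_buff *rx_skbuff = lp->rx_skbuff[i];

                if (!rx_skbuff) {
                        rx_skbuff = netdev_alloc_skb(dev, PKT_BUF_SKB);
                        if (!rx_skbuff)
                                return -ENOMEM;         /* assumed error value */
                        lp->rx_skbuff[i] = rx_skbuff;
                        skb_reserve(rx_skbuff, NET_IP_ALIGN);   /* assumed */
                }

                if (lp->rx_dma_addr[i] == 0) {
                        lp->rx_dma_addr[i] =
                                dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
                                               PKT_BUF_SIZE, DMA_FROM_DEVICE);
                        if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i]))
                                return -EIO;            /* assumed error value */
                }
                lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
                lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
                wmb();                          /* base/length before ownership */
                lp->rx_ring[i].status = cpu_to_le16(0x8000);    /* give it to the chip */
        }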
2413 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_restart() local
2419 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP) in pcnet32_restart()
2423 netif_err(lp, drv, dev, "%s timed out waiting for stop\n", in pcnet32_restart()
2431 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); in pcnet32_restart()
2434 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) in pcnet32_restart()
2437 lp->a->write_csr(ioaddr, CSR0, csr0_bits); in pcnet32_restart()
2442 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_tx_timeout() local
2445 spin_lock_irqsave(&lp->lock, flags); in pcnet32_tx_timeout()
2449 dev->name, lp->a->read_csr(ioaddr, CSR0)); in pcnet32_tx_timeout()
2450 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); in pcnet32_tx_timeout()
2452 if (netif_msg_tx_err(lp)) { in pcnet32_tx_timeout()
2456 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", in pcnet32_tx_timeout()
2457 lp->cur_rx); in pcnet32_tx_timeout()
2458 for (i = 0; i < lp->rx_ring_size; i++) in pcnet32_tx_timeout()
2460 le32_to_cpu(lp->rx_ring[i].base), in pcnet32_tx_timeout()
2461 (-le16_to_cpu(lp->rx_ring[i].buf_length)) & in pcnet32_tx_timeout()
2462 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), in pcnet32_tx_timeout()
2463 le16_to_cpu(lp->rx_ring[i].status)); in pcnet32_tx_timeout()
2464 for (i = 0; i < lp->tx_ring_size; i++) in pcnet32_tx_timeout()
2466 le32_to_cpu(lp->tx_ring[i].base), in pcnet32_tx_timeout()
2467 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, in pcnet32_tx_timeout()
2468 le32_to_cpu(lp->tx_ring[i].misc), in pcnet32_tx_timeout()
2469 le16_to_cpu(lp->tx_ring[i].status)); in pcnet32_tx_timeout()
2477 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_tx_timeout()
2483 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_start_xmit() local
2489 spin_lock_irqsave(&lp->lock, flags); in pcnet32_start_xmit()
2491 netif_printk(lp, tx_queued, KERN_DEBUG, dev, in pcnet32_start_xmit()
2493 __func__, lp->a->read_csr(ioaddr, CSR0)); in pcnet32_start_xmit()
2503 entry = lp->cur_tx & lp->tx_mod_mask; in pcnet32_start_xmit()
2508 lp->tx_ring[entry].length = cpu_to_le16(-skb->len); in pcnet32_start_xmit()
2510 lp->tx_ring[entry].misc = 0x00000000; in pcnet32_start_xmit()
2512 lp->tx_dma_addr[entry] = in pcnet32_start_xmit()
2513 dma_map_single(&lp->pci_dev->dev, skb->data, skb->len, in pcnet32_start_xmit()
2515 if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) { in pcnet32_start_xmit()
2520 lp->tx_skbuff[entry] = skb; in pcnet32_start_xmit()
2521 lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); in pcnet32_start_xmit()
2523 lp->tx_ring[entry].status = cpu_to_le16(status); in pcnet32_start_xmit()
2525 lp->cur_tx++; in pcnet32_start_xmit()
2529 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); in pcnet32_start_xmit()
2531 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { in pcnet32_start_xmit()
2532 lp->tx_full = 1; in pcnet32_start_xmit()
2536 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_start_xmit()
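pcnet32_start_xmit (2483-2536) fills the next free descriptor under lp->lock: the buffer length is stored as a two's-complement value (line 2508), the skb is DMA-mapped, the base pointer written, and ownership handed over with the status write; CSR0 is then poked with TXPOLL so the chip fetches the descriptor at once, and the queue is stopped if the following slot is still occupied. A fragment-level sketch; the 0x8300 status value (OWN, STP, ENP), the wmb() barrier and the drop path are assumptions.

        /* descriptor fill inside pcnet32_start_xmit(); lp->lock held (sketch) */
        status = 0x8300;                        /* assumed: OWN | STP | ENP */
        entry = lp->cur_tx & lp->tx_mod_mask;

        lp->tx_ring[entry].length = cpu_to_le16(-skb->len);     /* two's complement */
        lp->tx_ring[entry].misc = 0x00000000;

        lp->tx_dma_addr[entry] =
                dma_map_single(&lp->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) {
                dev_kfree_skb_any(skb);
                dev->stats.tx_dropped++;
                goto drop_packet;               /* assumed label */
        }
        lp->tx_skbuff[entry] = skb;
        lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
        wmb();                                  /* descriptor body before ownership */
        lp->tx_ring[entry].status = cpu_to_le16(status);

        lp->cur_tx++;
        dev->stats.tx_bytes += skb->len;

        /* trigger an immediate transmit poll */
        lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);

        if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
                lp->tx_full = 1;
                netif_stop_queue(dev);          /* assumed */
        }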
2545 struct pcnet32_private *lp; in pcnet32_interrupt() local
2551 lp = netdev_priv(dev); in pcnet32_interrupt()
2553 spin_lock(&lp->lock); in pcnet32_interrupt()
2555 csr0 = lp->a->read_csr(ioaddr, CSR0); in pcnet32_interrupt()
2560 lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f); in pcnet32_interrupt()
2562 netif_printk(lp, intr, KERN_DEBUG, dev, in pcnet32_interrupt()
2564 csr0, lp->a->read_csr(ioaddr, CSR0)); in pcnet32_interrupt()
2584 netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n", in pcnet32_interrupt()
2588 if (napi_schedule_prep(&lp->napi)) { in pcnet32_interrupt()
2591 val = lp->a->read_csr(ioaddr, CSR3); in pcnet32_interrupt()
2593 lp->a->write_csr(ioaddr, CSR3, val); in pcnet32_interrupt()
2595 __napi_schedule(&lp->napi); in pcnet32_interrupt()
2598 csr0 = lp->a->read_csr(ioaddr, CSR0); in pcnet32_interrupt()
2601 netif_printk(lp, intr, KERN_DEBUG, dev, in pcnet32_interrupt()
2603 lp->a->read_csr(ioaddr, CSR0)); in pcnet32_interrupt()
2605 spin_unlock(&lp->lock); in pcnet32_interrupt()
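The interrupt handler (2545-2605) loops on CSR0, acknowledging the sources it is about to handle by writing them back, counting babble/missed-frame/bus-error conditions, and then deferring the real RX/TX work to NAPI: it masks RINT/TINT in CSR3 so the poll routine can run with device interrupts quiet and schedules lp->napi. A sketch of the core loop; the 0x8f00 loop condition, the boguscnt limit, the error bits and the 0x5f00 CSR3 mask are assumptions.

        /* inside pcnet32_interrupt(); lp->lock held, boguscnt limits the loop (assumed) */
        csr0 = lp->a->read_csr(ioaddr, CSR0);
        while ((csr0 & 0x8f00) && --boguscnt >= 0) {    /* assumed condition */
                /* acknowledge the sources we are about to handle */
                lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);

                if (csr0 & 0x4000)
                        dev->stats.tx_errors++;         /* assumed: TX babble */
                if (csr0 & 0x1000)
                        dev->stats.rx_errors++;         /* assumed: missed frame */

                if (napi_schedule_prep(&lp->napi)) {
                        u16 val;
                        /* mask RINT/TINT so NAPI runs with chip interrupts quiet */
                        val = lp->a->read_csr(ioaddr, CSR3);
                        val |= 0x5f00;                  /* assumed mask bits */
                        lp->a->write_csr(ioaddr, CSR3, val);
                        __napi_schedule(&lp->napi);
                }
                csr0 = lp->a->read_csr(ioaddr, CSR0);
        }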
2613 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_close() local
2616 del_timer_sync(&lp->watchdog_timer); in pcnet32_close()
2619 napi_disable(&lp->napi); in pcnet32_close()
2621 spin_lock_irqsave(&lp->lock, flags); in pcnet32_close()
2623 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); in pcnet32_close()
2625 netif_printk(lp, ifdown, KERN_DEBUG, dev, in pcnet32_close()
2627 lp->a->read_csr(ioaddr, CSR0)); in pcnet32_close()
2630 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); in pcnet32_close()
2636 lp->a->write_bcr(ioaddr, 20, 4); in pcnet32_close()
2638 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_close()
2642 spin_lock_irqsave(&lp->lock, flags); in pcnet32_close()
2647 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_close()
2654 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_stats() local
2658 spin_lock_irqsave(&lp->lock, flags); in pcnet32_get_stats()
2659 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); in pcnet32_get_stats()
2660 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_get_stats()
2668 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_load_multicast() local
2669 volatile struct pcnet32_init_block *ib = lp->init_block; in pcnet32_load_multicast()
2680 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); in pcnet32_load_multicast()
2681 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); in pcnet32_load_multicast()
2682 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); in pcnet32_load_multicast()
2683 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); in pcnet32_load_multicast()
2697 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i, in pcnet32_load_multicast()
2707 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_multicast_list() local
2710 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_multicast_list()
2712 csr15 = lp->a->read_csr(ioaddr, CSR15); in pcnet32_set_multicast_list()
2715 netif_info(lp, hw, dev, "Promiscuous mode enabled\n"); in pcnet32_set_multicast_list()
2716 lp->init_block->mode = in pcnet32_set_multicast_list()
2717 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << in pcnet32_set_multicast_list()
2719 lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000); in pcnet32_set_multicast_list()
2721 lp->init_block->mode = in pcnet32_set_multicast_list()
2722 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); in pcnet32_set_multicast_list()
2723 lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff); in pcnet32_set_multicast_list()
2728 pcnet32_clr_suspend(lp, ioaddr); in pcnet32_set_multicast_list()
2730 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); in pcnet32_set_multicast_list()
2735 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_multicast_list()
2738 /* This routine assumes that the lp->lock is held */
2741 struct pcnet32_private *lp = netdev_priv(dev); in mdio_read() local
2745 if (!lp->mii) in mdio_read()
2748 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); in mdio_read()
2749 val_out = lp->a->read_bcr(ioaddr, 34); in mdio_read()
2754 /* This routine assumes that the lp->lock is held */
2757 struct pcnet32_private *lp = netdev_priv(dev); in mdio_write() local
2760 if (!lp->mii) in mdio_write()
2763 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); in mdio_write()
2764 lp->a->write_bcr(ioaddr, 34, val); in mdio_write()
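PHY registers are reached indirectly (2741-2764): the (phy_id, reg_num) pair is written to BCR33 and the data is then read from, or written to, BCR34; both helpers expect lp->lock to be held, as the comments above note. A sketch of the two accessors, which follows directly from the matched lines; only the early-return value for the no-MII case is an assumption.

static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
        struct pcnet32_private *lp = netdev_priv(dev);
        ulong ioaddr = dev->base_addr;

        if (!lp->mii)
                return 0;                       /* assumed: nothing to read */

        lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
        return lp->a->read_bcr(ioaddr, 34);
}

static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
        struct pcnet32_private *lp = netdev_priv(dev);
        ulong ioaddr = dev->base_addr;

        if (!lp->mii)
                return;

        lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
        lp->a->write_bcr(ioaddr, 34, val);
}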
2769 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_ioctl() local
2774 if (lp->mii) { in pcnet32_ioctl()
2775 spin_lock_irqsave(&lp->lock, flags); in pcnet32_ioctl()
2776 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); in pcnet32_ioctl()
2777 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_ioctl()
2787 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_check_otherphy() local
2788 struct mii_if_info mii = lp->mii_if; in pcnet32_check_otherphy()
2793 if (i == lp->mii_if.phy_id) in pcnet32_check_otherphy()
2795 if (lp->phymask & (1 << i)) { in pcnet32_check_otherphy()
2799 netif_info(lp, link, dev, "Using PHY number %d\n", in pcnet32_check_otherphy()
2804 mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); in pcnet32_check_otherphy()
2805 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, in pcnet32_check_otherphy()
2814 lp->mii_if.phy_id = i; in pcnet32_check_otherphy()
2827 * Caller is assumed to hold and release the lp->lock.
2832 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_check_media() local
2837 if (lp->mii) { in pcnet32_check_media()
2838 curr_link = mii_link_ok(&lp->mii_if); in pcnet32_check_media()
2839 } else if (lp->chip_version == PCNET32_79C970A) { in pcnet32_check_media()
2842 if (!lp->autoneg && lp->port_tp) in pcnet32_check_media()
2843 curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); in pcnet32_check_media()
2848 curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); in pcnet32_check_media()
2853 netif_info(lp, link, dev, "link down\n"); in pcnet32_check_media()
2855 if (lp->phycount > 1) { in pcnet32_check_media()
2861 if (lp->mii) { in pcnet32_check_media()
2862 if (netif_msg_link(lp)) { in pcnet32_check_media()
2865 mii_ethtool_gset(&lp->mii_if, &ecmd); in pcnet32_check_media()
2871 bcr9 = lp->a->read_bcr(dev->base_addr, 9); in pcnet32_check_media()
2872 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { in pcnet32_check_media()
2873 if (lp->mii_if.full_duplex) in pcnet32_check_media()
2877 lp->a->write_bcr(dev->base_addr, 9, bcr9); in pcnet32_check_media()
2880 netif_info(lp, link, dev, "link up\n"); in pcnet32_check_media()
2892 struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer); in pcnet32_watchdog() local
2893 struct net_device *dev = lp->dev; in pcnet32_watchdog()
2897 spin_lock_irqsave(&lp->lock, flags); in pcnet32_watchdog()
2899 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_watchdog()
2901 mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); in pcnet32_watchdog()
2933 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_remove_one() local
2938 dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block), in pcnet32_remove_one()
2939 lp->init_block, lp->init_dma_addr); in pcnet32_remove_one()
3016 struct pcnet32_private *lp = netdev_priv(pcnet32_dev); in pcnet32_cleanup_module() local
3017 next_dev = lp->next; in pcnet32_cleanup_module()
3021 dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block), in pcnet32_cleanup_module()
3022 lp->init_block, lp->init_dma_addr); in pcnet32_cleanup_module()