Lines Matching refs:ep

326 	struct epic_private *ep;  in epic_init_one()  local
356 dev = alloc_etherdev(sizeof (*ep)); in epic_init_one()
369 ep = netdev_priv(dev); in epic_init_one()
370 ep->ioaddr = ioaddr; in epic_init_one()
371 ep->mii.dev = dev; in epic_init_one()
372 ep->mii.mdio_read = mdio_read; in epic_init_one()
373 ep->mii.mdio_write = mdio_write; in epic_init_one()
374 ep->mii.phy_id_mask = 0x1f; in epic_init_one()
375 ep->mii.reg_num_mask = 0x1f; in epic_init_one()
381 ep->tx_ring = ring_space; in epic_init_one()
382 ep->tx_ring_dma = ring_dma; in epic_init_one()
388 ep->rx_ring = ring_space; in epic_init_one()
389 ep->rx_ring_dma = ring_dma; in epic_init_one()
401 spin_lock_init(&ep->lock); in epic_init_one()
402 spin_lock_init(&ep->napi_lock); in epic_init_one()
424 pr_cont(" %4.4x%s", read_eeprom(ep, i), in epic_init_one()
428 ep->pci_dev = pdev; in epic_init_one()
429 ep->chip_id = chip_idx; in epic_init_one()
430 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; in epic_init_one()
431 ep->irq_mask = in epic_init_one()
432 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) in epic_init_one()
440 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) { in epic_init_one()
443 ep->phys[phy_idx++] = phy; in epic_init_one()
450 ep->mii_phy_cnt = phy_idx; in epic_init_one()
452 phy = ep->phys[0]; in epic_init_one()
453 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); in epic_init_one()
457 ep->mii.advertising, mdio_read(dev, phy, 5)); in epic_init_one()
458 } else if ( ! (ep->chip_flags & NO_MII)) { in epic_init_one()
462 ep->phys[0] = 3; in epic_init_one()
464 ep->mii.phy_id = ep->phys[0]; in epic_init_one()
468 if (ep->chip_flags & MII_PWRDWN) in epic_init_one()
474 ep->mii.force_media = ep->mii.full_duplex = 1; in epic_init_one()
477 dev->if_port = ep->default_port = option; in epic_init_one()
483 netif_napi_add(dev, &ep->napi, epic_poll, 64); in epic_init_one()
498 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring, in epic_init_one()
499 ep->rx_ring_dma); in epic_init_one()
501 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring, in epic_init_one()
502 ep->tx_ring_dma); in epic_init_one()
537 static void epic_disable_int(struct net_device *dev, struct epic_private *ep) in epic_disable_int() argument
539 void __iomem *ioaddr = ep->ioaddr; in epic_disable_int()
552 struct epic_private *ep) in epic_napi_irq_off() argument
554 void __iomem *ioaddr = ep->ioaddr; in epic_napi_irq_off()
556 ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent); in epic_napi_irq_off()
561 struct epic_private *ep) in epic_napi_irq_on() argument
563 void __iomem *ioaddr = ep->ioaddr; in epic_napi_irq_on()
566 ew32(INTMASK, ep->irq_mask | EpicNapiEvent); in epic_napi_irq_on()
569 static int read_eeprom(struct epic_private *ep, int location) in read_eeprom() argument
571 void __iomem *ioaddr = ep->ioaddr; in read_eeprom()
607 struct epic_private *ep = netdev_priv(dev); in mdio_read() local
608 void __iomem *ioaddr = ep->ioaddr; in mdio_read()
631 struct epic_private *ep = netdev_priv(dev); in mdio_write() local
632 void __iomem *ioaddr = ep->ioaddr; in mdio_write()
647 struct epic_private *ep = netdev_priv(dev); in epic_open() local
648 void __iomem *ioaddr = ep->ioaddr; in epic_open()
649 const int irq = ep->pci_dev->irq; in epic_open()
655 napi_enable(&ep->napi); in epic_open()
658 napi_disable(&ep->napi); in epic_open()
677 if (ep->chip_flags & MII_PWRDWN) in epic_open()
696 ep->tx_threshold = TX_FIFO_THRESH; in epic_open()
697 ew32(TxThresh, ep->tx_threshold); in epic_open()
700 if (ep->mii_phy_cnt) in epic_open()
701 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]); in epic_open()
705 mdio_read(dev, ep->phys[0], MII_BMSR)); in epic_open()
708 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA); in epic_open()
711 ep->mii.full_duplex = 1; in epic_open()
713 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); in epic_open()
716 ep->mii.full_duplex ? "full" in epic_open()
718 ep->phys[0], mii_lpa); in epic_open()
722 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); in epic_open()
723 ew32(PRxCDAR, ep->rx_ring_dma); in epic_open()
724 ew32(PTxCDAR, ep->tx_ring_dma); in epic_open()
734 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | in epic_open()
740 ep->mii.full_duplex ? "full" : "half"); in epic_open()
745 timer_setup(&ep->timer, epic_timer, 0); in epic_open()
746 ep->timer.expires = jiffies + 3*HZ; in epic_open()
747 add_timer(&ep->timer); in epic_open()
757 struct epic_private *ep = netdev_priv(dev); in epic_pause() local
758 void __iomem *ioaddr = ep->ioaddr; in epic_pause()
780 struct epic_private *ep = netdev_priv(dev); in epic_restart() local
781 void __iomem *ioaddr = ep->ioaddr; in epic_restart()
788 ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); in epic_restart()
801 if (ep->chip_flags & MII_PWRDWN) in epic_restart()
807 ep->tx_threshold = TX_FIFO_THRESH; in epic_restart()
808 ew32(TxThresh, ep->tx_threshold); in epic_restart()
809 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); in epic_restart()
810 ew32(PRxCDAR, ep->rx_ring_dma + in epic_restart()
811 (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc)); in epic_restart()
812 ew32(PTxCDAR, ep->tx_ring_dma + in epic_restart()
813 (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc)); in epic_restart()
821 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | in epic_restart()
830 struct epic_private *ep = netdev_priv(dev); in check_media() local
831 void __iomem *ioaddr = ep->ioaddr; in check_media()
832 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0; in check_media()
833 int negotiated = mii_lpa & ep->mii.advertising; in check_media()
836 if (ep->mii.force_media) in check_media()
840 if (ep->mii.full_duplex != duplex) { in check_media()
841 ep->mii.full_duplex = duplex; in check_media()
843 ep->mii.full_duplex ? "full" : "half", in check_media()
844 ep->phys[0], mii_lpa); in check_media()
845 ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79); in check_media()
851 struct epic_private *ep = from_timer(ep, t, timer); in epic_timer() local
852 struct net_device *dev = ep->mii.dev; in epic_timer()
853 void __iomem *ioaddr = ep->ioaddr; in epic_timer()
865 ep->timer.expires = jiffies + next_tick; in epic_timer()
866 add_timer(&ep->timer); in epic_timer()
871 struct epic_private *ep = netdev_priv(dev); in epic_tx_timeout() local
872 void __iomem *ioaddr = ep->ioaddr; in epic_tx_timeout()
879 ep->dirty_tx, ep->cur_tx); in epic_tx_timeout()
892 if (!ep->tx_full) in epic_tx_timeout()
899 struct epic_private *ep = netdev_priv(dev); in epic_init_ring() local
902 ep->tx_full = 0; in epic_init_ring()
903 ep->dirty_tx = ep->cur_tx = 0; in epic_init_ring()
904 ep->cur_rx = ep->dirty_rx = 0; in epic_init_ring()
905 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in epic_init_ring()
909 ep->rx_ring[i].rxstatus = 0; in epic_init_ring()
910 ep->rx_ring[i].buflength = ep->rx_buf_sz; in epic_init_ring()
911 ep->rx_ring[i].next = ep->rx_ring_dma + in epic_init_ring()
913 ep->rx_skbuff[i] = NULL; in epic_init_ring()
916 ep->rx_ring[i-1].next = ep->rx_ring_dma; in epic_init_ring()
920 struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); in epic_init_ring()
921 ep->rx_skbuff[i] = skb; in epic_init_ring()
925 ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev, in epic_init_ring()
927 ep->rx_buf_sz, in epic_init_ring()
929 ep->rx_ring[i].rxstatus = DescOwn; in epic_init_ring()
931 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in epic_init_ring()
936 ep->tx_skbuff[i] = NULL; in epic_init_ring()
937 ep->tx_ring[i].txstatus = 0x0000; in epic_init_ring()
938 ep->tx_ring[i].next = ep->tx_ring_dma + in epic_init_ring()
941 ep->tx_ring[i-1].next = ep->tx_ring_dma; in epic_init_ring()
946 struct epic_private *ep = netdev_priv(dev); in epic_start_xmit() local
947 void __iomem *ioaddr = ep->ioaddr; in epic_start_xmit()
959 spin_lock_irqsave(&ep->lock, flags); in epic_start_xmit()
960 free_count = ep->cur_tx - ep->dirty_tx; in epic_start_xmit()
961 entry = ep->cur_tx % TX_RING_SIZE; in epic_start_xmit()
963 ep->tx_skbuff[entry] = skb; in epic_start_xmit()
964 ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev, in epic_start_xmit()
976 ep->tx_full = 1; in epic_start_xmit()
978 ep->tx_ring[entry].buflength = ctrl_word | skb->len; in epic_start_xmit()
979 ep->tx_ring[entry].txstatus = in epic_start_xmit()
983 ep->cur_tx++; in epic_start_xmit()
984 if (ep->tx_full) in epic_start_xmit()
987 spin_unlock_irqrestore(&ep->lock, flags); in epic_start_xmit()
998 static void epic_tx_error(struct net_device *dev, struct epic_private *ep, in epic_tx_error() argument
1020 static void epic_tx(struct net_device *dev, struct epic_private *ep) in epic_tx() argument
1028 cur_tx = ep->cur_tx; in epic_tx()
1029 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) { in epic_tx()
1032 int txstatus = ep->tx_ring[entry].txstatus; in epic_tx()
1040 dev->stats.tx_bytes += ep->tx_skbuff[entry]->len; in epic_tx()
1042 epic_tx_error(dev, ep, txstatus); in epic_tx()
1045 skb = ep->tx_skbuff[entry]; in epic_tx()
1046 dma_unmap_single(&ep->pci_dev->dev, in epic_tx()
1047 ep->tx_ring[entry].bufaddr, skb->len, in epic_tx()
1050 ep->tx_skbuff[entry] = NULL; in epic_tx()
1056 dirty_tx, cur_tx, ep->tx_full); in epic_tx()
1060 ep->dirty_tx = dirty_tx; in epic_tx()
1061 if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) { in epic_tx()
1063 ep->tx_full = 0; in epic_tx()
1073 struct epic_private *ep = netdev_priv(dev); in epic_interrupt() local
1074 void __iomem *ioaddr = ep->ioaddr; in epic_interrupt()
1093 spin_lock(&ep->napi_lock); in epic_interrupt()
1094 if (napi_schedule_prep(&ep->napi)) { in epic_interrupt()
1095 epic_napi_irq_off(dev, ep); in epic_interrupt()
1096 __napi_schedule(&ep->napi); in epic_interrupt()
1098 spin_unlock(&ep->napi_lock); in epic_interrupt()
1116 ew32(TxThresh, ep->tx_threshold += 128); in epic_interrupt()
1141 struct epic_private *ep = netdev_priv(dev); in epic_rx() local
1142 int entry = ep->cur_rx % RX_RING_SIZE; in epic_rx()
1143 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx; in epic_rx()
1148 ep->rx_ring[entry].rxstatus); in epic_rx()
1154 while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) { in epic_rx()
1155 int status = ep->rx_ring[entry].rxstatus; in epic_rx()
1189 dma_sync_single_for_cpu(&ep->pci_dev->dev, in epic_rx()
1190 ep->rx_ring[entry].bufaddr, in epic_rx()
1191 ep->rx_buf_sz, in epic_rx()
1193 skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len); in epic_rx()
1195 dma_sync_single_for_device(&ep->pci_dev->dev, in epic_rx()
1196 ep->rx_ring[entry].bufaddr, in epic_rx()
1197 ep->rx_buf_sz, in epic_rx()
1200 dma_unmap_single(&ep->pci_dev->dev, in epic_rx()
1201 ep->rx_ring[entry].bufaddr, in epic_rx()
1202 ep->rx_buf_sz, in epic_rx()
1204 skb_put(skb = ep->rx_skbuff[entry], pkt_len); in epic_rx()
1205 ep->rx_skbuff[entry] = NULL; in epic_rx()
1213 entry = (++ep->cur_rx) % RX_RING_SIZE; in epic_rx()
1217 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) { in epic_rx()
1218 entry = ep->dirty_rx % RX_RING_SIZE; in epic_rx()
1219 if (ep->rx_skbuff[entry] == NULL) { in epic_rx()
1221 skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); in epic_rx()
1225 ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev, in epic_rx()
1227 ep->rx_buf_sz, in epic_rx()
1232 ep->rx_ring[entry].rxstatus = DescOwn; in epic_rx()
1237 static void epic_rx_err(struct net_device *dev, struct epic_private *ep) in epic_rx_err() argument
1239 void __iomem *ioaddr = ep->ioaddr; in epic_rx_err()
1254 struct epic_private *ep = container_of(napi, struct epic_private, napi); in epic_poll() local
1255 struct net_device *dev = ep->mii.dev; in epic_poll()
1256 void __iomem *ioaddr = ep->ioaddr; in epic_poll()
1259 epic_tx(dev, ep); in epic_poll()
1263 epic_rx_err(dev, ep); in epic_poll()
1268 spin_lock_irqsave(&ep->napi_lock, flags); in epic_poll()
1271 epic_napi_irq_on(dev, ep); in epic_poll()
1272 spin_unlock_irqrestore(&ep->napi_lock, flags); in epic_poll()
1280 struct epic_private *ep = netdev_priv(dev); in epic_close() local
1281 struct pci_dev *pdev = ep->pci_dev; in epic_close()
1282 void __iomem *ioaddr = ep->ioaddr; in epic_close()
1287 napi_disable(&ep->napi); in epic_close()
1293 del_timer_sync(&ep->timer); in epic_close()
1295 epic_disable_int(dev, ep); in epic_close()
1303 skb = ep->rx_skbuff[i]; in epic_close()
1304 ep->rx_skbuff[i] = NULL; in epic_close()
1305 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ in epic_close()
1306 ep->rx_ring[i].buflength = 0; in epic_close()
1308 dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr, in epic_close()
1309 ep->rx_buf_sz, DMA_FROM_DEVICE); in epic_close()
1312 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */ in epic_close()
1315 skb = ep->tx_skbuff[i]; in epic_close()
1316 ep->tx_skbuff[i] = NULL; in epic_close()
1319 dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len, in epic_close()
1332 struct epic_private *ep = netdev_priv(dev); in epic_get_stats() local
1333 void __iomem *ioaddr = ep->ioaddr; in epic_get_stats()
1353 struct epic_private *ep = netdev_priv(dev); in set_rx_mode() local
1354 void __iomem *ioaddr = ep->ioaddr; in set_rx_mode()
1382 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { in set_rx_mode()
1385 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); in set_rx_mode()
1447 struct epic_private *ep = netdev_priv(dev); in ethtool_begin() local
1448 void __iomem *ioaddr = ep->ioaddr; in ethtool_begin()
1450 if (ep->ethtool_ops_nesting == U32_MAX) in ethtool_begin()
1453 if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) { in ethtool_begin()
1462 struct epic_private *ep = netdev_priv(dev); in ethtool_complete() local
1463 void __iomem *ioaddr = ep->ioaddr; in ethtool_complete()
1466 if (!--ep->ethtool_ops_nesting && !netif_running(dev)) { in ethtool_complete()
1514 struct epic_private *ep = netdev_priv(dev); in epic_remove_one() local
1516 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring, in epic_remove_one()
1517 ep->tx_ring_dma); in epic_remove_one()
1518 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring, in epic_remove_one()
1519 ep->rx_ring_dma); in epic_remove_one()
1521 pci_iounmap(pdev, ep->ioaddr); in epic_remove_one()
1531 struct epic_private *ep = netdev_priv(dev); in epic_suspend() local
1532 void __iomem *ioaddr = ep->ioaddr; in epic_suspend()
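
Every match above reaches the driver's private state, struct epic_private, which alloc_etherdev(sizeof(*ep)) allocates directly behind the public struct net_device (line 356) and which netdev_priv(dev) hands back (line 369). The following is a minimal userspace sketch of that layout, not kernel code: the names fake_net_device, fake_priv, fake_alloc_etherdev and fake_netdev_priv are illustrative stand-ins, and the real kernel helpers additionally align the private area, which this sketch omits.

/*
 * Userspace illustration of the "private data behind the device struct"
 * pattern used throughout the listing above. Stand-in names only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_net_device {          /* stand-in for struct net_device */
	char name[16];
	/* the private area follows immediately after this struct */
};

struct fake_priv {                /* stand-in for struct epic_private */
	unsigned int cur_tx, dirty_tx;
};

/* stand-in for alloc_etherdev(): one allocation, device plus private area */
static struct fake_net_device *fake_alloc_etherdev(size_t priv_size)
{
	return calloc(1, sizeof(struct fake_net_device) + priv_size);
}

/* stand-in for netdev_priv(): the private area sits right after the device */
static void *fake_netdev_priv(struct fake_net_device *dev)
{
	return (char *)dev + sizeof(struct fake_net_device);
}

int main(void)
{
	struct fake_net_device *dev = fake_alloc_etherdev(sizeof(struct fake_priv));
	struct fake_priv *ep;

	if (!dev)
		return 1;
	strcpy(dev->name, "eth0");
	ep = fake_netdev_priv(dev);   /* mirrors ep = netdev_priv(dev) at line 369 */
	ep->cur_tx = ep->dirty_tx = 0;
	printf("%s: %u packets in flight\n", dev->name, ep->cur_tx - ep->dirty_tx);
	free(dev);
	return 0;
}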