
Lines Matching refs:vptr

373 static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
376 static void velocity_print_info(struct velocity_info *vptr);
387 static void velocity_free_rd_ring(struct velocity_info *vptr);
388 static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
389 static int velocity_soft_reset(struct velocity_info *vptr);
390 static void mii_init(struct velocity_info *vptr, u32 mii_status);
392 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
393 static void velocity_print_link_status(struct velocity_info *vptr);
395 static void velocity_shutdown(struct velocity_info *vptr);
396 static void enable_flow_control_ability(struct velocity_info *vptr);
402 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
489 struct velocity_info *vptr = netdev_priv(dev); in velocity_remove1() local
496 list_del(&vptr->list); in velocity_remove1()
500 iounmap(vptr->mac_regs); in velocity_remove1()
605 static void velocity_init_cam_filter(struct velocity_info *vptr) in velocity_init_cam_filter() argument
607 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_init_cam_filter()
614 memset(vptr->vCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
615 memset(vptr->mCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
616 mac_set_vlan_cam_mask(regs, vptr->vCAMmask); in velocity_init_cam_filter()
617 mac_set_cam_mask(regs, vptr->mCAMmask); in velocity_init_cam_filter()
620 if (vptr->vlgrp) { in velocity_init_cam_filter()
623 if (!vlan_group_get_device(vptr->vlgrp, 0)) in velocity_init_cam_filter()
627 if (vlan_group_get_device(vptr->vlgrp, vid)) { in velocity_init_cam_filter()
629 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); in velocity_init_cam_filter()
634 mac_set_vlan_cam_mask(regs, vptr->vCAMmask); in velocity_init_cam_filter()
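
A note on the mask arithmetic: the memset/mac_set_*_cam_mask pairs above treat vCAMmask and mCAMmask as 64-bit bitmaps over the CAM entries, with entry i stored at bit (i % 8) of byte (i / 8). A minimal stand-alone sketch of that arithmetic (the entry numbers are illustrative, not from the driver):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Model of the 8-byte CAM valid-bit mask built in velocity_init_cam_filter():
     * marking entry e valid is mask[e / 8] |= 0x1 << (e % 8). */
    int main(void)
    {
        uint8_t cam_mask[8];
        memset(cam_mask, 0, sizeof(cam_mask));         /* as at lines 614-615 */

        int entries[] = { 0, 9, 63 };                  /* e.g. active VLAN ids */
        for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
            int e = entries[i];
            cam_mask[e / 8] |= 0x1 << (e % 8);
        }

        for (int i = 0; i < 8; i++)
            printf("mask[%d] = 0x%02x\n", i, cam_mask[i]);
        return 0;
    }
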
641 struct velocity_info *vptr = netdev_priv(dev); in velocity_vlan_rx_register() local
643 vptr->vlgrp = grp; in velocity_vlan_rx_register()
648 struct velocity_info *vptr = netdev_priv(dev); in velocity_vlan_rx_add_vid() local
650 spin_lock_irq(&vptr->lock); in velocity_vlan_rx_add_vid()
651 velocity_init_cam_filter(vptr); in velocity_vlan_rx_add_vid()
652 spin_unlock_irq(&vptr->lock); in velocity_vlan_rx_add_vid()
657 struct velocity_info *vptr = netdev_priv(dev); in velocity_vlan_rx_kill_vid() local
659 spin_lock_irq(&vptr->lock); in velocity_vlan_rx_kill_vid()
660 vlan_group_set_device(vptr->vlgrp, vid, NULL); in velocity_vlan_rx_kill_vid()
661 velocity_init_cam_filter(vptr); in velocity_vlan_rx_kill_vid()
662 spin_unlock_irq(&vptr->lock); in velocity_vlan_rx_kill_vid()
665 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) in velocity_init_rx_ring_indexes() argument
667 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; in velocity_init_rx_ring_indexes()
678 static void velocity_rx_reset(struct velocity_info *vptr) in velocity_rx_reset() argument
681 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_rx_reset()
684 velocity_init_rx_ring_indexes(vptr); in velocity_rx_reset()
689 for (i = 0; i < vptr->options.numrx; ++i) in velocity_rx_reset()
690 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; in velocity_rx_reset()
692 writew(vptr->options.numrx, &regs->RBRDU); in velocity_rx_reset()
693 writel(vptr->rx.pool_dma, &regs->RDBaseLo); in velocity_rx_reset()
695 writew(vptr->options.numrx - 1, &regs->RDCSize); in velocity_rx_reset()
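
velocity_rx_reset() re-arms the whole RX ring: the three ring indexes (dirty, filled, curr) are zeroed and every descriptor is handed back to the NIC by setting an ownership bit in its length word. A user-space model of that handoff, assuming OWNED_BY_NIC is a single bit in a 32-bit len field (which the |= at line 690 suggests):

    #include <stdio.h>
    #include <stdint.h>

    #define OWNED_BY_NIC 0x80000000u   /* assumed bit position, illustrative */
    #define NUM_RX 4

    struct rx_desc { uint32_t len; };

    int main(void)
    {
        struct rx_desc ring[NUM_RX] = { {0} };
        unsigned curr = 0, dirty = 0, filled = 0; /* velocity_init_rx_ring_indexes() */

        for (int i = 0; i < NUM_RX; i++)          /* give every slot to the NIC */
            ring[i].len |= OWNED_BY_NIC;

        printf("curr=%u dirty=%u filled=%u, slot 0 owner: %s\n",
               curr, dirty, filled,
               (ring[0].len & OWNED_BY_NIC) ? "NIC" : "host");
        return 0;
    }
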
707 static void velocity_init_registers(struct velocity_info *vptr, in velocity_init_registers() argument
710 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_init_registers()
719 netif_stop_queue(vptr->dev); in velocity_init_registers()
724 velocity_rx_reset(vptr); in velocity_init_registers()
728 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
729 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { in velocity_init_registers()
730 velocity_print_link_status(vptr); in velocity_init_registers()
731 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
732 netif_wake_queue(vptr->dev); in velocity_init_registers()
735 enable_flow_control_ability(vptr); in velocity_init_registers()
749 velocity_soft_reset(vptr); in velocity_init_registers()
754 writeb(vptr->dev->dev_addr[i], &(regs->PAR[i])); in velocity_init_registers()
760 mac_set_rx_thresh(regs, vptr->options.rx_thresh); in velocity_init_registers()
761 mac_set_dma_length(regs, vptr->options.DMA_length); in velocity_init_registers()
772 velocity_init_cam_filter(vptr); in velocity_init_registers()
777 velocity_set_multi(vptr->dev); in velocity_init_registers()
784 vptr->int_mask = INT_MASK_DEF; in velocity_init_registers()
786 writel(vptr->rx.pool_dma, &regs->RDBaseLo); in velocity_init_registers()
787 writew(vptr->options.numrx - 1, &regs->RDCSize); in velocity_init_registers()
791 writew(vptr->options.numtx - 1, &regs->TDCSize); in velocity_init_registers()
793 for (i = 0; i < vptr->tx.numq; i++) { in velocity_init_registers()
794 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]); in velocity_init_registers()
798 init_flow_control_register(vptr); in velocity_init_registers()
803 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
804 netif_stop_queue(vptr->dev); in velocity_init_registers()
806 mii_init(vptr, mii_status); in velocity_init_registers()
808 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { in velocity_init_registers()
809 velocity_print_link_status(vptr); in velocity_init_registers()
810 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
811 netif_wake_queue(vptr->dev); in velocity_init_registers()
814 enable_flow_control_ability(vptr); in velocity_init_registers()
816 mac_write_int_mask(vptr->int_mask, regs); in velocity_init_registers()
830 static int velocity_soft_reset(struct velocity_info *vptr) in velocity_soft_reset() argument
832 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_soft_reset()
883 struct velocity_info *vptr; in velocity_found1() local
905 vptr = netdev_priv(dev); in velocity_found1()
916 velocity_init_info(pdev, vptr, info); in velocity_found1()
918 vptr->dev = dev; in velocity_found1()
926 ret = velocity_get_pci_info(vptr, pdev); in velocity_found1()
938 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); in velocity_found1()
944 vptr->mac_regs = regs; in velocity_found1()
948 dev->base_addr = vptr->ioaddr; in velocity_found1()
956 velocity_get_options(&vptr->options, velocity_nics, drv_string); in velocity_found1()
962 vptr->options.flags &= info->flags; in velocity_found1()
968 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); in velocity_found1()
970 vptr->wol_opts = vptr->options.wol_opts; in velocity_found1()
971 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_found1()
973 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); in velocity_found1()
985 if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) in velocity_found1()
995 velocity_print_info(vptr); in velocity_found1()
1006 list_add(&vptr->list, &velocity_dev_list); in velocity_found1()
1033 static void __devinit velocity_print_info(struct velocity_info *vptr) in velocity_print_info() argument
1035 struct net_device *dev = vptr->dev; in velocity_print_info()
1037 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); in velocity_print_info()
1055 struct velocity_info *vptr, in velocity_init_info() argument
1058 memset(vptr, 0, sizeof(struct velocity_info)); in velocity_init_info()
1060 vptr->pdev = pdev; in velocity_init_info()
1061 vptr->chip_id = info->chip_id; in velocity_init_info()
1062 vptr->tx.numq = info->txqueue; in velocity_init_info()
1063 vptr->multicast_limit = MCAM_SIZE; in velocity_init_info()
1064 spin_lock_init(&vptr->lock); in velocity_init_info()
1065 INIT_LIST_HEAD(&vptr->list); in velocity_init_info()
1077 static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) in velocity_get_pci_info() argument
1079 vptr->rev_id = pdev->revision; in velocity_get_pci_info()
1083 vptr->ioaddr = pci_resource_start(pdev, 0); in velocity_get_pci_info()
1084 vptr->memaddr = pci_resource_start(pdev, 1); in velocity_get_pci_info()
1102 vptr->pdev = pdev; in velocity_get_pci_info()
1115 static int velocity_init_dma_rings(struct velocity_info *vptr) in velocity_init_dma_rings() argument
1117 struct velocity_opt *opt = &vptr->options; in velocity_init_dma_rings()
1120 struct pci_dev *pdev = vptr->pdev; in velocity_init_dma_rings()
1131 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + in velocity_init_dma_rings()
1135 vptr->dev->name); in velocity_init_dma_rings()
1139 vptr->rx.ring = pool; in velocity_init_dma_rings()
1140 vptr->rx.pool_dma = pool_dma; in velocity_init_dma_rings()
1145 for (i = 0; i < vptr->tx.numq; i++) { in velocity_init_dma_rings()
1146 vptr->tx.rings[i] = pool; in velocity_init_dma_rings()
1147 vptr->tx.pool_dma[i] = pool_dma; in velocity_init_dma_rings()
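
velocity_init_dma_rings() makes a single coherent allocation and carves it into the RX ring followed by tx.numq TX rings, recording a CPU pointer and a DMA address for each. A runnable model of the carving, with malloc standing in for pci_alloc_consistent and illustrative descriptor sizes:

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_RX     64
    #define NUM_TX     64
    #define NUM_TXQ    2
    #define RX_DESC_SZ 16   /* illustrative, not the real sizeof(struct rx_desc) */
    #define TX_DESC_SZ 64

    int main(void)
    {
        size_t rx_ring_size = NUM_RX * RX_DESC_SZ;
        size_t tx_ring_size = NUM_TX * TX_DESC_SZ;
        char *pool = malloc(rx_ring_size + tx_ring_size * NUM_TXQ);
        if (!pool)
            return 1;

        char *rx_ring = pool;                /* vptr->rx.ring      */
        char *tx_rings[NUM_TXQ];             /* vptr->tx.rings[i]  */
        char *p = pool + rx_ring_size;
        for (int i = 0; i < NUM_TXQ; i++) {
            tx_rings[i] = p;                 /* pool_dma advances in step */
            p += tx_ring_size;
        }

        printf("rx at +%ld, txq0 at +%ld, txq1 at +%ld\n",
               (long)(rx_ring - pool), (long)(tx_rings[0] - pool),
               (long)(tx_rings[1] - pool));
        free(pool);
        return 0;
    }
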
1162 static void velocity_free_dma_rings(struct velocity_info *vptr) in velocity_free_dma_rings() argument
1164 const int size = vptr->options.numrx * sizeof(struct rx_desc) + in velocity_free_dma_rings()
1165 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; in velocity_free_dma_rings()
1167 pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); in velocity_free_dma_rings()
1170 static void velocity_give_many_rx_descs(struct velocity_info *vptr) in velocity_give_many_rx_descs() argument
1172 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_give_many_rx_descs()
1179 if (vptr->rx.filled < 4) in velocity_give_many_rx_descs()
1184 unusable = vptr->rx.filled & 0x0003; in velocity_give_many_rx_descs()
1185 dirty = vptr->rx.dirty - unusable; in velocity_give_many_rx_descs()
1186 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { in velocity_give_many_rx_descs()
1187 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; in velocity_give_many_rx_descs()
1188 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; in velocity_give_many_rx_descs()
1191 writew(vptr->rx.filled & 0xfffc, &regs->RBRDU); in velocity_give_many_rx_descs()
1192 vptr->rx.filled = unusable; in velocity_give_many_rx_descs()
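
The masks in velocity_give_many_rx_descs() encode the hardware rule that RX descriptors are returned in blocks of four: filled & 0x0003 slots are carried over to the next pass, filled & 0xfffc are flipped to NIC ownership and written to RBRDU. A tiny worked example of that split:

    #include <stdio.h>

    int main(void)
    {
        int filled   = 11;                /* descriptors refilled by software */
        int unusable = filled & 0x0003;   /* remainder held back (3 here)     */
        int handed   = filled & 0xfffc;   /* multiple of 4 written to RBRDU   */

        printf("filled=%d -> hand back %d, carry %d\n", filled, handed, unusable);
        /* afterwards the driver keeps rx.filled = unusable (line 1192) */
        return 0;
    }
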
1195 static int velocity_rx_refill(struct velocity_info *vptr) in velocity_rx_refill() argument
1197 int dirty = vptr->rx.dirty, done = 0; in velocity_rx_refill()
1200 struct rx_desc *rd = vptr->rx.ring + dirty; in velocity_rx_refill()
1206 if (!vptr->rx.info[dirty].skb) { in velocity_rx_refill()
1207 if (velocity_alloc_rx_buf(vptr, dirty) < 0) in velocity_rx_refill()
1211 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; in velocity_rx_refill()
1212 } while (dirty != vptr->rx.curr); in velocity_rx_refill()
1215 vptr->rx.dirty = dirty; in velocity_rx_refill()
1216 vptr->rx.filled += done; in velocity_rx_refill()
1222 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) in velocity_set_rxbufsize() argument
1224 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; in velocity_set_rxbufsize()
1235 static int velocity_init_rd_ring(struct velocity_info *vptr) in velocity_init_rd_ring() argument
1239 vptr->rx.info = kcalloc(vptr->options.numrx, in velocity_init_rd_ring()
1241 if (!vptr->rx.info) in velocity_init_rd_ring()
1244 velocity_init_rx_ring_indexes(vptr); in velocity_init_rd_ring()
1246 if (velocity_rx_refill(vptr) != vptr->options.numrx) { in velocity_init_rd_ring()
1248 "%s: failed to allocate RX buffer.\n", vptr->dev->name); in velocity_init_rd_ring()
1249 velocity_free_rd_ring(vptr); in velocity_init_rd_ring()
1266 static void velocity_free_rd_ring(struct velocity_info *vptr) in velocity_free_rd_ring() argument
1270 if (vptr->rx.info == NULL) in velocity_free_rd_ring()
1273 for (i = 0; i < vptr->options.numrx; i++) { in velocity_free_rd_ring()
1274 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); in velocity_free_rd_ring()
1275 struct rx_desc *rd = vptr->rx.ring + i; in velocity_free_rd_ring()
1281 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, in velocity_free_rd_ring()
1289 kfree(vptr->rx.info); in velocity_free_rd_ring()
1290 vptr->rx.info = NULL; in velocity_free_rd_ring()
1302 static int velocity_init_td_ring(struct velocity_info *vptr) in velocity_init_td_ring() argument
1308 for (j = 0; j < vptr->tx.numq; j++) { in velocity_init_td_ring()
1309 curr = vptr->tx.pool_dma[j]; in velocity_init_td_ring()
1311 vptr->tx.infos[j] = kcalloc(vptr->options.numtx, in velocity_init_td_ring()
1314 if (!vptr->tx.infos[j]) { in velocity_init_td_ring()
1316 kfree(vptr->tx.infos[j]); in velocity_init_td_ring()
1320 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; in velocity_init_td_ring()
1329 static void velocity_free_td_ring_entry(struct velocity_info *vptr, in velocity_free_td_ring_entry() argument
1332 struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]); in velocity_free_td_ring_entry()
1342 pci_unmap_single(vptr->pdev, td_info->skb_dma[i], in velocity_free_td_ring_entry()
1360 static void velocity_free_td_ring(struct velocity_info *vptr) in velocity_free_td_ring() argument
1364 for (j = 0; j < vptr->tx.numq; j++) { in velocity_free_td_ring()
1365 if (vptr->tx.infos[j] == NULL) in velocity_free_td_ring()
1367 for (i = 0; i < vptr->options.numtx; i++) { in velocity_free_td_ring()
1368 velocity_free_td_ring_entry(vptr, j, i); in velocity_free_td_ring()
1371 kfree(vptr->tx.infos[j]); in velocity_free_td_ring()
1372 vptr->tx.infos[j] = NULL; in velocity_free_td_ring()
1386 static int velocity_rx_srv(struct velocity_info *vptr, int status) in velocity_rx_srv() argument
1388 struct net_device_stats *stats = &vptr->stats; in velocity_rx_srv()
1389 int rd_curr = vptr->rx.curr; in velocity_rx_srv()
1393 struct rx_desc *rd = vptr->rx.ring + rd_curr; in velocity_rx_srv()
1395 if (!vptr->rx.info[rd_curr].skb) in velocity_rx_srv()
1407 if (velocity_receive_frame(vptr, rd_curr) < 0) in velocity_rx_srv()
1421 if (rd_curr >= vptr->options.numrx) in velocity_rx_srv()
1425 vptr->rx.curr = rd_curr; in velocity_rx_srv()
1427 if ((works > 0) && (velocity_rx_refill(vptr) > 0)) in velocity_rx_srv()
1428 velocity_give_many_rx_descs(vptr); in velocity_rx_srv()
1473 struct velocity_info *vptr) in velocity_rx_copy() argument
1479 new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2); in velocity_rx_copy()
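
velocity_rx_copy() sizes the replacement skb as pkt_size + 2, the usual copybreak trick for IP-header alignment; the matching skb_reserve(new_skb, 2) is assumed here, since it is not among the matched lines. A stand-alone demonstration of why the two-byte pad matters:

    #include <stdio.h>

    #define ETH_HLEN 14

    /* Reserving two bytes in front of a 4-byte-aligned buffer puts the
     * 14-byte Ethernet header at offset 2, so the IP header that follows
     * lands on a 4-byte boundary. */
    int main(void)
    {
        unsigned long buf = 0x1000;                     /* aligned buffer start  */
        unsigned long ip_no_pad   = buf + ETH_HLEN;     /* offset 14: misaligned */
        unsigned long ip_with_pad = buf + 2 + ETH_HLEN; /* offset 16: aligned    */

        printf("no pad: ip %% 4 = %lu, with pad: ip %% 4 = %lu\n",
               ip_no_pad % 4, ip_with_pad % 4);
        return 0;
    }
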
1501 static inline void velocity_iph_realign(struct velocity_info *vptr, in velocity_iph_realign() argument
1504 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { in velocity_iph_realign()
1519 static int velocity_receive_frame(struct velocity_info *vptr, int idx) in velocity_receive_frame() argument
1522 struct net_device_stats *stats = &vptr->stats; in velocity_receive_frame()
1523 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); in velocity_receive_frame()
1524 struct rx_desc *rd = &(vptr->rx.ring[idx]); in velocity_receive_frame()
1529 …VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev… in velocity_receive_frame()
1535 vptr->stats.multicast++; in velocity_receive_frame()
1539 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, in velocity_receive_frame()
1540 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); in velocity_receive_frame()
1546 if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { in velocity_receive_frame()
1557 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { in velocity_receive_frame()
1558 velocity_iph_realign(vptr, skb, pkt_len); in velocity_receive_frame()
1563 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, in velocity_receive_frame()
1567 skb->protocol = eth_type_trans(skb, vptr->dev); in velocity_receive_frame()
1569 if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) { in velocity_receive_frame()
1570 vlan_hwaccel_rx(skb, vptr->vlgrp, in velocity_receive_frame()
1591 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) in velocity_alloc_rx_buf() argument
1593 struct rx_desc *rd = &(vptr->rx.ring[idx]); in velocity_alloc_rx_buf()
1594 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); in velocity_alloc_rx_buf()
1596 rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64); in velocity_alloc_rx_buf()
1605 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, in velocity_alloc_rx_buf()
1606 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); in velocity_alloc_rx_buf()
1613 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; in velocity_alloc_rx_buf()
1629 static int velocity_tx_srv(struct velocity_info *vptr, u32 status) in velocity_tx_srv() argument
1637 struct net_device_stats *stats = &vptr->stats; in velocity_tx_srv()
1639 for (qnum = 0; qnum < vptr->tx.numq; qnum++) { in velocity_tx_srv()
1640 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; in velocity_tx_srv()
1641 idx = (idx + 1) % vptr->options.numtx) { in velocity_tx_srv()
1646 td = &(vptr->tx.rings[qnum][idx]); in velocity_tx_srv()
1647 tdinfo = &(vptr->tx.infos[qnum][idx]); in velocity_tx_srv()
1670 velocity_free_tx_buf(vptr, tdinfo); in velocity_tx_srv()
1671 vptr->tx.used[qnum]--; in velocity_tx_srv()
1673 vptr->tx.tail[qnum] = idx; in velocity_tx_srv()
1675 if (AVAIL_TD(vptr, qnum) < 1) { in velocity_tx_srv()
1683 if (netif_queue_stopped(vptr->dev) && (full == 0) in velocity_tx_srv()
1684 && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { in velocity_tx_srv()
1685 netif_wake_queue(vptr->dev); in velocity_tx_srv()
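
velocity_tx_srv() reclaims completed transmit slots: start at tail, stop at the first descriptor the NIC still owns, free each buffer and decrement used, then record where the walk stopped as the new tail. A user-space model of that walk, again assuming a single ownership bit in the descriptor status word:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_TX 8
    #define OWNED_BY_NIC 0x80000000u   /* assumed bit, illustrative */

    struct tx_desc { uint32_t status; };

    int main(void)
    {
        struct tx_desc ring[NUM_TX] = { {0} };
        int tail = 2, used = 4;

        ring[5].status |= OWNED_BY_NIC;    /* NIC still transmitting slot 5 */

        int idx = tail;
        while (used > 0 && !(ring[idx].status & OWNED_BY_NIC)) {
            printf("reclaim slot %d\n", idx);   /* velocity_free_tx_buf() here */
            used--;
            idx = (idx + 1) % NUM_TX;           /* wrap as at line 1641 */
        }
        tail = idx;                             /* vptr->tx.tail[qnum] = idx */
        printf("new tail=%d used=%d\n", tail, used);
        return 0;
    }
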
1699 static void velocity_print_link_status(struct velocity_info *vptr) in velocity_print_link_status() argument
1702 if (vptr->mii_status & VELOCITY_LINK_FAIL) { in velocity_print_link_status()
1703 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name); in velocity_print_link_status()
1704 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_print_link_status()
1705 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name); in velocity_print_link_status()
1707 if (vptr->mii_status & VELOCITY_SPEED_1000) in velocity_print_link_status()
1709 else if (vptr->mii_status & VELOCITY_SPEED_100) in velocity_print_link_status()
1714 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in velocity_print_link_status()
1719 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); in velocity_print_link_status()
1720 switch (vptr->options.spd_dpx) { in velocity_print_link_status()
1751 static void velocity_error(struct velocity_info *vptr, int status) in velocity_error() argument
1755 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_error()
1760 netif_stop_queue(vptr->dev); in velocity_error()
1767 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_error()
1770 if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_error()
1771 vptr->mii_status = check_connection_type(regs); in velocity_error()
1778 if (vptr->rev_id < REV_ID_VT3216_A0) { in velocity_error()
1779 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in velocity_error()
1787 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) { in velocity_error()
1799 vptr->mii_status &= ~VELOCITY_LINK_FAIL; in velocity_error()
1800 netif_carrier_on(vptr->dev); in velocity_error()
1802 vptr->mii_status |= VELOCITY_LINK_FAIL; in velocity_error()
1803 netif_carrier_off(vptr->dev); in velocity_error()
1806 velocity_print_link_status(vptr); in velocity_error()
1807 enable_flow_control_ability(vptr); in velocity_error()
1816 if (vptr->mii_status & VELOCITY_LINK_FAIL) in velocity_error()
1817 netif_stop_queue(vptr->dev); in velocity_error()
1819 netif_wake_queue(vptr->dev); in velocity_error()
1823 velocity_update_hw_mibs(vptr); in velocity_error()
1825 mac_rx_queue_wake(vptr->mac_regs); in velocity_error()
1837 static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo) in velocity_free_tx_buf() argument
1851 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE); in velocity_free_tx_buf()
1853 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE); in velocity_free_tx_buf()
1862 static int velocity_init_rings(struct velocity_info *vptr, int mtu) in velocity_init_rings() argument
1866 velocity_set_rxbufsize(vptr, mtu); in velocity_init_rings()
1868 ret = velocity_init_dma_rings(vptr); in velocity_init_rings()
1872 ret = velocity_init_rd_ring(vptr); in velocity_init_rings()
1876 ret = velocity_init_td_ring(vptr); in velocity_init_rings()
1883 velocity_free_rd_ring(vptr); in velocity_init_rings()
1885 velocity_free_dma_rings(vptr); in velocity_init_rings()
1889 static void velocity_free_rings(struct velocity_info *vptr) in velocity_free_rings() argument
1891 velocity_free_td_ring(vptr); in velocity_free_rings()
1892 velocity_free_rd_ring(vptr); in velocity_free_rings()
1893 velocity_free_dma_rings(vptr); in velocity_free_rings()
1909 struct velocity_info *vptr = netdev_priv(dev); in velocity_open() local
1912 ret = velocity_init_rings(vptr, dev->mtu); in velocity_open()
1917 pci_set_power_state(vptr->pdev, PCI_D0); in velocity_open()
1919 velocity_give_many_rx_descs(vptr); in velocity_open()
1921 velocity_init_registers(vptr, VELOCITY_INIT_COLD); in velocity_open()
1923 ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED, in velocity_open()
1927 pci_set_power_state(vptr->pdev, PCI_D3hot); in velocity_open()
1928 velocity_free_rings(vptr); in velocity_open()
1932 mac_enable_int(vptr->mac_regs); in velocity_open()
1934 vptr->flags |= VELOCITY_FLAGS_OPENED; in velocity_open()
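
velocity_open() follows the usual setup/unwind ladder: allocate rings, raise the power state, hand the RX descriptors to the NIC, program the registers, then request the IRQ; a failing request_irq undoes the power state and the rings in reverse order. A stub model of just that control flow, with every device call replaced by a printout:

    #include <stdio.h>

    static int  init_rings(void)       { puts("rings allocated"); return 0; }
    static void free_rings(void)       { puts("rings freed"); }
    static void power_up(void)         { puts("pci_set_power_state(PCI_D0)"); }
    static void power_down(void)       { puts("pci_set_power_state(PCI_D3hot)"); }
    static int  request_irq_stub(void) { puts("request_irq fails"); return -1; }

    static int velocity_open_model(void)
    {
        int ret = init_rings();
        if (ret < 0)
            goto out;

        power_up();
        /* give_many_rx_descs(); init_registers(VELOCITY_INIT_COLD); */

        ret = request_irq_stub();
        if (ret < 0) {
            power_down();          /* unwind in reverse order */
            free_rings();
            goto out;
        }
        /* mac_enable_int(); flags |= VELOCITY_FLAGS_OPENED; */
    out:
        return ret;
    }

    int main(void) { return velocity_open_model() ? 1 : 0; }
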
1951 struct velocity_info *vptr = netdev_priv(dev); in velocity_change_mtu() local
1956 vptr->dev->name); in velocity_change_mtu()
1979 tmp_vptr->pdev = vptr->pdev; in velocity_change_mtu()
1980 tmp_vptr->options = vptr->options; in velocity_change_mtu()
1981 tmp_vptr->tx.numq = vptr->tx.numq; in velocity_change_mtu()
1987 spin_lock_irqsave(&vptr->lock, flags); in velocity_change_mtu()
1990 velocity_shutdown(vptr); in velocity_change_mtu()
1992 rx = vptr->rx; in velocity_change_mtu()
1993 tx = vptr->tx; in velocity_change_mtu()
1995 vptr->rx = tmp_vptr->rx; in velocity_change_mtu()
1996 vptr->tx = tmp_vptr->tx; in velocity_change_mtu()
2003 velocity_give_many_rx_descs(vptr); in velocity_change_mtu()
2005 velocity_init_registers(vptr, VELOCITY_INIT_COLD); in velocity_change_mtu()
2007 mac_enable_int(vptr->mac_regs); in velocity_change_mtu()
2010 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_change_mtu()
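
velocity_change_mtu() avoids resizing the rings in place: a scratch velocity_info is given rings sized for the new MTU, then under the device lock the NIC is shut down and the live rx/tx state is exchanged with the scratch copy, so the old rings can be freed outside the critical section. The swap in miniature:

    #include <stdio.h>

    struct rx_state { int buf_sz; };   /* stands in for the driver's rx state */

    int main(void)
    {
        struct rx_state live = { .buf_sz = 1536 };   /* rings for the old MTU */
        struct rx_state tmp  = { .buf_sz = 9000 };   /* rings for the new MTU */

        /* spin_lock_irqsave(&vptr->lock, flags); velocity_shutdown(vptr); */
        struct rx_state old = live;   /* rx = vptr->rx            (line 1992) */
        live = tmp;                   /* vptr->rx = tmp_vptr->rx  (line 1995) */
        tmp  = old;                   /* tmp_vptr->rx keeps the old rings     */
        /* re-init registers, unlock, then free the rings now held by tmp_vptr */

        printf("live buf_sz=%d, to-free buf_sz=%d\n", live.buf_sz, tmp.buf_sz);
        return 0;
    }
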
2029 static void velocity_shutdown(struct velocity_info *vptr) in velocity_shutdown() argument
2031 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_shutdown()
2050 struct velocity_info *vptr = netdev_priv(dev); in velocity_close() local
2053 velocity_shutdown(vptr); in velocity_close()
2055 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) in velocity_close()
2056 velocity_get_ip(vptr); in velocity_close()
2061 pci_set_power_state(vptr->pdev, PCI_D3hot); in velocity_close()
2063 velocity_free_rings(vptr); in velocity_close()
2065 vptr->flags &= (~VELOCITY_FLAGS_OPENED); in velocity_close()
2080 struct velocity_info *vptr = netdev_priv(dev); in velocity_xmit() local
2103 spin_lock_irqsave(&vptr->lock, flags); in velocity_xmit()
2105 index = vptr->tx.curr[qnum]; in velocity_xmit()
2106 td_ptr = &(vptr->tx.rings[qnum][index]); in velocity_xmit()
2107 tdinfo = &(vptr->tx.infos[qnum][index]); in velocity_xmit()
2127 tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, in velocity_xmit()
2141 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); in velocity_xmit()
2158 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); in velocity_xmit()
2167 if (vptr->vlgrp && vlan_tx_tag_present(skb)) { in velocity_xmit()
2175 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) in velocity_xmit()
2189 prev = vptr->options.numtx - 1; in velocity_xmit()
2191 vptr->tx.used[qnum]++; in velocity_xmit()
2192 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; in velocity_xmit()
2194 if (AVAIL_TD(vptr, qnum) < 1) in velocity_xmit()
2197 td_ptr = &(vptr->tx.rings[qnum][prev]); in velocity_xmit()
2199 mac_tx_queue_wake(vptr->mac_regs, qnum); in velocity_xmit()
2202 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_xmit()
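
The enqueue bookkeeping in velocity_xmit(): claim slot tx.curr[qnum], compute the previous slot for the ownership handoff (wrapping to numtx - 1 at slot 0, line 2189), bump used, advance curr modulo numtx, and stop the queue once AVAIL_TD() runs out. The same index arithmetic, reorganized into a stand-alone loop and assuming AVAIL_TD is numtx minus used, which matches its use here:

    #include <stdio.h>

    #define NUM_TX 8
    #define AVAIL_TD(used) (NUM_TX - (used))

    int main(void)
    {
        int curr = 0, used = 0;

        for (int pkt = 0; pkt < 10; pkt++) {
            if (AVAIL_TD(used) < 1) {
                printf("pkt %d: queue stopped, ring full\n", pkt);
                break;
            }
            int index = curr;
            int prev  = index ? index - 1 : NUM_TX - 1;
            used++;                            /* vptr->tx.used[qnum]++ */
            curr = (index + 1) % NUM_TX;       /* vptr->tx.curr[qnum]   */
            printf("pkt %d: slot %d (prev %d), used=%d\n", pkt, index, prev, used);
        }
        return 0;
    }
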
2221 struct velocity_info *vptr = netdev_priv(dev); in velocity_intr() local
2226 spin_lock(&vptr->lock); in velocity_intr()
2227 isr_status = mac_read_isr(vptr->mac_regs); in velocity_intr()
2231 spin_unlock(&vptr->lock); in velocity_intr()
2235 mac_disable_int(vptr->mac_regs); in velocity_intr()
2243 mac_write_isr(vptr->mac_regs, isr_status); in velocity_intr()
2245 velocity_error(vptr, isr_status); in velocity_intr()
2247 max_count += velocity_rx_srv(vptr, isr_status); in velocity_intr()
2249 max_count += velocity_tx_srv(vptr, isr_status); in velocity_intr()
2250 isr_status = mac_read_isr(vptr->mac_regs); in velocity_intr()
2251 if (max_count > vptr->options.int_works) in velocity_intr()
2258 spin_unlock(&vptr->lock); in velocity_intr()
2259 mac_enable_int(vptr->mac_regs); in velocity_intr()
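
velocity_intr() services the chip in a bounded loop: read the ISR, return IRQ_NONE if it is zero, otherwise ack exactly the bits just read, run the error/RX/TX service routines, re-read, and give up once the accumulated work passes options.int_works. A model of that loop with a stubbed ISR source:

    #include <stdio.h>

    #define INT_WORKS 32   /* stands in for vptr->options.int_works */

    static int pending = 3;                 /* stub: ISR stays asserted 3 reads */
    static unsigned mac_read_isr_stub(void)
    {
        return pending-- > 0 ? 0x1 : 0;
    }

    int main(void)
    {
        int max_count = 0;
        unsigned isr_status = mac_read_isr_stub();

        if (!isr_status)
            return 0;                       /* IRQ_NONE: not our interrupt */

        /* mac_disable_int(); */
        while (isr_status) {
            /* mac_write_isr(isr_status): ack what we saw (line 2243) */
            max_count += 1;                 /* rx_srv()/tx_srv() work here */
            isr_status = mac_read_isr_stub();
            if (max_count > INT_WORKS) {
                printf("work budget exceeded, deferring\n");
                break;
            }
        }
        /* mac_enable_int(); */
        printf("serviced, work=%d\n", max_count);
        return 0;
    }
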
2276 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_multi() local
2277 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_set_multi()
2286 } else if ((dev->mc_count > vptr->multicast_limit) in velocity_set_multi()
2292 int offset = MCAM_SIZE - vptr->multicast_limit; in velocity_set_multi()
2293 mac_get_cam_mask(regs, vptr->mCAMmask); in velocity_set_multi()
2297 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); in velocity_set_multi()
2300 mac_set_cam_mask(regs, vptr->mCAMmask); in velocity_set_multi()
2323 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_stats() local
2327 return &vptr->stats; in velocity_get_stats()
2329 spin_lock_irq(&vptr->lock); in velocity_get_stats()
2330 velocity_update_hw_mibs(vptr); in velocity_get_stats()
2331 spin_unlock_irq(&vptr->lock); in velocity_get_stats()
2333 vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; in velocity_get_stats()
2334 vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; in velocity_get_stats()
2335 vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; in velocity_get_stats()
2338 vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; in velocity_get_stats()
2342 vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; in velocity_get_stats()
2350 return &vptr->stats; in velocity_get_stats()
2366 struct velocity_info *vptr = netdev_priv(dev); in velocity_ioctl() local
2373 pci_set_power_state(vptr->pdev, PCI_D0); in velocity_ioctl()
2386 pci_set_power_state(vptr->pdev, PCI_D3hot); in velocity_ioctl()
2460 static void mii_init(struct velocity_info *vptr, u32 mii_status) in mii_init() argument
2464 switch (PHYID_GET_PHY_ID(vptr->phy_id)) { in mii_init()
2469 MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); in mii_init()
2475 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
2476 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); in mii_init()
2478 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); in mii_init()
2482 MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs); in mii_init()
2489 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); in mii_init()
2495 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
2496 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); in mii_init()
2498 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); in mii_init()
2506 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); in mii_init()
2510 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); in mii_init()
2515 velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR); in mii_init()
2518 velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR); in mii_init()
2658 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) in velocity_get_opt_media_mode() argument
2662 switch (vptr->options.spd_dpx) { in velocity_get_opt_media_mode()
2679 vptr->mii_status = status; in velocity_get_opt_media_mode()
2690 static void mii_set_auto_on(struct velocity_info *vptr) in mii_set_auto_on() argument
2692 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs)) in mii_set_auto_on()
2693 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); in mii_set_auto_on()
2695 MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); in mii_set_auto_on()
2714 static void set_mii_flow_control(struct velocity_info *vptr) in set_mii_flow_control() argument
2717 switch (vptr->options.flow_cntl) { in set_mii_flow_control()
2719 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
2720 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
2724 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
2725 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
2729 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
2730 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
2734 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
2735 MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
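
The four switch cases above reduce to two MII advertisement bits; the case labels are not visible in this listing, but the MII_REG_BITS_ON/OFF pairs at lines 2719-2735 give this mapping (per 802.3, ANAR_PAUSE advertises symmetric pause and ANAR_ASMDIR asymmetric pause, so the identical middle cases presumably cover the RX-only and TX+RX settings):

    flow_cntl case      ANAR_PAUSE  ANAR_ASMDIR  advertised pause ability
    lines 2719-2720     off         on           asymmetric only (TX pause)
    lines 2724-2725     on          on           symmetric + asymmetric
    lines 2729-2730     on          on           symmetric + asymmetric
    lines 2734-2735     off         off          none
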
2751 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) in velocity_set_media_mode() argument
2754 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_set_media_mode()
2756 vptr->mii_status = mii_check_media_mode(vptr->mac_regs); in velocity_set_media_mode()
2757 curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); in velocity_set_media_mode()
2760 set_mii_flow_control(vptr); in velocity_set_media_mode()
2773 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) { in velocity_set_media_mode()
2774 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); in velocity_set_media_mode()
2785 MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs); in velocity_set_media_mode()
2786 MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); in velocity_set_media_mode()
2787 MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); in velocity_set_media_mode()
2790 mii_set_auto_on(vptr); in velocity_set_media_mode()
2812 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
2818 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
2822 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); in velocity_set_media_mode()
2830 velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR); in velocity_set_media_mode()
2843 velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR); in velocity_set_media_mode()
2845 mii_set_auto_on(vptr); in velocity_set_media_mode()
2939 static void enable_flow_control_ability(struct velocity_info *vptr) in enable_flow_control_ability() argument
2942 struct mac_regs __iomem * regs = vptr->mac_regs; in enable_flow_control_ability()
2944 switch (vptr->options.flow_cntl) { in enable_flow_control_ability()
2995 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_up() local
2997 pci_set_power_state(vptr->pdev, PCI_D0); in velocity_ethtool_up()
3011 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_down() local
3013 pci_set_power_state(vptr->pdev, PCI_D3hot); in velocity_ethtool_down()
3018 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_settings() local
3019 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_get_settings()
3021 status = check_connection_type(vptr->mac_regs); in velocity_get_settings()
3052 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_settings() local
3057 curr_status = check_connection_type(vptr->mac_regs); in velocity_set_settings()
3068 velocity_set_media_mode(vptr, new_status); in velocity_set_settings()
3075 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_link() local
3076 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_get_link()
3082 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_drvinfo() local
3085 strcpy(info->bus_info, pci_name(vptr->pdev)); in velocity_get_drvinfo()
3090 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_get_wol() local
3097 if (vptr->wol_opts & VELOCITY_WOL_UCAST) in velocity_ethtool_get_wol()
3099 if (vptr->wol_opts & VELOCITY_WOL_ARP) in velocity_ethtool_get_wol()
3101 memcpy(&wol->sopass, vptr->wol_passwd, 6); in velocity_ethtool_get_wol()
3106 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_set_wol() local
3110 vptr->wol_opts = VELOCITY_WOL_MAGIC; in velocity_ethtool_set_wol()
3120 vptr->wol_opts |= VELOCITY_WOL_MAGIC; in velocity_ethtool_set_wol()
3121 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3124 vptr->wol_opts |= VELOCITY_WOL_UCAST; in velocity_ethtool_set_wol()
3125 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3128 vptr->wol_opts |= VELOCITY_WOL_ARP; in velocity_ethtool_set_wol()
3129 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3131 memcpy(vptr->wol_passwd, wol->sopass, 6); in velocity_ethtool_set_wol()
3171 struct velocity_info *vptr = netdev_priv(dev); in velocity_mii_ioctl() local
3172 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_mii_ioctl()
3184 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) in velocity_mii_ioctl()
3190 spin_lock_irqsave(&vptr->lock, flags); in velocity_mii_ioctl()
3191 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); in velocity_mii_ioctl()
3192 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_mii_ioctl()
3193 check_connection_type(vptr->mac_regs); in velocity_mii_ioctl()
3216 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context * context) in velocity_save_context() argument
3218 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_save_context()
3242 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) in velocity_restore_context() argument
3244 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_restore_context()
3320 static int velocity_set_wol(struct velocity_info *vptr) in velocity_set_wol() argument
3322 struct mac_regs __iomem * regs = vptr->mac_regs; in velocity_set_wol()
3340 if (vptr->wol_opts & VELOCITY_WOL_UCAST) { in velocity_set_wol()
3344 if (vptr->wol_opts & VELOCITY_WOL_ARP) { in velocity_set_wol()
3355 memcpy(arp->ar_tip, vptr->ip_addr, 4); in velocity_set_wol()
3369 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { in velocity_set_wol()
3370 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) in velocity_set_wol()
3371 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); in velocity_set_wol()
3373 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); in velocity_set_wol()
3376 if (vptr->mii_status & VELOCITY_SPEED_1000) in velocity_set_wol()
3377 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); in velocity_set_wol()
3400 struct velocity_info *vptr = netdev_priv(dev); in velocity_suspend() local
3403 if (!netif_running(vptr->dev)) in velocity_suspend()
3406 netif_device_detach(vptr->dev); in velocity_suspend()
3408 spin_lock_irqsave(&vptr->lock, flags); in velocity_suspend()
3411 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { in velocity_suspend()
3412 velocity_get_ip(vptr); in velocity_suspend()
3413 velocity_save_context(vptr, &vptr->context); in velocity_suspend()
3414 velocity_shutdown(vptr); in velocity_suspend()
3415 velocity_set_wol(vptr); in velocity_suspend()
3419 velocity_save_context(vptr, &vptr->context); in velocity_suspend()
3420 velocity_shutdown(vptr); in velocity_suspend()
3427 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_suspend()
3434 struct velocity_info *vptr = netdev_priv(dev); in velocity_resume() local
3438 if (!netif_running(vptr->dev)) in velocity_resume()
3445 mac_wol_reset(vptr->mac_regs); in velocity_resume()
3447 spin_lock_irqsave(&vptr->lock, flags); in velocity_resume()
3448 velocity_restore_context(vptr, &vptr->context); in velocity_resume()
3449 velocity_init_registers(vptr, VELOCITY_INIT_WOL); in velocity_resume()
3450 mac_disable_int(vptr->mac_regs); in velocity_resume()
3452 velocity_tx_srv(vptr, 0); in velocity_resume()
3454 for (i = 0; i < vptr->tx.numq; i++) { in velocity_resume()
3455 if (vptr->tx.used[i]) { in velocity_resume()
3456 mac_tx_queue_wake(vptr->mac_regs, i); in velocity_resume()
3460 mac_enable_int(vptr->mac_regs); in velocity_resume()
3461 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_resume()
3462 netif_device_attach(vptr->dev); in velocity_resume()
3473 struct velocity_info *vptr; in velocity_netdev_event() local
3480 list_for_each_entry(vptr, &velocity_dev_list, list) { in velocity_netdev_event()
3481 if (vptr->dev == dev) { in velocity_netdev_event()
3482 velocity_get_ip(vptr); in velocity_netdev_event()