Lines Matching refs:vptr
213 struct velocity_info *vptr,
219 static int velocity_soft_reset(struct velocity_info *vptr);
220 static void velocity_init_cam_filter(struct velocity_info *vptr);
221 static void mii_init(struct velocity_info *vptr, u32 mii_status);
222 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
223 static void velocity_print_link_status(struct velocity_info *vptr);
225 static void enable_flow_control_ability(struct velocity_info *vptr);
231 static int velocity_set_media_mode(struct velocity_info *vptr,
379 static void velocity_init_cam_filter(struct velocity_info *vptr) in velocity_init_cam_filter() argument
381 struct mac_regs *regs = vptr->mac_regs; in velocity_init_cam_filter()
388 memset(vptr->vCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
389 memset(vptr->mCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
390 mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); in velocity_init_cam_filter()
391 mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); in velocity_init_cam_filter()
394 if (vptr->flags & VELOCITY_FLAGS_TAGGING) { in velocity_init_cam_filter()
397 if (vptr->options.vid != 0) in velocity_init_cam_filter()
400 mac_set_cam(regs, 0, (u8 *) &(vptr->options.vid), in velocity_init_cam_filter()
402 vptr->vCAMmask[0] |= 1; in velocity_init_cam_filter()
403 mac_set_cam_mask(regs, vptr->vCAMmask, in velocity_init_cam_filter()
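
The velocity_init_cam_filter() fragments above first clear both CAM masks, then, when VLAN tagging is enabled, program the configured VLAN ID into CAM entry 0 and re-enable that entry. A minimal reconstruction of that flow is sketched below; the helper names and arguments come from the fragments, while the brace structure and any unmatched lines of the real function are assumed.

    /* Hedged sketch reconstructed from the matched fragments, not the verbatim driver. */
    static void velocity_init_cam_filter(struct velocity_info *vptr)
    {
            struct mac_regs *regs = vptr->mac_regs;

            /* Disable every VLAN and multicast CAM entry. */
            memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
            memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
            mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
            mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);

            if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
                    /* Program the configured VLAN ID into entry 0 and enable it. */
                    if (vptr->options.vid != 0) {
                            mac_set_cam(regs, 0, (u8 *) &(vptr->options.vid),
                                        VELOCITY_VLAN_ID_CAM);
                            vptr->vCAMmask[0] |= 1;
                            mac_set_cam_mask(regs, vptr->vCAMmask,
                                             VELOCITY_VLAN_ID_CAM);
                    }
            }
    }
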
414 static inline void velocity_give_many_rx_descs(struct velocity_info *vptr) in velocity_give_many_rx_descs() argument
416 struct mac_regs *regs = vptr->mac_regs; in velocity_give_many_rx_descs()
423 if (vptr->rd_filled < 4) in velocity_give_many_rx_descs()
428 unusable = vptr->rd_filled & 0x0003; in velocity_give_many_rx_descs()
429 dirty = vptr->rd_dirty - unusable; in velocity_give_many_rx_descs()
430 for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { in velocity_give_many_rx_descs()
431 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; in velocity_give_many_rx_descs()
433 vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC; in velocity_give_many_rx_descs()
436 writew(vptr->rd_filled & 0xfffc, &regs->RBRDU); in velocity_give_many_rx_descs()
437 vptr->rd_filled = unusable; in velocity_give_many_rx_descs()
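
These fragments show the batching rule used when returning RX descriptors: the hardware is only told about descriptors in multiples of four, so the low two bits of rd_filled are held back for the next call while ownership of the aligned block is flipped back to the NIC, walking backwards from rd_dirty. A reconstructed sketch follows; the loop framing and local declarations are assumed.

    static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
    {
            struct mac_regs *regs = vptr->mac_regs;
            int avail, dirty, unusable;

            /* The chip takes returned RX descriptors four at a time. */
            if (vptr->rd_filled < 4)
                    return;

            unusable = vptr->rd_filled & 0x0003;      /* remainder kept for the next call */
            dirty = vptr->rd_dirty - unusable;        /* walk back over the aligned block */
            for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
                    dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                    vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
            }

            /* Tell the NIC how many descriptors came back; carry the remainder over. */
            writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
            vptr->rd_filled = unusable;
    }
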
440 static int velocity_rx_refill(struct velocity_info *vptr) in velocity_rx_refill() argument
442 int dirty = vptr->rd_dirty, done = 0, ret = 0; in velocity_rx_refill()
446 struct rx_desc *rd = vptr->rd_ring + dirty; in velocity_rx_refill()
455 rd->rdesc0.len = cpu_to_le32(vptr->rx_buf_sz); in velocity_rx_refill()
458 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; in velocity_rx_refill()
459 } while (dirty != vptr->rd_curr); in velocity_rx_refill()
463 vptr->rd_dirty = dirty; in velocity_rx_refill()
464 vptr->rd_filled += done; in velocity_rx_refill()
465 velocity_give_many_rx_descs(vptr); in velocity_rx_refill()
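
velocity_rx_refill() walks the ring from rd_dirty up to rd_curr, re-arming each descriptor the host still owns, and then hands the batch back through velocity_give_many_rx_descs(). The sketch below is reconstructed from the fragments; the ownership check, the done counter and the if (done) guard are assumptions, not matched lines.

    static int velocity_rx_refill(struct velocity_info *vptr)
    {
            int dirty = vptr->rd_dirty, done = 0, ret = 0;

            do {
                    struct rx_desc *rd = vptr->rd_ring + dirty;

                    /* Assumed: stop at the first descriptor the NIC still owns. */
                    if (rd->rdesc0.owner == OWNED_BY_NIC)
                            break;

                    /* Re-arm the descriptor with a full-sized receive buffer. */
                    rd->rdesc0.len = cpu_to_le32(vptr->rx_buf_sz);

                    done++;
                    dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
            } while (dirty != vptr->rd_curr);

            if (done) {
                    vptr->rd_dirty = dirty;
                    vptr->rd_filled += done;
                    velocity_give_many_rx_descs(vptr);
            }
            return ret;
    }
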
481 int rd_curr = vptr->rd_curr % RX_DESC_DEF; in velocity_poll()
482 struct rx_desc *rd = &(vptr->rd_ring[rd_curr]); in velocity_poll()
502 vptr->rd_curr++; in velocity_poll()
503 vptr->rd_curr = vptr->rd_curr % RX_DESC_DEF; in velocity_poll()
504 velocity_rx_refill(vptr); in velocity_poll()
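
On the receive path, velocity_poll() indexes the ring with rd_curr modulo RX_DESC_DEF, consumes the descriptor, advances and wraps the index, and refills. A hedged sketch of that bookkeeping is below; the ownership test, the copy of the received frame, and the return values are assumptions, since they are not part of the matched lines.

    /* Inside velocity_poll(): RX bookkeeping only; status checks and the frame
     * copy into the boot ROM's packet buffer are assumed and elided. */
    int rd_curr = vptr->rd_curr % RX_DESC_DEF;
    struct rx_desc *rd = &(vptr->rd_ring[rd_curr]);

    if (rd->rdesc0.owner == OWNED_BY_NIC)
            return 0;                               /* nothing received yet (assumed) */

    /* ... copy the received frame out of the matching RX buffer here ... */

    vptr->rd_curr++;
    vptr->rd_curr = vptr->rd_curr % RX_DESC_DEF;    /* wrap the ring index */
    velocity_rx_refill(vptr);                       /* re-arm and hand slots back in fours */
    return 1;                                       /* packet delivered (assumed) */
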
525 int entry = vptr->td_curr % TX_DESC_DEF; in velocity_transmit()
526 td_ptr = &(vptr->td_rings[entry]); in velocity_transmit()
529 ptxb = vptr->txb + (entry * PKT_BUF_SZ); in velocity_transmit()
550 vptr->td_rings[entry].tdesc0.pktsize = pktlen; in velocity_transmit()
551 vptr->td_rings[entry].td_buf[0].pa_low = virt_to_bus(ptxb); in velocity_transmit()
552 vptr->td_rings[entry].td_buf[0].pa_high &= in velocity_transmit()
554 vptr->td_rings[entry].td_buf[0].bufsize = in velocity_transmit()
555 vptr->td_rings[entry].tdesc0.pktsize; in velocity_transmit()
556 vptr->td_rings[entry].tdesc1.CMDZ = 2; in velocity_transmit()
567 if (vptr->flags & VELOCITY_FLAGS_TAGGING) { in velocity_transmit()
568 td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff); in velocity_transmit()
574 vptr->td_curr = (entry + 1); in velocity_transmit()
583 td_ptr = &(vptr->td_rings[prev]); in velocity_transmit()
585 mac_tx_queue_wake(vptr->mac_regs, 0); in velocity_transmit()
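
velocity_transmit() copies the frame into a per-slot bounce buffer, fills in one TX descriptor (size, buffer address, segment count, optional VLAN tag), advances td_curr and kicks TX queue 0. The sketch below covers only the matched fragments; the parameter list and the memcpy are assumed, the mask applied to pa_high is not visible in this listing, and the hand-off via the previous descriptor (the td_rings[prev] line) is not reconstructed.

    static void velocity_transmit_sketch(struct velocity_info *vptr,
                                         const char *packet, unsigned int pktlen)
    {
            struct tx_desc *td_ptr;
            unsigned char *ptxb;
            int entry = vptr->td_curr % TX_DESC_DEF;     /* ring slot for this frame */

            td_ptr = &(vptr->td_rings[entry]);
            ptxb = vptr->txb + (entry * PKT_BUF_SZ);     /* per-slot bounce buffer */
            memcpy(ptxb, packet, pktlen);                /* assumed copy into the bounce buffer */

            vptr->td_rings[entry].tdesc0.pktsize = pktlen;
            vptr->td_rings[entry].td_buf[0].pa_low = virt_to_bus(ptxb);
            /* td_buf[0].pa_high is also masked in the original; the operand is not
             * visible in this listing, so that line is omitted here. */
            vptr->td_rings[entry].td_buf[0].bufsize =
                    vptr->td_rings[entry].tdesc0.pktsize;
            vptr->td_rings[entry].tdesc1.CMDZ = 2;       /* value taken from the fragment above */

            if (vptr->flags & VELOCITY_FLAGS_TAGGING)
                    td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff);

            vptr->td_curr = (entry + 1);                 /* wrapped via % on the next call */
            mac_tx_queue_wake(vptr->mac_regs, 0);        /* kick TX queue 0 */
    }
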
613 struct mac_regs *regs = vptr->mac_regs; in velocity_disable()
624 vptr->flags &= (~VELOCITY_FLAGS_OPENED); in velocity_disable()
679 vptr = &vptx; in velocity_probe()
682 velocity_init_info(pci, vptr, info); in velocity_probe()
687 ret = velocity_get_pci_info(vptr, pci); in velocity_probe()
693 regs = ioremap(vptr->memaddr, vptr->io_size); in velocity_probe()
699 vptr->mac_regs = regs; in velocity_probe()
701 BASE = vptr->ioaddr; in velocity_probe()
703 printf("Chip ID: %hX\n", vptr->chip_id); in velocity_probe()
711 velocity_get_options(&vptr->options, 0, pci->driver_name); in velocity_probe()
716 vptr->options.flags &= 0x00FFFFFFUL; //info->flags = 0x00FFFFFFUL; in velocity_probe()
722 vptr->flags = in velocity_probe()
723 vptr->options. in velocity_probe()
726 vptr->wol_opts = vptr->options.wol_opts; in velocity_probe()
727 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_probe()
729 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); in velocity_probe()
731 if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) { in velocity_probe()
738 check_connection_type(vptr->mac_regs); in velocity_probe()
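
Read in order, the velocity_probe() fragments sketch the bring-up sequence. The comment block below summarizes only what is visible above; error handling and unmatched lines are omitted.

    /* velocity_probe() sequence as visible in the fragments above (summary only):
     *
     *   vptr = &vptx;                                  use the single static context
     *   velocity_init_info(pci, vptr, info);           copy chip info into vptr
     *   velocity_get_pci_info(vptr, pci);              read revision and BAR addresses
     *   regs = ioremap(vptr->memaddr, vptr->io_size);  map the MMIO registers
     *   vptr->mac_regs = regs;  BASE = vptr->ioaddr;
     *   velocity_get_options(&vptr->options, 0, pci->driver_name);
     *   vptr->options.flags &= 0x00FFFFFFUL;           mask option flags
     *   vptr->flags = vptr->options. ...;              continuation not matched here
     *   vptr->wol_opts = vptr->options.wol_opts;
     *   vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
     *   vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); cache the PHY identifier
     *   check_connection_type(vptr->mac_regs);         probe the current media
     */
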
759 struct velocity_info *vptr, in velocity_init_info() argument
762 memset(vptr, 0, sizeof(struct velocity_info)); in velocity_init_info()
764 vptr->pdev = pdev; in velocity_init_info()
765 vptr->chip_id = info->chip_id; in velocity_init_info()
766 vptr->io_size = info->io_size; in velocity_init_info()
767 vptr->num_txq = info->txqueue; in velocity_init_info()
768 vptr->multicast_limit = MCAM_SIZE; in velocity_init_info()
772 vptr->chip_id, (unsigned int) vptr->io_size, vptr->num_txq, in velocity_init_info()
773 vptr->multicast_limit); in velocity_init_info()
851 static int velocity_get_pci_info(struct velocity_info *vptr, in velocity_get_pci_info() argument
854 if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) { in velocity_get_pci_info()
861 vptr->ioaddr = pci_bar_start(pdev, PCI_BASE_ADDRESS_0); in velocity_get_pci_info()
862 vptr->memaddr = pci_bar_start(pdev, PCI_BASE_ADDRESS_1); in velocity_get_pci_info()
883 vptr->pdev = pdev; in velocity_get_pci_info()
897 static void velocity_print_link_status(struct velocity_info *vptr) in velocity_print_link_status() argument
900 if (vptr->mii_status & VELOCITY_LINK_FAIL) { in velocity_print_link_status()
902 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_print_link_status()
905 if (vptr->mii_status & VELOCITY_SPEED_1000) in velocity_print_link_status()
907 else if (vptr->mii_status & VELOCITY_SPEED_100) in velocity_print_link_status()
912 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in velocity_print_link_status()
918 switch (vptr->options.spd_dpx) { in velocity_print_link_status()
945 static void velocity_rx_reset(struct velocity_info *vptr) in velocity_rx_reset() argument
948 struct mac_regs *regs = vptr->mac_regs; in velocity_rx_reset()
956 for (i = 0; i < vptr->options.numrx; ++i) in velocity_rx_reset()
957 vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC; in velocity_rx_reset()
960 writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo); in velocity_rx_reset()
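
velocity_rx_reset() hands every RX descriptor back to the NIC and reprograms the RX ring base register. A reconstructed sketch of the matched lines:

    static void velocity_rx_reset(struct velocity_info *vptr)
    {
            struct mac_regs *regs = vptr->mac_regs;
            int i;

            /* Only the lines matched above are reconstructed in this sketch. */

            /* Give every RX descriptor back to the NIC ... */
            for (i = 0; i < vptr->options.numrx; ++i)
                    vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;

            /* ... and point the RX engine at the start of the ring again. */
            writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo);
    }
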
975 struct velocity_info *vptr, in velocity_init_registers() argument
978 struct mac_regs *regs = vptr->mac_regs; in velocity_init_registers()
992 velocity_rx_reset(vptr); in velocity_init_registers()
996 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
998 if (velocity_set_media_mode(vptr, mii_status) != in velocity_init_registers()
1000 velocity_print_link_status(vptr); in velocity_init_registers()
1001 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
1006 enable_flow_control_ability(vptr); in velocity_init_registers()
1020 velocity_soft_reset(vptr); in velocity_init_registers()
1031 mac_set_rx_thresh(regs, vptr->options.rx_thresh); in velocity_init_registers()
1032 mac_set_dma_length(regs, vptr->options.DMA_length); in velocity_init_registers()
1045 velocity_init_cam_filter(vptr); in velocity_init_registers()
1057 vptr->int_mask = INT_MASK_DEF; in velocity_init_registers()
1059 writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo); in velocity_init_registers()
1060 writew(vptr->options.numrx - 1, ®s->RDCSize); in velocity_init_registers()
1064 writew(vptr->options.numtx - 1, ®s->TDCSize); in velocity_init_registers()
1067 writel(virt_to_le32desc(vptr->td_rings), in velocity_init_registers()
1072 init_flow_control_register(vptr); in velocity_init_registers()
1078 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
1081 mii_init(vptr, mii_status); in velocity_init_registers()
1083 if (velocity_set_media_mode(vptr, mii_status) != in velocity_init_registers()
1085 velocity_print_link_status(vptr); in velocity_init_registers()
1086 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
1091 enable_flow_control_ability(vptr); in velocity_init_registers()
1093 mac_write_int_mask(vptr->int_mask, regs); in velocity_init_registers()
1098 velocity_print_link_status(vptr); in velocity_init_registers()
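
velocity_init_registers() is the largest consumer of vptr in this listing. The comment block below lays out the order of the matched calls and register writes, split between what appears to be the warm (WOL resume) path at the top and the cold-initialisation path below it; register names that are cut off in the listing are marked as not visible.

    /* velocity_init_registers() call sequence, from the fragments above
     * (a summary, not the complete function; braces and conditions are omitted):
     *
     *  path following velocity_rx_reset() (apparently the WOL/resume case):
     *    velocity_rx_reset(vptr);
     *    mii_status = velocity_get_opt_media_mode(vptr);
     *    velocity_set_media_mode(vptr, mii_status);
     *    velocity_print_link_status(vptr);              plus a VELOCITY_LINK_FAIL check
     *    enable_flow_control_ability(vptr);
     *
     *  full (cold) initialisation:
     *    velocity_soft_reset(vptr);
     *    mac_set_rx_thresh(regs, vptr->options.rx_thresh);
     *    mac_set_dma_length(regs, vptr->options.DMA_length);
     *    velocity_init_cam_filter(vptr);
     *    vptr->int_mask = INT_MASK_DEF;
     *    writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo);
     *    writew(vptr->options.numrx - 1, &regs->RDCSize);
     *    writew(vptr->options.numtx - 1, &regs->TDCSize);
     *    writel(virt_to_le32desc(vptr->td_rings), ...); TX base register not visible here
     *    init_flow_control_register(vptr);
     *    mii_status = velocity_get_opt_media_mode(vptr);
     *    mii_init(vptr, mii_status);
     *    velocity_set_media_mode(vptr, mii_status);
     *    velocity_print_link_status(vptr);              plus a VELOCITY_LINK_FAIL check
     *    enable_flow_control_ability(vptr);
     *    mac_write_int_mask(vptr->int_mask, regs);
     *    velocity_print_link_status(vptr);
     */
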
1109 static int velocity_soft_reset(struct velocity_info *vptr) in velocity_soft_reset() argument
1111 struct mac_regs *regs = vptr->mac_regs; in velocity_soft_reset()
1139 static int velocity_init_rings(struct velocity_info *vptr) in velocity_init_rings() argument
1144 vptr->rd_curr = 0; in velocity_init_rings()
1145 vptr->td_curr = 0; in velocity_init_rings()
1146 memset(vptr->td_rings, 0, TX_DESC_DEF * sizeof(struct tx_desc)); in velocity_init_rings()
1147 memset(vptr->rd_ring, 0, RX_DESC_DEF * sizeof(struct rx_desc)); in velocity_init_rings()
1152 vptr->rd_ring[idx].rdesc0.RSR = 0; in velocity_init_rings()
1153 vptr->rd_ring[idx].rdesc0.len = 0; in velocity_init_rings()
1154 vptr->rd_ring[idx].rdesc0.reserved = 0; in velocity_init_rings()
1155 vptr->rd_ring[idx].rdesc0.owner = 0; in velocity_init_rings()
1156 vptr->rd_ring[idx].len = cpu_to_le32(vptr->rx_buf_sz); in velocity_init_rings()
1157 vptr->rd_ring[idx].inten = 1; in velocity_init_rings()
1158 vptr->rd_ring[idx].pa_low = in velocity_init_rings()
1159 virt_to_bus(vptr->rxb + (RX_DESC_DEF * idx)); in velocity_init_rings()
1160 vptr->rd_ring[idx].pa_high = 0; in velocity_init_rings()
1161 vptr->rd_ring[idx].rdesc0.owner = OWNED_BY_NIC; in velocity_init_rings()
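
The initialisation loop above leaves every RX slot with inten set and owner flipped to OWNED_BY_NIC, which is the handshake the RX path relies on: the NIC owns a slot until it has written a frame into it, and the host gives it back after refilling. The standalone program below (illustrative only, not driver code) walks through that ownership handshake with plain integers.

    #include <stdio.h>

    #define RING_SIZE 4
    enum owner { OWNED_BY_HOST = 0, OWNED_BY_NIC = 1 };

    int main(void)
    {
            enum owner ring[RING_SIZE];
            int i;

            /* After init: every slot belongs to the NIC, as in velocity_init_rings(). */
            for (i = 0; i < RING_SIZE; i++)
                    ring[i] = OWNED_BY_NIC;

            /* Pretend the NIC completed slot 0: it flips ownership back to the host. */
            ring[0] = OWNED_BY_HOST;

            /* The host processes and refills slot 0, then returns it to the NIC. */
            ring[0] = OWNED_BY_NIC;

            for (i = 0; i < RING_SIZE; i++)
                    printf("slot %d owned by %s\n", i,
                           ring[i] == OWNED_BY_NIC ? "NIC" : "host");
            return 0;
    }
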
1231 vptr->TxDescArrays = tx_ring; in velocity_open()
1232 if (vptr->TxDescArrays == 0) in velocity_open()
1236 TxPhyAddr = virt_to_bus(vptr->TxDescArrays); in velocity_open()
1240 vptr->td_rings = (struct tx_desc *) (vptr->TxDescArrays + diff); in velocity_open()
1242 printf("Aligned Address: %lX\n", virt_to_bus(vptr->td_rings)); in velocity_open()
1243 vptr->tx_buffs = txb; in velocity_open()
1245 TxBufPhyAddr = virt_to_bus(vptr->tx_buffs); in velocity_open()
1248 vptr->txb = (unsigned char *) (vptr->tx_buffs + diff); in velocity_open()
1250 vptr->RxDescArrays = rx_ring; in velocity_open()
1252 RxPhyAddr = virt_to_bus(vptr->RxDescArrays); in velocity_open()
1255 vptr->rd_ring = (struct rx_desc *) (vptr->RxDescArrays + diff); in velocity_open()
1257 vptr->rx_buffs = rxb; in velocity_open()
1259 RxBufPhyAddr = virt_to_bus(vptr->rx_buffs); in velocity_open()
1262 vptr->rxb = (unsigned char *) (vptr->rx_buffs + diff); in velocity_open()
1264 if (vptr->TxDescArrays == NULL || vptr->RxDescArrays == NULL) { in velocity_open()
1269 vptr->rx_buf_sz = PKT_BUF_SZ; in velocity_open()
1279 ret = velocity_init_rings(vptr); in velocity_open()
1284 velocity_init_registers(nic, vptr, VELOCITY_INIT_COLD); in velocity_open()
1285 mac_write_int_mask(0, vptr->mac_regs); in velocity_open()
1289 vptr->flags |= VELOCITY_FLAGS_OPENED; in velocity_open()
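
velocity_open() aligns its statically allocated descriptor rings and packet buffers by hand: it takes the bus address of the raw array, computes how far it is from the next aligned boundary (the diff value, whose computation is not matched in this listing), and offsets the usable pointer by that amount. A standalone illustration of that alignment arithmetic follows, with a 64-byte boundary assumed purely for the example.

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGNMENT 64u   /* assumed boundary, for illustration only */

    int main(void)
    {
            static unsigned char raw[1024 + ALIGNMENT];
            uintptr_t addr = (uintptr_t) raw;    /* stand-in for virt_to_bus(raw) */
            uintptr_t diff = (ALIGNMENT - (addr % ALIGNMENT)) % ALIGNMENT;
            unsigned char *aligned = raw + diff; /* plays the role of vptr->td_rings etc. */

            printf("raw=%p diff=%lu aligned=%p\n",
                   (void *) raw, (unsigned long) diff, (void *) aligned);
            return 0;
    }
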
1307 static void mii_init(struct velocity_info *vptr, u32 mii_status __unused) in mii_init() argument
1311 switch (PHYID_GET_PHY_ID(vptr->phy_id)) { in mii_init()
1317 vptr->mac_regs); in mii_init()
1323 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1325 vptr->mac_regs); in mii_init()
1328 vptr->mac_regs); in mii_init()
1332 MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs); in mii_init()
1340 vptr->mac_regs); in mii_init()
1346 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1348 vptr->mac_regs); in mii_init()
1351 vptr->mac_regs); in mii_init()
1359 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); in mii_init()
1364 vptr->mac_regs); in mii_init()
1369 velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR); in mii_init()
1372 velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR); in mii_init()
1512 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) in velocity_get_opt_media_mode() argument
1516 switch (vptr->options.spd_dpx) { in velocity_get_opt_media_mode()
1533 vptr->mii_status = status; in velocity_get_opt_media_mode()
1544 static void mii_set_auto_on(struct velocity_info *vptr) in mii_set_auto_on() argument
1546 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs)) in mii_set_auto_on()
1547 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); in mii_set_auto_on()
1549 MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); in mii_set_auto_on()
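
The two fragments above are essentially the whole helper: if auto-negotiation is already enabled, restart it; otherwise enable it. A reconstructed sketch, with the if/else framing assumed:

    static void mii_set_auto_on(struct velocity_info *vptr)
    {
            if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
                    MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);  /* restart autoneg */
            else
                    MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);    /* enable autoneg  */
    }
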
1568 static void set_mii_flow_control(struct velocity_info *vptr) in set_mii_flow_control() argument
1571 switch (vptr->options.flow_cntl) { in set_mii_flow_control()
1573 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
1574 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
1578 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
1579 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
1583 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
1584 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
1588 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); in set_mii_flow_control()
1590 vptr->mac_regs); in set_mii_flow_control()
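
set_mii_flow_control() advertises PAUSE capability through the ANAR register, switching on options.flow_cntl. The case labels are cut off in this listing, so the FLOW_CNTL_* names below are assumed for illustration; only the ANAR bit operations come from the matched lines.

    static void set_mii_flow_control_sketch(struct velocity_info *vptr)
    {
            switch (vptr->options.flow_cntl) {
            case FLOW_CNTL_TX:          /* assumed label */
                    MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
                    MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
                    break;
            case FLOW_CNTL_RX:          /* assumed label */
                    MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
                    MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
                    break;
            case FLOW_CNTL_TX_RX:       /* assumed label */
                    MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
                    MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
                    break;
            default:                    /* flow control disabled */
                    MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
                    MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
                    break;
            }
    }
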
1606 static int velocity_set_media_mode(struct velocity_info *vptr, in velocity_set_media_mode() argument
1610 struct mac_regs *regs = vptr->mac_regs; in velocity_set_media_mode()
1612 vptr->mii_status = mii_check_media_mode(vptr->mac_regs); in velocity_set_media_mode()
1613 curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); in velocity_set_media_mode()
1616 set_mii_flow_control(vptr); in velocity_set_media_mode()
1629 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) { in velocity_set_media_mode()
1631 vptr->mac_regs); in velocity_set_media_mode()
1643 MII_REG_ANAR, vptr->mac_regs); in velocity_set_media_mode()
1645 MII_REG_G1000CR, vptr->mac_regs); in velocity_set_media_mode()
1647 vptr->mac_regs); in velocity_set_media_mode()
1650 mii_set_auto_on(vptr); in velocity_set_media_mode()
1673 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
1680 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
1685 MII_REG_G1000CR, vptr->mac_regs); in velocity_set_media_mode()
1694 velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR); in velocity_set_media_mode()
1707 velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR); in velocity_set_media_mode()
1709 mii_set_auto_on(vptr); in velocity_set_media_mode()
1809 static void enable_flow_control_ability(struct velocity_info *vptr) in enable_flow_control_ability() argument
1812 struct mac_regs *regs = vptr->mac_regs; in enable_flow_control_ability()
1814 switch (vptr->options.flow_cntl) { in enable_flow_control_ability()