Lines Matching refs:ap
91 #define ACE_IS_TIGON_I(ap) 0 argument
92 #define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES argument
94 #define ACE_IS_TIGON_I(ap) (ap->version == 1) argument
95 #define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries argument
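The two pairs of definitions above (source lines 91-95) are alternate, config-dependent variants of the same macros: one pair compiles Tigon I support out entirely so the checks fold to constants, the other dispatches on the per-adapter state in ap at run time. A minimal sketch of how they likely fit together, assuming the guard symbol is CONFIG_ACENIC_OMIT_TIGON_I (the exact Kconfig name is an assumption here):

    /* Sketch: with Tigon I support compiled out, ACE_IS_TIGON_I() and
     * ACE_TX_RING_ENTRIES() become compile-time constants; otherwise
     * they read the adapter's detected version and ring size. */
    #ifdef CONFIG_ACENIC_OMIT_TIGON_I
    #define ACE_IS_TIGON_I(ap)      0
    #define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
    #else
    #define ACE_IS_TIGON_I(ap)      (ap->version == 1)
    #define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
    #endif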
460 struct ace_private *ap; in acenic_probe_one() local
469 ap = netdev_priv(dev); in acenic_probe_one()
470 ap->pdev = pdev; in acenic_probe_one()
471 ap->name = pci_name(pdev); in acenic_probe_one()
495 pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command); in acenic_probe_one()
498 if (!(ap->pci_command & PCI_COMMAND_MEMORY)) { in acenic_probe_one()
501 ap->name); in acenic_probe_one()
502 ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY; in acenic_probe_one()
503 pci_write_config_word(ap->pdev, PCI_COMMAND, in acenic_probe_one()
504 ap->pci_command); in acenic_probe_one()
508 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency); in acenic_probe_one()
509 if (ap->pci_latency <= 0x40) { in acenic_probe_one()
510 ap->pci_latency = 0x40; in acenic_probe_one()
511 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency); in acenic_probe_one()
520 ap->regs = ioremap(dev->base_addr, 0x4000); in acenic_probe_one()
521 if (!ap->regs) { in acenic_probe_one()
524 ap->name, boards_found); in acenic_probe_one()
532 ap->name); in acenic_probe_one()
535 ap->name); in acenic_probe_one()
539 printk(KERN_INFO "%s: 3Com 3C985 ", ap->name); in acenic_probe_one()
542 printk(KERN_INFO "%s: NetGear GA620 ", ap->name); in acenic_probe_one()
547 ap->name); in acenic_probe_one()
551 printk(KERN_INFO "%s: SGI AceNIC ", ap->name); in acenic_probe_one()
554 printk(KERN_INFO "%s: Unknown AceNIC ", ap->name); in acenic_probe_one()
562 if ((readl(&ap->regs->HostCtrl) >> 28) == 4) { in acenic_probe_one()
574 ap->board_idx = BOARD_IDX_OVERFLOW; in acenic_probe_one()
576 ap->board_idx = boards_found; in acenic_probe_one()
578 ap->board_idx = BOARD_IDX_STATIC; in acenic_probe_one()
588 ap->name = dev->name; in acenic_probe_one()
590 if (ap->pci_using_dac) in acenic_probe_one()
608 struct ace_private *ap = netdev_priv(dev); in acenic_remove_one() local
609 struct ace_regs __iomem *regs = ap->regs; in acenic_remove_one()
615 if (ap->version >= 2) in acenic_remove_one()
636 struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb; in acenic_remove_one()
642 ringp = &ap->skb->rx_std_skbuff[i]; in acenic_remove_one()
644 pci_unmap_page(ap->pdev, mapping, in acenic_remove_one()
648 ap->rx_std_ring[i].size = 0; in acenic_remove_one()
649 ap->skb->rx_std_skbuff[i].skb = NULL; in acenic_remove_one()
654 if (ap->version >= 2) { in acenic_remove_one()
656 struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb; in acenic_remove_one()
662 ringp = &ap->skb->rx_mini_skbuff[i]; in acenic_remove_one()
664 pci_unmap_page(ap->pdev, mapping, in acenic_remove_one()
668 ap->rx_mini_ring[i].size = 0; in acenic_remove_one()
669 ap->skb->rx_mini_skbuff[i].skb = NULL; in acenic_remove_one()
676 struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb; in acenic_remove_one()
681 ringp = &ap->skb->rx_jumbo_skbuff[i]; in acenic_remove_one()
683 pci_unmap_page(ap->pdev, mapping, in acenic_remove_one()
687 ap->rx_jumbo_ring[i].size = 0; in acenic_remove_one()
688 ap->skb->rx_jumbo_skbuff[i].skb = NULL; in acenic_remove_one()
706 struct ace_private *ap = netdev_priv(dev); in ace_free_descriptors() local
709 if (ap->rx_std_ring != NULL) { in ace_free_descriptors()
715 pci_free_consistent(ap->pdev, size, ap->rx_std_ring, in ace_free_descriptors()
716 ap->rx_ring_base_dma); in ace_free_descriptors()
717 ap->rx_std_ring = NULL; in ace_free_descriptors()
718 ap->rx_jumbo_ring = NULL; in ace_free_descriptors()
719 ap->rx_mini_ring = NULL; in ace_free_descriptors()
720 ap->rx_return_ring = NULL; in ace_free_descriptors()
722 if (ap->evt_ring != NULL) { in ace_free_descriptors()
724 pci_free_consistent(ap->pdev, size, ap->evt_ring, in ace_free_descriptors()
725 ap->evt_ring_dma); in ace_free_descriptors()
726 ap->evt_ring = NULL; in ace_free_descriptors()
728 if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) { in ace_free_descriptors()
730 pci_free_consistent(ap->pdev, size, ap->tx_ring, in ace_free_descriptors()
731 ap->tx_ring_dma); in ace_free_descriptors()
733 ap->tx_ring = NULL; in ace_free_descriptors()
735 if (ap->evt_prd != NULL) { in ace_free_descriptors()
736 pci_free_consistent(ap->pdev, sizeof(u32), in ace_free_descriptors()
737 (void *)ap->evt_prd, ap->evt_prd_dma); in ace_free_descriptors()
738 ap->evt_prd = NULL; in ace_free_descriptors()
740 if (ap->rx_ret_prd != NULL) { in ace_free_descriptors()
741 pci_free_consistent(ap->pdev, sizeof(u32), in ace_free_descriptors()
742 (void *)ap->rx_ret_prd, in ace_free_descriptors()
743 ap->rx_ret_prd_dma); in ace_free_descriptors()
744 ap->rx_ret_prd = NULL; in ace_free_descriptors()
746 if (ap->tx_csm != NULL) { in ace_free_descriptors()
747 pci_free_consistent(ap->pdev, sizeof(u32), in ace_free_descriptors()
748 (void *)ap->tx_csm, ap->tx_csm_dma); in ace_free_descriptors()
749 ap->tx_csm = NULL; in ace_free_descriptors()
756 struct ace_private *ap = netdev_priv(dev); in ace_allocate_descriptors() local
765 ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size, in ace_allocate_descriptors()
766 &ap->rx_ring_base_dma); in ace_allocate_descriptors()
767 if (ap->rx_std_ring == NULL) in ace_allocate_descriptors()
770 ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES; in ace_allocate_descriptors()
771 ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES; in ace_allocate_descriptors()
772 ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES; in ace_allocate_descriptors()
776 ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma); in ace_allocate_descriptors()
778 if (ap->evt_ring == NULL) in ace_allocate_descriptors()
785 if (!ACE_IS_TIGON_I(ap)) { in ace_allocate_descriptors()
788 ap->tx_ring = pci_alloc_consistent(ap->pdev, size, in ace_allocate_descriptors()
789 &ap->tx_ring_dma); in ace_allocate_descriptors()
791 if (ap->tx_ring == NULL) in ace_allocate_descriptors()
795 ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), in ace_allocate_descriptors()
796 &ap->evt_prd_dma); in ace_allocate_descriptors()
797 if (ap->evt_prd == NULL) in ace_allocate_descriptors()
800 ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), in ace_allocate_descriptors()
801 &ap->rx_ret_prd_dma); in ace_allocate_descriptors()
802 if (ap->rx_ret_prd == NULL) in ace_allocate_descriptors()
805 ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32), in ace_allocate_descriptors()
806 &ap->tx_csm_dma); in ace_allocate_descriptors()
807 if (ap->tx_csm == NULL) in ace_allocate_descriptors()
825 struct ace_private *ap; in ace_init_cleanup() local
827 ap = netdev_priv(dev); in ace_init_cleanup()
831 if (ap->info) in ace_init_cleanup()
832 pci_free_consistent(ap->pdev, sizeof(struct ace_info), in ace_init_cleanup()
833 ap->info, ap->info_dma); in ace_init_cleanup()
834 kfree(ap->skb); in ace_init_cleanup()
835 kfree(ap->trace_buf); in ace_init_cleanup()
840 iounmap(ap->regs); in ace_init_cleanup()
862 struct ace_private *ap; in ace_init() local
873 ap = netdev_priv(dev); in ace_init()
874 regs = ap->regs; in ace_init()
876 board_idx = ap->board_idx; in ace_init()
917 tig_ver, ap->firmware_major, ap->firmware_minor, in ace_init()
918 ap->firmware_fix); in ace_init()
920 ap->version = 1; in ace_init()
921 ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES; in ace_init()
926 tig_ver, ap->firmware_major, ap->firmware_minor, in ace_init()
927 ap->firmware_fix); in ace_init()
937 ap->version = 2; in ace_init()
938 ap->tx_ring_entries = MAX_TX_RING_ENTRIES; in ace_init()
1006 pdev = ap->pdev; in ace_init()
1026 ap->pci_latency); in ace_init()
1039 if (ap->version >= 2) { in ace_init()
1046 if (ap->pci_command & PCI_COMMAND_INVALIDATE) { in ace_init()
1047 ap->pci_command &= ~PCI_COMMAND_INVALIDATE; in ace_init()
1049 ap->pci_command); in ace_init()
1053 } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) { in ace_init()
1074 ap->pci_command &= ~PCI_COMMAND_INVALIDATE; in ace_init()
1076 ap->pci_command); in ace_init()
1121 if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) { in ace_init()
1123 ap->pci_command |= PCI_COMMAND_FAST_BACK; in ace_init()
1124 pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command); in ace_init()
1132 ap->pci_using_dac = 1; in ace_init()
1134 ap->pci_using_dac = 0; in ace_init()
1145 if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info), in ace_init()
1146 &ap->info_dma))) { in ace_init()
1150 ap->info = info; in ace_init()
1155 if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) { in ace_init()
1170 spin_lock_init(&ap->debug_lock); in ace_init()
1171 ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1; in ace_init()
1172 ap->last_std_rx = 0; in ace_init()
1173 ap->last_mini_rx = 0; in ace_init()
1176 memset(ap->info, 0, sizeof(struct ace_info)); in ace_init()
1177 memset(ap->skb, 0, sizeof(struct ace_skb)); in ace_init()
1183 ap->fw_running = 0; in ace_init()
1185 tmp_ptr = ap->info_dma; in ace_init()
1189 memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event)); in ace_init()
1191 set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma); in ace_init()
1194 *(ap->evt_prd) = 0; in ace_init()
1196 set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma); in ace_init()
1209 tmp_ptr = ap->info_dma; in ace_init()
1213 set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma); in ace_init()
1218 memset(ap->rx_std_ring, 0, in ace_init()
1222 ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM; in ace_init()
1224 ap->rx_std_skbprd = 0; in ace_init()
1225 atomic_set(&ap->cur_rx_bufs, 0); in ace_init()
1228 (ap->rx_ring_base_dma + in ace_init()
1234 memset(ap->rx_jumbo_ring, 0, in ace_init()
1238 ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO; in ace_init()
1240 ap->rx_jumbo_skbprd = 0; in ace_init()
1241 atomic_set(&ap->cur_jumbo_bufs, 0); in ace_init()
1243 memset(ap->rx_mini_ring, 0, in ace_init()
1246 if (ap->version >= 2) { in ace_init()
1248 (ap->rx_ring_base_dma + in ace_init()
1257 ap->rx_mini_ring[i].flags = in ace_init()
1265 ap->rx_mini_skbprd = 0; in ace_init()
1266 atomic_set(&ap->cur_mini_bufs, 0); in ace_init()
1269 (ap->rx_ring_base_dma + in ace_init()
1277 memset(ap->rx_return_ring, 0, in ace_init()
1280 set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma); in ace_init()
1281 *(ap->rx_ret_prd) = 0; in ace_init()
1285 if (ACE_IS_TIGON_I(ap)) { in ace_init()
1286 ap->tx_ring = (__force struct tx_desc *) regs->Window; in ace_init()
1289 writel(0, (__force void __iomem *)ap->tx_ring + i * 4); in ace_init()
1293 memset(ap->tx_ring, 0, in ace_init()
1296 set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma); in ace_init()
1299 info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap); in ace_init()
1305 if (!ACE_IS_TIGON_I(ap)) in ace_init()
1312 set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma); in ace_init()
1343 ap->name, ACE_MAX_MOD_PARMS); in ace_init()
1369 if(ap->version >= 2) in ace_init()
1382 ap->name); in ace_init()
1395 "forcing auto negotiation\n", ap->name); in ace_init()
1403 "negotiation\n", ap->name); in ace_init()
1406 if ((option & 0x400) && (ap->version >= 2)) { in ace_init()
1408 ap->name); in ace_init()
1413 ap->link = tmp; in ace_init()
1415 if (ap->version >= 2) in ace_init()
1418 writel(ap->firmware_start, &regs->Pc); in ace_init()
1428 ap->cur_rx = 0; in ace_init()
1429 ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0; in ace_init()
1432 ace_set_txprd(regs, ap, 0); in ace_init()
1453 while (time_before(jiffies, myjif) && !ap->fw_running) in ace_init()
1456 if (!ap->fw_running) { in ace_init()
1457 printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name); in ace_init()
1459 ace_dump_trace(ap); in ace_init()
1472 if (ap->version >= 2) in ace_init()
1486 if (!test_and_set_bit(0, &ap->std_refill_busy)) in ace_init()
1490 ap->name); in ace_init()
1491 if (ap->version >= 2) { in ace_init()
1492 if (!test_and_set_bit(0, &ap->mini_refill_busy)) in ace_init()
1496 "the RX mini ring\n", ap->name); in ace_init()
1508 struct ace_private *ap = netdev_priv(dev); in ace_set_rxtx_parms() local
1509 struct ace_regs __iomem *regs = ap->regs; in ace_set_rxtx_parms()
1510 int board_idx = ap->board_idx; in ace_set_rxtx_parms()
1547 struct ace_private *ap = netdev_priv(dev); in ace_watchdog() local
1548 struct ace_regs __iomem *regs = ap->regs; in ace_watchdog()
1555 if (*ap->tx_csm != ap->tx_ret_csm) { in ace_watchdog()
1572 struct ace_private *ap = netdev_priv(dev); in ace_tasklet() local
1575 cur_size = atomic_read(&ap->cur_rx_bufs); in ace_tasklet()
1577 !test_and_set_bit(0, &ap->std_refill_busy)) { in ace_tasklet()
1584 if (ap->version >= 2) { in ace_tasklet()
1585 cur_size = atomic_read(&ap->cur_mini_bufs); in ace_tasklet()
1587 !test_and_set_bit(0, &ap->mini_refill_busy)) { in ace_tasklet()
1596 cur_size = atomic_read(&ap->cur_jumbo_bufs); in ace_tasklet()
1597 if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) && in ace_tasklet()
1598 !test_and_set_bit(0, &ap->jumbo_refill_busy)) { in ace_tasklet()
1604 ap->tasklet_pending = 0; in ace_tasklet()
1611 static void ace_dump_trace(struct ace_private *ap) in ace_dump_trace() argument
1614 if (!ap->trace_buf) in ace_dump_trace()
1615 if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL))) in ace_dump_trace()
1630 struct ace_private *ap = netdev_priv(dev); in ace_load_std_rx_ring() local
1631 struct ace_regs __iomem *regs = ap->regs; in ace_load_std_rx_ring()
1635 prefetchw(&ap->cur_rx_bufs); in ace_load_std_rx_ring()
1637 idx = ap->rx_std_skbprd; in ace_load_std_rx_ring()
1648 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), in ace_load_std_rx_ring()
1652 ap->skb->rx_std_skbuff[idx].skb = skb; in ace_load_std_rx_ring()
1653 dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], in ace_load_std_rx_ring()
1656 rd = &ap->rx_std_ring[idx]; in ace_load_std_rx_ring()
1666 atomic_add(i, &ap->cur_rx_bufs); in ace_load_std_rx_ring()
1667 ap->rx_std_skbprd = idx; in ace_load_std_rx_ring()
1669 if (ACE_IS_TIGON_I(ap)) { in ace_load_std_rx_ring()
1673 cmd.idx = ap->rx_std_skbprd; in ace_load_std_rx_ring()
1681 clear_bit(0, &ap->std_refill_busy); in ace_load_std_rx_ring()
1693 struct ace_private *ap = netdev_priv(dev); in ace_load_mini_rx_ring() local
1694 struct ace_regs __iomem *regs = ap->regs; in ace_load_mini_rx_ring()
1697 prefetchw(&ap->cur_mini_bufs); in ace_load_mini_rx_ring()
1699 idx = ap->rx_mini_skbprd; in ace_load_mini_rx_ring()
1709 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), in ace_load_mini_rx_ring()
1713 ap->skb->rx_mini_skbuff[idx].skb = skb; in ace_load_mini_rx_ring()
1714 dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], in ace_load_mini_rx_ring()
1717 rd = &ap->rx_mini_ring[idx]; in ace_load_mini_rx_ring()
1727 atomic_add(i, &ap->cur_mini_bufs); in ace_load_mini_rx_ring()
1729 ap->rx_mini_skbprd = idx; in ace_load_mini_rx_ring()
1735 clear_bit(0, &ap->mini_refill_busy); in ace_load_mini_rx_ring()
1750 struct ace_private *ap = netdev_priv(dev); in ace_load_jumbo_rx_ring() local
1751 struct ace_regs __iomem *regs = ap->regs; in ace_load_jumbo_rx_ring()
1754 idx = ap->rx_jumbo_skbprd; in ace_load_jumbo_rx_ring()
1765 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), in ace_load_jumbo_rx_ring()
1769 ap->skb->rx_jumbo_skbuff[idx].skb = skb; in ace_load_jumbo_rx_ring()
1770 dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], in ace_load_jumbo_rx_ring()
1773 rd = &ap->rx_jumbo_ring[idx]; in ace_load_jumbo_rx_ring()
1783 atomic_add(i, &ap->cur_jumbo_bufs); in ace_load_jumbo_rx_ring()
1784 ap->rx_jumbo_skbprd = idx; in ace_load_jumbo_rx_ring()
1786 if (ACE_IS_TIGON_I(ap)) { in ace_load_jumbo_rx_ring()
1790 cmd.idx = ap->rx_jumbo_skbprd; in ace_load_jumbo_rx_ring()
1798 clear_bit(0, &ap->jumbo_refill_busy); in ace_load_jumbo_rx_ring()
1815 struct ace_private *ap; in ace_handle_event() local
1817 ap = netdev_priv(dev); in ace_handle_event()
1820 switch (ap->evt_ring[evtcsm].evt) { in ace_handle_event()
1823 ap->name); in ace_handle_event()
1824 ap->fw_running = 1; in ace_handle_event()
1831 u16 code = ap->evt_ring[evtcsm].code; in ace_handle_event()
1835 u32 state = readl(&ap->regs->GigLnkState); in ace_handle_event()
1838 ap->name, in ace_handle_event()
1846 ap->name); in ace_handle_event()
1850 "UP\n", ap->name); in ace_handle_event()
1854 "state %02x\n", ap->name, code); in ace_handle_event()
1859 switch(ap->evt_ring[evtcsm].code) { in ace_handle_event()
1862 ap->name); in ace_handle_event()
1866 "error\n", ap->name); in ace_handle_event()
1870 ap->name); in ace_handle_event()
1874 ap->name, ap->evt_ring[evtcsm].code); in ace_handle_event()
1881 if (ap->skb->rx_jumbo_skbuff[i].skb) { in ace_handle_event()
1882 ap->rx_jumbo_ring[i].size = 0; in ace_handle_event()
1883 set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0); in ace_handle_event()
1884 dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb); in ace_handle_event()
1885 ap->skb->rx_jumbo_skbuff[i].skb = NULL; in ace_handle_event()
1889 if (ACE_IS_TIGON_I(ap)) { in ace_handle_event()
1894 ace_issue_cmd(ap->regs, &cmd); in ace_handle_event()
1896 writel(0, &((ap->regs)->RxJumboPrd)); in ace_handle_event()
1900 ap->jumbo = 0; in ace_handle_event()
1901 ap->rx_jumbo_skbprd = 0; in ace_handle_event()
1903 ap->name); in ace_handle_event()
1904 clear_bit(0, &ap->jumbo_refill_busy); in ace_handle_event()
1909 ap->name, ap->evt_ring[evtcsm].evt); in ace_handle_event()
1920 struct ace_private *ap = netdev_priv(dev); in ace_rx_int() local
1926 prefetchw(&ap->cur_rx_bufs); in ace_rx_int()
1927 prefetchw(&ap->cur_mini_bufs); in ace_rx_int()
1942 retdesc = &ap->rx_return_ring[idx]; in ace_rx_int()
1956 rip = &ap->skb->rx_std_skbuff[skbidx]; in ace_rx_int()
1958 rxdesc = &ap->rx_std_ring[skbidx]; in ace_rx_int()
1962 rip = &ap->skb->rx_jumbo_skbuff[skbidx]; in ace_rx_int()
1964 rxdesc = &ap->rx_jumbo_ring[skbidx]; in ace_rx_int()
1965 atomic_dec(&ap->cur_jumbo_bufs); in ace_rx_int()
1968 rip = &ap->skb->rx_mini_skbuff[skbidx]; in ace_rx_int()
1970 rxdesc = &ap->rx_mini_ring[skbidx]; in ace_rx_int()
1982 pci_unmap_page(ap->pdev, in ace_rx_int()
2017 atomic_sub(std_count, &ap->cur_rx_bufs); in ace_rx_int()
2018 if (!ACE_IS_TIGON_I(ap)) in ace_rx_int()
2019 atomic_sub(mini_count, &ap->cur_mini_bufs); in ace_rx_int()
2026 if (ACE_IS_TIGON_I(ap)) { in ace_rx_int()
2027 writel(idx, &ap->regs->RxRetCsm); in ace_rx_int()
2029 ap->cur_rx = idx; in ace_rx_int()
2041 struct ace_private *ap = netdev_priv(dev); in ace_tx_int() local
2047 info = ap->skb->tx_skbuff + idx; in ace_tx_int()
2051 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), in ace_tx_int()
2064 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); in ace_tx_int()
2071 ap->tx_ret_csm = txcsm; in ace_tx_int()
2106 struct ace_private *ap = netdev_priv(dev); in ace_interrupt() local
2107 struct ace_regs __iomem *regs = ap->regs; in ace_interrupt()
2138 rxretprd = *ap->rx_ret_prd; in ace_interrupt()
2139 rxretcsm = ap->cur_rx; in ace_interrupt()
2144 txcsm = *ap->tx_csm; in ace_interrupt()
2145 idx = ap->tx_ret_csm; in ace_interrupt()
2155 if (!tx_ring_full(ap, txcsm, ap->tx_prd)) in ace_interrupt()
2160 evtprd = *ap->evt_prd; in ace_interrupt()
2175 cur_size = atomic_read(&ap->cur_rx_bufs); in ace_interrupt()
2178 !test_and_set_bit(0, &ap->std_refill_busy)) { in ace_interrupt()
2188 if (!ACE_IS_TIGON_I(ap)) { in ace_interrupt()
2189 cur_size = atomic_read(&ap->cur_mini_bufs); in ace_interrupt()
2193 &ap->mini_refill_busy)) { in ace_interrupt()
2205 if (ap->jumbo) { in ace_interrupt()
2206 cur_size = atomic_read(&ap->cur_jumbo_bufs); in ace_interrupt()
2210 &ap->jumbo_refill_busy)){ in ace_interrupt()
2221 if (run_tasklet && !ap->tasklet_pending) { in ace_interrupt()
2222 ap->tasklet_pending = 1; in ace_interrupt()
2223 tasklet_schedule(&ap->ace_tasklet); in ace_interrupt()
2232 struct ace_private *ap = netdev_priv(dev); in ace_open() local
2233 struct ace_regs __iomem *regs = ap->regs; in ace_open()
2236 if (!(ap->fw_running)) { in ace_open()
2253 if (ap->jumbo && in ace_open()
2254 !test_and_set_bit(0, &ap->jumbo_refill_busy)) in ace_open()
2263 ap->promisc = 1; in ace_open()
2265 ap->promisc = 0; in ace_open()
2266 ap->mcast_all = 0; in ace_open()
2280 tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev); in ace_open()
2287 struct ace_private *ap = netdev_priv(dev); in ace_close() local
2288 struct ace_regs __iomem *regs = ap->regs; in ace_close()
2301 if (ap->promisc) { in ace_close()
2306 ap->promisc = 0; in ace_close()
2314 tasklet_kill(&ap->ace_tasklet); in ace_close()
2324 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) { in ace_close()
2328 info = ap->skb->tx_skbuff + i; in ace_close()
2332 if (ACE_IS_TIGON_I(ap)) { in ace_close()
2335 tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i]; in ace_close()
2340 memset(ap->tx_ring + i, 0, in ace_close()
2342 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), in ace_close()
2353 if (ap->jumbo) { in ace_close()
2368 ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb, in ace_map_tx_skb() argument
2374 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), in ace_map_tx_skb()
2378 info = ap->skb->tx_skbuff + idx; in ace_map_tx_skb()
2387 ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr, in ace_load_tx_bd() argument
2394 if (ACE_IS_TIGON_I(ap)) { in ace_load_tx_bd()
2412 struct ace_private *ap = netdev_priv(dev); in ace_start_xmit() local
2413 struct ace_regs __iomem *regs = ap->regs; in ace_start_xmit()
2419 idx = ap->tx_prd; in ace_start_xmit()
2421 if (tx_ring_full(ap, ap->tx_ret_csm, idx)) in ace_start_xmit()
2428 mapping = ace_map_tx_skb(ap, skb, skb, idx); in ace_start_xmit()
2436 desc = ap->tx_ring + idx; in ace_start_xmit()
2437 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); in ace_start_xmit()
2440 if (tx_ring_full(ap, ap->tx_ret_csm, idx)) in ace_start_xmit()
2443 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); in ace_start_xmit()
2449 mapping = ace_map_tx_skb(ap, skb, NULL, idx); in ace_start_xmit()
2458 ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag); in ace_start_xmit()
2460 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); in ace_start_xmit()
2467 info = ap->skb->tx_skbuff + idx; in ace_start_xmit()
2468 desc = ap->tx_ring + idx; in ace_start_xmit()
2470 mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0, in ace_start_xmit()
2477 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); in ace_start_xmit()
2481 if (tx_ring_full(ap, ap->tx_ret_csm, idx)) in ace_start_xmit()
2494 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); in ace_start_xmit()
2499 ap->tx_prd = idx; in ace_start_xmit()
2500 ace_set_txprd(regs, ap, idx); in ace_start_xmit()
2511 if (!tx_ring_full(ap, ap->tx_ret_csm, idx)) in ace_start_xmit()
2548 struct ace_private *ap = netdev_priv(dev); in ace_change_mtu() local
2549 struct ace_regs __iomem *regs = ap->regs; in ace_change_mtu()
2558 if (!(ap->jumbo)) { in ace_change_mtu()
2561 ap->jumbo = 1; in ace_change_mtu()
2562 if (!test_and_set_bit(0, &ap->jumbo_refill_busy)) in ace_change_mtu()
2567 while (test_and_set_bit(0, &ap->jumbo_refill_busy)); in ace_change_mtu()
2570 if (ap->jumbo) { in ace_change_mtu()
2585 struct ace_private *ap = netdev_priv(dev); in ace_get_settings() local
2586 struct ace_regs __iomem *regs = ap->regs; in ace_get_settings()
2638 struct ace_private *ap = netdev_priv(dev); in ace_set_settings() local
2639 struct ace_regs __iomem *regs = ap->regs; in ace_set_settings()
2657 if (!ACE_IS_TIGON_I(ap)) in ace_set_settings()
2679 if (link != ap->link) { in ace_set_settings()
2684 ap->link = link; in ace_set_settings()
2686 if (!ACE_IS_TIGON_I(ap)) in ace_set_settings()
2701 struct ace_private *ap = netdev_priv(dev); in ace_get_drvinfo() local
2705 ap->firmware_major, ap->firmware_minor, in ace_get_drvinfo()
2706 ap->firmware_fix); in ace_get_drvinfo()
2708 if (ap->pdev) in ace_get_drvinfo()
2709 strlcpy(info->bus_info, pci_name(ap->pdev), in ace_get_drvinfo()
2719 struct ace_private *ap = netdev_priv(dev); in ace_set_mac_addr() local
2720 struct ace_regs __iomem *regs = ap->regs; in ace_set_mac_addr()
2747 struct ace_private *ap = netdev_priv(dev); in ace_set_multicast_list() local
2748 struct ace_regs __iomem *regs = ap->regs; in ace_set_multicast_list()
2751 if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) { in ace_set_multicast_list()
2756 ap->mcast_all = 1; in ace_set_multicast_list()
2757 } else if (ap->mcast_all) { in ace_set_multicast_list()
2762 ap->mcast_all = 0; in ace_set_multicast_list()
2765 if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) { in ace_set_multicast_list()
2770 ap->promisc = 1; in ace_set_multicast_list()
2771 }else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) { in ace_set_multicast_list()
2776 ap->promisc = 0; in ace_set_multicast_list()
2785 if (!netdev_mc_empty(dev) && !ap->mcast_all) { in ace_set_multicast_list()
2790 }else if (!ap->mcast_all) { in ace_set_multicast_list()
2801 struct ace_private *ap = netdev_priv(dev); in ace_get_stats() local
2803 (struct ace_mac_stats __iomem *)ap->regs->Stats; in ace_get_stats()
2875 struct ace_private *ap = netdev_priv(dev); in ace_load_firmware() local
2876 struct ace_regs __iomem *regs = ap->regs; in ace_load_firmware()
2883 "CPU is running!\n", ap->name); in ace_load_firmware()
2887 if (ACE_IS_TIGON_I(ap)) in ace_load_firmware()
2890 ret = request_firmware(&fw, fw_name, &ap->pdev->dev); in ace_load_firmware()
2893 ap->name, fw_name); in ace_load_firmware()
2904 ap->firmware_major = fw->data[0]; in ace_load_firmware()
2905 ap->firmware_minor = fw->data[1]; in ace_load_firmware()
2906 ap->firmware_fix = fw->data[2]; in ace_load_firmware()
2908 ap->firmware_start = be32_to_cpu(fw_data[1]); in ace_load_firmware()
2909 if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) { in ace_load_firmware()
2911 ap->name, ap->firmware_start, fw_name); in ace_load_firmware()
2919 ap->name, load_addr, fw_name); in ace_load_firmware()
3082 struct ace_private *ap = netdev_priv(dev); in read_eeprom_byte() local
3083 struct ace_regs __iomem *regs = ap->regs; in read_eeprom_byte()
3100 printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name); in read_eeprom_byte()
3109 ap->name); in read_eeprom_byte()
3118 ap->name); in read_eeprom_byte()
3128 ap->name); in read_eeprom_byte()
3185 ap->name, offset); in read_eeprom_byte()