Lines matching "factory-programmed" in s2io.c

2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2010 Exar Corp.
47 * Default is '2' - which means disable in promisc mode
48 * and enable in non-promiscuous mode.
60 #include <linux/dma-mapping.h>
78 #include <linux/io-64-nonatomic-lo-hi.h>
89 #include "s2io-regs.h"
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) && in RXD_IS_UP2DT()
105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK)); in RXD_IS_UP2DT()
125 return test_bit(__S2IO_STATE_CARD_UP, &sp->state); in is_s2io_card_up()
344 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr); in do_s2io_copy_mac_addr()
345 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8); in do_s2io_copy_mac_addr()
346 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16); in do_s2io_copy_mac_addr()
347 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24); in do_s2io_copy_mac_addr()
348 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32); in do_s2io_copy_mac_addr()
349 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40); in do_s2io_copy_mac_addr()
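The do_s2io_copy_mac_addr() fragment above (lines 344-349) unpacks a 48-bit MAC address held in a u64, most significant octet first. A minimal standalone sketch of the same unpacking, assuming only that the address occupies bits 0-47:

#include <stdint.h>

/* Unpack a 48-bit MAC held in the low 48 bits of a u64, MSB first,
 * mirroring the per-byte shifts at lines 344-349 above. */
static void mac_from_u64(uint64_t mac_addr, uint8_t mac[6])
{
	int i;

	for (i = 0; i < 6; i++)
		mac[5 - i] = (uint8_t)(mac_addr >> (8 * i));
}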
353 * Constants to be programmed into the Xena's registers, to configure
452 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
454 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
456 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
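Lines 452-456 use GCC's range-designator extension, [first ... last] = value, to fill whole slices of an array from a single initializer. A self-contained sketch of the idiom; the macro values here are illustrative, not taken from the driver:

/* GNU C range designators: element 0 gets one default, elements
 * 1..MAX-1 another, all in one initializer. */
#define MAX_TX_FIFOS 8            /* illustrative value */
#define DEFAULT_FIFO_0_LEN 4096   /* illustrative value */
#define DEFAULT_FIFO_1_7_LEN 512  /* illustrative value */

static unsigned int fifo_len[MAX_TX_FIFOS] =
	{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};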
500 if (!sp->config.multiq) { in s2io_stop_all_tx_queue()
503 for (i = 0; i < sp->config.tx_fifo_num; i++) in s2io_stop_all_tx_queue()
504 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP; in s2io_stop_all_tx_queue()
506 netif_tx_stop_all_queues(sp->dev); in s2io_stop_all_tx_queue()
511 if (!sp->config.multiq) in s2io_stop_tx_queue()
512 sp->mac_control.fifos[fifo_no].queue_state = in s2io_stop_tx_queue()
515 netif_tx_stop_all_queues(sp->dev); in s2io_stop_tx_queue()
520 if (!sp->config.multiq) { in s2io_start_all_tx_queue()
523 for (i = 0; i < sp->config.tx_fifo_num; i++) in s2io_start_all_tx_queue()
524 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START; in s2io_start_all_tx_queue()
526 netif_tx_start_all_queues(sp->dev); in s2io_start_all_tx_queue()
531 if (!sp->config.multiq) { in s2io_wake_all_tx_queue()
534 for (i = 0; i < sp->config.tx_fifo_num; i++) in s2io_wake_all_tx_queue()
535 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START; in s2io_wake_all_tx_queue()
537 netif_tx_wake_all_queues(sp->dev); in s2io_wake_all_tx_queue()
545 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no)) in s2io_wake_tx_queue()
546 netif_wake_subqueue(fifo->dev, fifo->fifo_no); in s2io_wake_tx_queue()
547 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) { in s2io_wake_tx_queue()
548 if (netif_queue_stopped(fifo->dev)) { in s2io_wake_tx_queue()
549 fifo->queue_state = FIFO_QUEUE_START; in s2io_wake_tx_queue()
550 netif_wake_queue(fifo->dev); in s2io_wake_tx_queue()
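Lines 545-550 belong to s2io_wake_tx_queue(), shown here only in part. Reassembled as one unit (the signature is assumed from the call at line 3068 further down; a sketch, not authoritative driver source): with netdev multiqueue the core API is called directly, while in legacy single-queue mode a per-FIFO software flag decides whether the one hardware queue may be woken.

/* Sketch reassembled from the fragments above; signature assumed. */
static void s2io_wake_tx_queue(struct fifo_info *fifo, int cnt, u8 multiq)
{
	if (multiq) {
		/* Per-FIFO subqueue: wake only the queue that drained. */
		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
		/* Legacy mode: clear the software flag, then wake the
		 * single device queue if the stack had stopped it. */
		if (netif_queue_stopped(fifo->dev)) {
			fifo->queue_state = FIFO_QUEUE_START;
			netif_wake_queue(fifo->dev);
		}
	}
}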
556 * init_shared_mem - Allocation and Initialization of Memory
571 struct net_device *dev = nic->dev; in init_shared_mem()
574 struct config_param *config = &nic->config; in init_shared_mem()
575 struct mac_info *mac_control = &nic->mac_control; in init_shared_mem()
580 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
581 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
583 size += tx_cfg->fifo_len; in init_shared_mem()
589 return -EINVAL; in init_shared_mem()
593 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
594 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
596 size = tx_cfg->fifo_len; in init_shared_mem()
601 DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - " in init_shared_mem()
604 return -EINVAL; in init_shared_mem()
608 lst_size = (sizeof(struct TxD) * config->max_txds); in init_shared_mem()
611 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
612 struct fifo_info *fifo = &mac_control->fifos[i]; in init_shared_mem()
613 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
614 int fifo_len = tx_cfg->fifo_len; in init_shared_mem()
617 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL); in init_shared_mem()
618 if (!fifo->list_info) { in init_shared_mem()
620 return -ENOMEM; in init_shared_mem()
624 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
625 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len, in init_shared_mem()
627 struct fifo_info *fifo = &mac_control->fifos[i]; in init_shared_mem()
628 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
630 fifo->tx_curr_put_info.offset = 0; in init_shared_mem()
631 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1; in init_shared_mem()
632 fifo->tx_curr_get_info.offset = 0; in init_shared_mem()
633 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1; in init_shared_mem()
634 fifo->fifo_no = i; in init_shared_mem()
635 fifo->nic = nic; in init_shared_mem()
636 fifo->max_txds = MAX_SKB_FRAGS + 2; in init_shared_mem()
637 fifo->dev = dev; in init_shared_mem()
643 tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE, in init_shared_mem()
648 return -ENOMEM; in init_shared_mem()
656 mac_control->zerodma_virt_addr = tmp_v; in init_shared_mem()
660 dev->name, tmp_v); in init_shared_mem()
661 tmp_v = dma_alloc_coherent(&nic->pdev->dev, in init_shared_mem()
667 return -ENOMEM; in init_shared_mem()
673 if (l == tx_cfg->fifo_len) in init_shared_mem()
675 fifo->list_info[l].list_virt_addr = in init_shared_mem()
677 fifo->list_info[l].list_phy_addr = in init_shared_mem()
684 for (i = 0; i < config->tx_fifo_num; i++) { in init_shared_mem()
685 struct fifo_info *fifo = &mac_control->fifos[i]; in init_shared_mem()
686 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_shared_mem()
688 size = tx_cfg->fifo_len; in init_shared_mem()
689 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL); in init_shared_mem()
690 if (!fifo->ufo_in_band_v) in init_shared_mem()
691 return -ENOMEM; in init_shared_mem()
697 for (i = 0; i < config->rx_ring_num; i++) { in init_shared_mem()
698 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in init_shared_mem()
699 struct ring_info *ring = &mac_control->rings[i]; in init_shared_mem()
701 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) { in init_shared_mem()
704 dev->name, i); in init_shared_mem()
707 size += rx_cfg->num_rxd; in init_shared_mem()
708 ring->block_count = rx_cfg->num_rxd / in init_shared_mem()
709 (rxd_count[nic->rxd_mode] + 1); in init_shared_mem()
710 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count; in init_shared_mem()
712 if (nic->rxd_mode == RXD_MODE_1) in init_shared_mem()
717 for (i = 0; i < config->rx_ring_num; i++) { in init_shared_mem()
718 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in init_shared_mem()
719 struct ring_info *ring = &mac_control->rings[i]; in init_shared_mem()
721 ring->rx_curr_get_info.block_index = 0; in init_shared_mem()
722 ring->rx_curr_get_info.offset = 0; in init_shared_mem()
723 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1; in init_shared_mem()
724 ring->rx_curr_put_info.block_index = 0; in init_shared_mem()
725 ring->rx_curr_put_info.offset = 0; in init_shared_mem()
726 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1; in init_shared_mem()
727 ring->nic = nic; in init_shared_mem()
728 ring->ring_no = i; in init_shared_mem()
730 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1); in init_shared_mem()
736 rx_blocks = &ring->rx_blocks[j]; in init_shared_mem()
738 tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size, in init_shared_mem()
747 rx_blocks->block_virt_addr = tmp_v_addr; in init_shared_mem()
748 return -ENOMEM; in init_shared_mem()
753 rxd_count[nic->rxd_mode]; in init_shared_mem()
754 rx_blocks->block_virt_addr = tmp_v_addr; in init_shared_mem()
755 rx_blocks->block_dma_addr = tmp_p_addr; in init_shared_mem()
756 rx_blocks->rxds = kmalloc(size, GFP_KERNEL); in init_shared_mem()
757 if (!rx_blocks->rxds) in init_shared_mem()
758 return -ENOMEM; in init_shared_mem()
760 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) { in init_shared_mem()
761 rx_blocks->rxds[l].virt_addr = in init_shared_mem()
762 rx_blocks->block_virt_addr + in init_shared_mem()
763 (rxd_size[nic->rxd_mode] * l); in init_shared_mem()
764 rx_blocks->rxds[l].dma_addr = in init_shared_mem()
765 rx_blocks->block_dma_addr + in init_shared_mem()
766 (rxd_size[nic->rxd_mode] * l); in init_shared_mem()
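In the loop at lines 760-766 each receive block is a single dma_alloc_coherent() allocation, and per-descriptor CPU/DMA addresses are derived by a fixed stride into it: descriptor l lives at base + rxd_size * l in both address spaces. A generic standalone sketch of that carving, with hypothetical names (rec_addr, record_sz, nrec):

#include <stddef.h>
#include <stdint.h>

struct rec_addr { void *virt; uint64_t dma; };

/* Carve nrec fixed-size records out of one coherent block whose CPU and
 * device base addresses are base_v/base_d; both sides advance by the
 * same stride, as the rxds[l] setup above does. */
static void carve(void *base_v, uint64_t base_d, size_t record_sz,
		  size_t nrec, struct rec_addr *out)
{
	size_t l;

	for (l = 0; l < nrec; l++) {
		out[l].virt = (char *)base_v + record_sz * l;
		out[l].dma  = base_d + record_sz * l;
	}
}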
772 tmp_v_addr = ring->rx_blocks[j].block_virt_addr; in init_shared_mem()
773 tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr; in init_shared_mem()
774 tmp_p_addr = ring->rx_blocks[j].block_dma_addr; in init_shared_mem()
775 tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr; in init_shared_mem()
778 pre_rxd_blk->reserved_2_pNext_RxD_block = in init_shared_mem()
780 pre_rxd_blk->pNext_RxD_Blk_physical = in init_shared_mem()
784 if (nic->rxd_mode == RXD_MODE_3B) { in init_shared_mem()
789 for (i = 0; i < config->rx_ring_num; i++) { in init_shared_mem()
790 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in init_shared_mem()
791 struct ring_info *ring = &mac_control->rings[i]; in init_shared_mem()
793 blk_cnt = rx_cfg->num_rxd / in init_shared_mem()
794 (rxd_count[nic->rxd_mode] + 1); in init_shared_mem()
796 ring->ba = kmalloc(size, GFP_KERNEL); in init_shared_mem()
797 if (!ring->ba) in init_shared_mem()
798 return -ENOMEM; in init_shared_mem()
804 (rxd_count[nic->rxd_mode] + 1); in init_shared_mem()
805 ring->ba[j] = kmalloc(size, GFP_KERNEL); in init_shared_mem()
806 if (!ring->ba[j]) in init_shared_mem()
807 return -ENOMEM; in init_shared_mem()
809 while (k != rxd_count[nic->rxd_mode]) { in init_shared_mem()
810 ba = &ring->ba[j][k]; in init_shared_mem()
812 ba->ba_0_org = kmalloc(size, GFP_KERNEL); in init_shared_mem()
813 if (!ba->ba_0_org) in init_shared_mem()
814 return -ENOMEM; in init_shared_mem()
816 tmp = (unsigned long)ba->ba_0_org; in init_shared_mem()
819 ba->ba_0 = (void *)tmp; in init_shared_mem()
822 ba->ba_1_org = kmalloc(size, GFP_KERNEL); in init_shared_mem()
823 if (!ba->ba_1_org) in init_shared_mem()
824 return -ENOMEM; in init_shared_mem()
826 tmp = (unsigned long)ba->ba_1_org; in init_shared_mem()
829 ba->ba_1 = (void *)tmp; in init_shared_mem()
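The ba_0/ba_1 setup at lines 812-829 keeps the raw kmalloc() pointer (ba_0_org/ba_1_org, still needed for kfree() at lines 966/969 below) and derives an aligned working pointer from it; the intermediate arithmetic lines are not in this listing, but the add-then-mask shape is visible. A sketch of the align-up idiom, assuming ALIGN_SIZE is a power-of-two-minus-one mask (e.g. 127 for 128-byte alignment):

#include <stdint.h>

/* Round org up to the next (mask + 1)-byte boundary; mask must be a
 * power of two minus one. The original pointer remains the one to free. */
static void *align_up(void *org, uintptr_t mask)
{
	uintptr_t tmp = (uintptr_t)org;

	tmp += mask;
	tmp &= ~mask;
	return (void *)tmp;
}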
838 mac_control->stats_mem = in init_shared_mem()
839 dma_alloc_coherent(&nic->pdev->dev, size, in init_shared_mem()
840 &mac_control->stats_mem_phy, GFP_KERNEL); in init_shared_mem()
842 if (!mac_control->stats_mem) { in init_shared_mem()
848 return -ENOMEM; in init_shared_mem()
851 mac_control->stats_mem_sz = size; in init_shared_mem()
853 tmp_v_addr = mac_control->stats_mem; in init_shared_mem()
854 mac_control->stats_info = tmp_v_addr; in init_shared_mem()
857 dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr); in init_shared_mem()
858 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated; in init_shared_mem()
863 * free_shared_mem - Free the allocated Memory
885 dev = nic->dev; in free_shared_mem()
887 config = &nic->config; in free_shared_mem()
888 mac_control = &nic->mac_control; in free_shared_mem()
889 stats = mac_control->stats_info; in free_shared_mem()
890 swstats = &stats->sw_stat; in free_shared_mem()
892 lst_size = sizeof(struct TxD) * config->max_txds; in free_shared_mem()
895 for (i = 0; i < config->tx_fifo_num; i++) { in free_shared_mem()
896 struct fifo_info *fifo = &mac_control->fifos[i]; in free_shared_mem()
897 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in free_shared_mem()
899 page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page); in free_shared_mem()
904 if (!fifo->list_info) in free_shared_mem()
907 fli = &fifo->list_info[mem_blks]; in free_shared_mem()
908 if (!fli->list_virt_addr) in free_shared_mem()
910 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, in free_shared_mem()
911 fli->list_virt_addr, in free_shared_mem()
912 fli->list_phy_addr); in free_shared_mem()
913 swstats->mem_freed += PAGE_SIZE; in free_shared_mem()
918 if (mac_control->zerodma_virt_addr) { in free_shared_mem()
919 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, in free_shared_mem()
920 mac_control->zerodma_virt_addr, in free_shared_mem()
925 dev->name, mac_control->zerodma_virt_addr); in free_shared_mem()
926 swstats->mem_freed += PAGE_SIZE; in free_shared_mem()
928 kfree(fifo->list_info); in free_shared_mem()
929 swstats->mem_freed += tx_cfg->fifo_len * in free_shared_mem()
934 for (i = 0; i < config->rx_ring_num; i++) { in free_shared_mem()
935 struct ring_info *ring = &mac_control->rings[i]; in free_shared_mem()
937 blk_cnt = ring->block_count; in free_shared_mem()
939 tmp_v_addr = ring->rx_blocks[j].block_virt_addr; in free_shared_mem()
940 tmp_p_addr = ring->rx_blocks[j].block_dma_addr; in free_shared_mem()
943 dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr, in free_shared_mem()
945 swstats->mem_freed += size; in free_shared_mem()
946 kfree(ring->rx_blocks[j].rxds); in free_shared_mem()
947 swstats->mem_freed += sizeof(struct rxd_info) * in free_shared_mem()
948 rxd_count[nic->rxd_mode]; in free_shared_mem()
952 if (nic->rxd_mode == RXD_MODE_3B) { in free_shared_mem()
954 for (i = 0; i < config->rx_ring_num; i++) { in free_shared_mem()
955 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in free_shared_mem()
956 struct ring_info *ring = &mac_control->rings[i]; in free_shared_mem()
958 blk_cnt = rx_cfg->num_rxd / in free_shared_mem()
959 (rxd_count[nic->rxd_mode] + 1); in free_shared_mem()
962 if (!ring->ba[j]) in free_shared_mem()
964 while (k != rxd_count[nic->rxd_mode]) { in free_shared_mem()
965 struct buffAdd *ba = &ring->ba[j][k]; in free_shared_mem()
966 kfree(ba->ba_0_org); in free_shared_mem()
967 swstats->mem_freed += in free_shared_mem()
969 kfree(ba->ba_1_org); in free_shared_mem()
970 swstats->mem_freed += in free_shared_mem()
974 kfree(ring->ba[j]); in free_shared_mem()
975 swstats->mem_freed += sizeof(struct buffAdd) * in free_shared_mem()
976 (rxd_count[nic->rxd_mode] + 1); in free_shared_mem()
978 kfree(ring->ba); in free_shared_mem()
979 swstats->mem_freed += sizeof(struct buffAdd *) * in free_shared_mem()
984 for (i = 0; i < nic->config.tx_fifo_num; i++) { in free_shared_mem()
985 struct fifo_info *fifo = &mac_control->fifos[i]; in free_shared_mem()
986 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in free_shared_mem()
988 if (fifo->ufo_in_band_v) { in free_shared_mem()
989 swstats->mem_freed += tx_cfg->fifo_len * in free_shared_mem()
991 kfree(fifo->ufo_in_band_v); in free_shared_mem()
995 if (mac_control->stats_mem) { in free_shared_mem()
996 swstats->mem_freed += mac_control->stats_mem_sz; in free_shared_mem()
997 dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz, in free_shared_mem()
998 mac_control->stats_mem, in free_shared_mem()
999 mac_control->stats_mem_phy); in free_shared_mem()
1004 * s2io_verify_pci_mode - Verify the PCI bus mode of the adapter
1009 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_verify_pci_mode()
1013 val64 = readq(&bar0->pci_mode); in s2io_verify_pci_mode()
1017 return -1; /* Unknown PCI mode */ in s2io_verify_pci_mode()
1027 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { in s2io_on_nec_bridge()
1028 if (tdev->bus == s2io_pdev->bus->parent) { in s2io_on_nec_bridge()
1039 * s2io_print_pci_mode - Print the PCI bus mode of the adapter
1043 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_print_pci_mode()
1046 struct config_param *config = &nic->config; in s2io_print_pci_mode()
1049 val64 = readq(&bar0->pci_mode); in s2io_print_pci_mode()
1053 return -1; /* Unknown PCI mode */ in s2io_print_pci_mode()
1055 config->bus_speed = bus_speed[mode]; in s2io_print_pci_mode()
1057 if (s2io_on_nec_bridge(nic->pdev)) { in s2io_print_pci_mode()
1058 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n", in s2io_print_pci_mode()
1059 nic->dev->name); in s2io_print_pci_mode()
1090 mode = -1; in s2io_print_pci_mode()
1094 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode); in s2io_print_pci_mode()
1100 * init_tti - Initialize the transmit traffic interrupt scheme
1106 * '-1' on failure
1111 struct XENA_dev_config __iomem *bar0 = nic->bar0; in init_tti()
1114 struct config_param *config = &nic->config; in init_tti()
1116 for (i = 0; i < config->tx_fifo_num; i++) { in init_tti()
1122 if (nic->device_type == XFRAME_II_DEVICE) { in init_tti()
1123 int count = (nic->config.bus_speed * 125)/2; in init_tti()
1135 writeq(val64, &bar0->tti_data1_mem); in init_tti()
1137 if (nic->config.intr_type == MSI_X) { in init_tti()
1143 if ((nic->config.tx_steering_type == in init_tti()
1145 (config->tx_fifo_num > 1) && in init_tti()
1146 (i >= nic->udp_fifo_idx) && in init_tti()
1147 (i < (nic->udp_fifo_idx + in init_tti()
1148 nic->total_udp_fifos))) in init_tti()
1160 writeq(val64, &bar0->tti_data2_mem); in init_tti()
1165 writeq(val64, &bar0->tti_command_mem); in init_tti()
1167 if (wait_for_cmd_complete(&bar0->tti_command_mem, in init_tti()
1177 * init_nic - Initialization of hardware
1182 * '-1' on failure (endian settings incorrect).
1187 struct XENA_dev_config __iomem *bar0 = nic->bar0; in init_nic()
1188 struct net_device *dev = nic->dev; in init_nic()
1196 struct config_param *config = &nic->config; in init_nic()
1197 struct mac_info *mac_control = &nic->mac_control; in init_nic()
1202 return -EIO; in init_nic()
1208 if (nic->device_type & XFRAME_II_DEVICE) { in init_nic()
1210 writeq(val64, &bar0->sw_reset); in init_nic()
1212 val64 = readq(&bar0->sw_reset); in init_nic()
1217 writeq(val64, &bar0->sw_reset); in init_nic()
1219 val64 = readq(&bar0->sw_reset); in init_nic()
1224 if (nic->device_type == XFRAME_II_DEVICE) { in init_nic()
1226 val64 = readq(&bar0->adapter_status); in init_nic()
1232 return -ENODEV; in init_nic()
1236 add = &bar0->mac_cfg; in init_nic()
1237 val64 = readq(&bar0->mac_cfg); in init_nic()
1239 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1241 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1245 val64 = readq(&bar0->mac_int_mask); in init_nic()
1246 val64 = readq(&bar0->mc_int_mask); in init_nic()
1247 val64 = readq(&bar0->xgxs_int_mask); in init_nic()
1250 val64 = dev->mtu; in init_nic()
1251 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); in init_nic()
1253 if (nic->device_type & XFRAME_II_DEVICE) { in init_nic()
1256 &bar0->dtx_control, UF); in init_nic()
1264 &bar0->dtx_control, UF); in init_nic()
1265 val64 = readq(&bar0->dtx_control); in init_nic()
1272 writeq(val64, &bar0->tx_fifo_partition_0); in init_nic()
1273 writeq(val64, &bar0->tx_fifo_partition_1); in init_nic()
1274 writeq(val64, &bar0->tx_fifo_partition_2); in init_nic()
1275 writeq(val64, &bar0->tx_fifo_partition_3); in init_nic()
1277 for (i = 0, j = 0; i < config->tx_fifo_num; i++) { in init_nic()
1278 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in init_nic()
1280 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) | in init_nic()
1281 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3); in init_nic()
1283 if (i == (config->tx_fifo_num - 1)) { in init_nic()
1290 writeq(val64, &bar0->tx_fifo_partition_0); in init_nic()
1295 writeq(val64, &bar0->tx_fifo_partition_1); in init_nic()
1300 writeq(val64, &bar0->tx_fifo_partition_2); in init_nic()
1305 writeq(val64, &bar0->tx_fifo_partition_3); in init_nic()
1317 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. in init_nic()
1319 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4)) in init_nic()
1320 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable); in init_nic()
1322 val64 = readq(&bar0->tx_fifo_partition_0); in init_nic()
1324 &bar0->tx_fifo_partition_0, (unsigned long long)val64); in init_nic()
1330 val64 = readq(&bar0->tx_pa_cfg); in init_nic()
1335 writeq(val64, &bar0->tx_pa_cfg); in init_nic()
1339 for (i = 0; i < config->rx_ring_num; i++) { in init_nic()
1340 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; in init_nic()
1342 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3); in init_nic()
1344 writeq(val64, &bar0->rx_queue_priority); in init_nic()
1351 if (nic->device_type & XFRAME_II_DEVICE) in init_nic()
1356 for (i = 0; i < config->rx_ring_num; i++) { in init_nic()
1359 mem_share = (mem_size / config->rx_ring_num + in init_nic()
1360 mem_size % config->rx_ring_num); in init_nic()
1364 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1368 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1372 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1376 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1380 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1384 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1388 mem_share = (mem_size / config->rx_ring_num); in init_nic()
1393 writeq(val64, &bar0->rx_queue_cfg); in init_nic()
1399 switch (config->tx_fifo_num) { in init_nic()
1402 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1403 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1404 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1405 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1406 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1410 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1411 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1412 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1413 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1415 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1419 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1421 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1423 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1425 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1427 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1431 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1432 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1433 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1434 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1436 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1440 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1442 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1444 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1446 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1448 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1452 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1454 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1456 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1458 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1460 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1464 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1466 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1468 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1470 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1472 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1476 writeq(val64, &bar0->tx_w_round_robin_0); in init_nic()
1477 writeq(val64, &bar0->tx_w_round_robin_1); in init_nic()
1478 writeq(val64, &bar0->tx_w_round_robin_2); in init_nic()
1479 writeq(val64, &bar0->tx_w_round_robin_3); in init_nic()
1481 writeq(val64, &bar0->tx_w_round_robin_4); in init_nic()
1486 val64 = readq(&bar0->tx_fifo_partition_0); in init_nic()
1488 writeq(val64, &bar0->tx_fifo_partition_0); in init_nic()
1494 switch (config->rx_ring_num) { in init_nic()
1497 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1498 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1499 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1500 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1501 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1504 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1508 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1509 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1510 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1511 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1513 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1516 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1520 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1522 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1524 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1526 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1528 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1531 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1535 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1536 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1537 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1538 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1540 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1543 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1547 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1549 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1551 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1553 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1555 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1558 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1562 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1564 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1566 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1568 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1570 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1573 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1577 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1579 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1581 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1583 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1585 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1588 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1592 writeq(val64, &bar0->rx_w_round_robin_0); in init_nic()
1593 writeq(val64, &bar0->rx_w_round_robin_1); in init_nic()
1594 writeq(val64, &bar0->rx_w_round_robin_2); in init_nic()
1595 writeq(val64, &bar0->rx_w_round_robin_3); in init_nic()
1597 writeq(val64, &bar0->rx_w_round_robin_4); in init_nic()
1600 writeq(val64, &bar0->rts_qos_steering); in init_nic()
1607 writeq(val64, &bar0->rts_frm_len_n[i]); in init_nic()
1610 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22); in init_nic()
1611 for (i = 0 ; i < config->rx_ring_num ; i++) in init_nic()
1612 writeq(val64, &bar0->rts_frm_len_n[i]); in init_nic()
1617 for (i = 0; i < config->rx_ring_num; i++) { in init_nic()
1626 &bar0->rts_frm_len_n[i]); in init_nic()
1635 dev->name, i); in init_nic()
1636 return -ENODEV; in init_nic()
1641 writeq(mac_control->stats_mem_phy, &bar0->stat_addr); in init_nic()
1643 if (nic->device_type == XFRAME_II_DEVICE) { in init_nic()
1645 writeq(val64, &bar0->stat_byte_cnt); in init_nic()
1654 writeq(val64, &bar0->mac_link_util); in init_nic()
1662 if (SUCCESS != init_tti(nic, nic->last_link_state)) in init_nic()
1663 return -ENODEV; in init_nic()
1666 if (nic->device_type == XFRAME_II_DEVICE) { in init_nic()
1668 * Programmed to generate approx 500 Intrs per in init_nic()
1671 int count = (nic->config.bus_speed * 125)/4; in init_nic()
1680 writeq(val64, &bar0->rti_data1_mem); in init_nic()
1684 if (nic->config.intr_type == MSI_X) in init_nic()
1690 writeq(val64, &bar0->rti_data2_mem); in init_nic()
1692 for (i = 0; i < config->rx_ring_num; i++) { in init_nic()
1696 writeq(val64, &bar0->rti_command_mem); in init_nic()
1707 val64 = readq(&bar0->rti_command_mem); in init_nic()
1713 dev->name); in init_nic()
1714 return -ENODEV; in init_nic()
1725 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3); in init_nic()
1726 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7); in init_nic()
1729 add = &bar0->mac_cfg; in init_nic()
1730 val64 = readq(&bar0->mac_cfg); in init_nic()
1732 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1734 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1736 val64 = readq(&bar0->mac_cfg); in init_nic()
1739 add = &bar0->mac_cfg; in init_nic()
1740 val64 = readq(&bar0->mac_cfg); in init_nic()
1742 if (nic->device_type == XFRAME_II_DEVICE) in init_nic()
1743 writeq(val64, &bar0->mac_cfg); in init_nic()
1745 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1747 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in init_nic()
1755 val64 = readq(&bar0->rmac_pause_cfg); in init_nic()
1757 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time); in init_nic()
1758 writeq(val64, &bar0->rmac_pause_cfg); in init_nic()
1769 nic->mac_control.mc_pause_threshold_q0q3) in init_nic()
1772 writeq(val64, &bar0->mc_pause_thresh_q0q3); in init_nic()
1777 nic->mac_control.mc_pause_threshold_q4q7) in init_nic()
1780 writeq(val64, &bar0->mc_pause_thresh_q4q7); in init_nic()
1786 val64 = readq(&bar0->pic_control); in init_nic()
1788 writeq(val64, &bar0->pic_control); in init_nic()
1790 if (nic->config.bus_speed == 266) { in init_nic()
1791 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout); in init_nic()
1792 writeq(0x0, &bar0->read_retry_delay); in init_nic()
1793 writeq(0x0, &bar0->write_retry_delay); in init_nic()
1800 if (nic->device_type == XFRAME_II_DEVICE) { in init_nic()
1803 writeq(val64, &bar0->misc_control); in init_nic()
1804 val64 = readq(&bar0->pic_control2); in init_nic()
1806 writeq(val64, &bar0->pic_control2); in init_nic()
1808 if (strstr(nic->product_name, "CX4")) { in init_nic()
1810 writeq(val64, &bar0->tmac_avg_ipg); in init_nic()
1820 if (nic->device_type == XFRAME_II_DEVICE) in s2io_link_fault_indication()
1827 * do_s2io_write_bits - update alarm bits in alarm register
1850 struct XENA_dev_config __iomem *bar0 = nic->bar0; in en_dis_err_alarms()
1854 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask); in en_dis_err_alarms()
1861 TXDMA_SM_INT, flag, &bar0->txdma_int_mask); in en_dis_err_alarms()
1866 &bar0->pfc_err_mask); in en_dis_err_alarms()
1870 TDA_PCIX_ERR, flag, &bar0->tda_err_mask); in en_dis_err_alarms()
1878 flag, &bar0->pcc_err_mask); in en_dis_err_alarms()
1881 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask); in en_dis_err_alarms()
1886 flag, &bar0->lso_err_mask); in en_dis_err_alarms()
1889 flag, &bar0->tpa_err_mask); in en_dis_err_alarms()
1891 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask); in en_dis_err_alarms()
1897 &bar0->mac_int_mask); in en_dis_err_alarms()
1901 flag, &bar0->mac_tmac_err_mask); in en_dis_err_alarms()
1907 &bar0->xgxs_int_mask); in en_dis_err_alarms()
1910 flag, &bar0->xgxs_txgxs_err_mask); in en_dis_err_alarms()
1917 flag, &bar0->rxdma_int_mask); in en_dis_err_alarms()
1921 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask); in en_dis_err_alarms()
1925 &bar0->prc_pcix_err_mask); in en_dis_err_alarms()
1928 &bar0->rpa_err_mask); in en_dis_err_alarms()
1934 flag, &bar0->rda_err_mask); in en_dis_err_alarms()
1937 flag, &bar0->rti_err_mask); in en_dis_err_alarms()
1943 &bar0->mac_int_mask); in en_dis_err_alarms()
1950 flag, &bar0->mac_rmac_err_mask); in en_dis_err_alarms()
1956 &bar0->xgxs_int_mask); in en_dis_err_alarms()
1958 &bar0->xgxs_rxgxs_err_mask); in en_dis_err_alarms()
1964 flag, &bar0->mc_int_mask); in en_dis_err_alarms()
1967 &bar0->mc_err_mask); in en_dis_err_alarms()
1969 nic->general_int_mask = gen_int_mask; in en_dis_err_alarms()
1972 nic->general_int_mask = 0; in en_dis_err_alarms()
1976 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1988 struct XENA_dev_config __iomem *bar0 = nic->bar0; in en_dis_able_nic_intrs()
1991 intr_mask = nic->general_int_mask; in en_dis_able_nic_intrs()
2008 &bar0->pic_int_mask); in en_dis_able_nic_intrs()
2010 &bar0->gpio_int_mask); in en_dis_able_nic_intrs()
2012 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); in en_dis_able_nic_intrs()
2018 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); in en_dis_able_nic_intrs()
2030 writeq(0x0, &bar0->tx_traffic_mask); in en_dis_able_nic_intrs()
2036 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask); in en_dis_able_nic_intrs()
2045 writeq(0x0, &bar0->rx_traffic_mask); in en_dis_able_nic_intrs()
2051 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask); in en_dis_able_nic_intrs()
2055 temp64 = readq(&bar0->general_int_mask); in en_dis_able_nic_intrs()
2060 writeq(temp64, &bar0->general_int_mask); in en_dis_able_nic_intrs()
2062 nic->general_int_mask = readq(&bar0->general_int_mask); in en_dis_able_nic_intrs()
2066 * verify_pcc_quiescent - Checks for PCC quiescent state
2076 struct XENA_dev_config __iomem *bar0 = sp->bar0; in verify_pcc_quiescent()
2077 u64 val64 = readq(&bar0->adapter_status); in verify_pcc_quiescent()
2079 herc = (sp->device_type == XFRAME_II_DEVICE); in verify_pcc_quiescent()
2082 if ((!herc && (sp->pdev->revision >= 4)) || herc) { in verify_pcc_quiescent()
2090 if ((!herc && (sp->pdev->revision >= 4)) || herc) { in verify_pcc_quiescent()
2104 * verify_xena_quiescence - Checks whether the H/W is ready
2118 struct XENA_dev_config __iomem *bar0 = sp->bar0; in verify_xena_quiescence()
2119 u64 val64 = readq(&bar0->adapter_status); in verify_xena_quiescence()
2161 sp->device_type == XFRAME_II_DEVICE && in verify_xena_quiescence()
2175 * fix_mac_address - Fix for MAC addr problem on Alpha platforms
2184 struct XENA_dev_config __iomem *bar0 = sp->bar0; in fix_mac_address()
2188 writeq(fix_mac[i++], &bar0->gpio_control); in fix_mac_address()
2190 (void) readq(&bar0->gpio_control); in fix_mac_address()
2195 * start_nic - Turns the device on
2204 * SUCCESS on success and -1 on failure.
2209 struct XENA_dev_config __iomem *bar0 = nic->bar0; in start_nic()
2210 struct net_device *dev = nic->dev; in start_nic()
2213 struct config_param *config = &nic->config; in start_nic()
2214 struct mac_info *mac_control = &nic->mac_control; in start_nic()
2217 for (i = 0; i < config->rx_ring_num; i++) { in start_nic()
2218 struct ring_info *ring = &mac_control->rings[i]; in start_nic()
2220 writeq((u64)ring->rx_blocks[0].block_dma_addr, in start_nic()
2221 &bar0->prc_rxd0_n[i]); in start_nic()
2223 val64 = readq(&bar0->prc_ctrl_n[i]); in start_nic()
2224 if (nic->rxd_mode == RXD_MODE_1) in start_nic()
2228 if (nic->device_type == XFRAME_II_DEVICE) in start_nic()
2232 writeq(val64, &bar0->prc_ctrl_n[i]); in start_nic()
2235 if (nic->rxd_mode == RXD_MODE_3B) { in start_nic()
2237 val64 = readq(&bar0->rx_pa_cfg); in start_nic()
2239 writeq(val64, &bar0->rx_pa_cfg); in start_nic()
2243 val64 = readq(&bar0->rx_pa_cfg); in start_nic()
2245 writeq(val64, &bar0->rx_pa_cfg); in start_nic()
2246 nic->vlan_strip_flag = 0; in start_nic()
2250 * Enabling MC-RLDRAM. After enabling the device, we timeout in start_nic()
2254 val64 = readq(&bar0->mc_rldram_mrs); in start_nic()
2256 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); in start_nic()
2257 val64 = readq(&bar0->mc_rldram_mrs); in start_nic()
2262 val64 = readq(&bar0->adapter_control); in start_nic()
2264 writeq(val64, &bar0->adapter_control); in start_nic()
2270 val64 = readq(&bar0->adapter_status); in start_nic()
2274 dev->name, (unsigned long long)val64); in start_nic()
2287 val64 = readq(&bar0->adapter_control); in start_nic()
2289 writeq(val64, &bar0->adapter_control); in start_nic()
2296 schedule_work(&nic->set_link_task); in start_nic()
2298 /* SXE-002: Initialize link and activity LED */ in start_nic()
2299 subid = nic->pdev->subsystem_device; in start_nic()
2301 (nic->device_type == XFRAME_I_DEVICE)) { in start_nic()
2302 val64 = readq(&bar0->gpio_control); in start_nic()
2304 writeq(val64, &bar0->gpio_control); in start_nic()
2312 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2320 struct s2io_nic *nic = fifo_data->nic; in s2io_txdl_getskb()
2326 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) { in s2io_txdl_getskb()
2327 dma_unmap_single(&nic->pdev->dev, in s2io_txdl_getskb()
2328 (dma_addr_t)txds->Buffer_Pointer, in s2io_txdl_getskb()
2333 skb = (struct sk_buff *)((unsigned long)txds->Host_Control); in s2io_txdl_getskb()
2335 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); in s2io_txdl_getskb()
2338 dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer, in s2io_txdl_getskb()
2340 frg_cnt = skb_shinfo(skb)->nr_frags; in s2io_txdl_getskb()
2344 const skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; in s2io_txdl_getskb()
2345 if (!txds->Buffer_Pointer) in s2io_txdl_getskb()
2347 dma_unmap_page(&nic->pdev->dev, in s2io_txdl_getskb()
2348 (dma_addr_t)txds->Buffer_Pointer, in s2io_txdl_getskb()
2352 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); in s2io_txdl_getskb()
2357 * free_tx_buffers - Free all queued Tx buffers
2366 struct net_device *dev = nic->dev; in free_tx_buffers()
2371 struct config_param *config = &nic->config; in free_tx_buffers()
2372 struct mac_info *mac_control = &nic->mac_control; in free_tx_buffers()
2373 struct stat_block *stats = mac_control->stats_info; in free_tx_buffers()
2374 struct swStat *swstats = &stats->sw_stat; in free_tx_buffers()
2376 for (i = 0; i < config->tx_fifo_num; i++) { in free_tx_buffers()
2377 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; in free_tx_buffers()
2378 struct fifo_info *fifo = &mac_control->fifos[i]; in free_tx_buffers()
2381 spin_lock_irqsave(&fifo->tx_lock, flags); in free_tx_buffers()
2382 for (j = 0; j < tx_cfg->fifo_len; j++) { in free_tx_buffers()
2383 txdp = fifo->list_info[j].list_virt_addr; in free_tx_buffers()
2384 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); in free_tx_buffers()
2386 swstats->mem_freed += skb->truesize; in free_tx_buffers()
2393 dev->name, cnt, i); in free_tx_buffers()
2394 fifo->tx_curr_get_info.offset = 0; in free_tx_buffers()
2395 fifo->tx_curr_put_info.offset = 0; in free_tx_buffers()
2396 spin_unlock_irqrestore(&fifo->tx_lock, flags); in free_tx_buffers()
2401 * stop_nic - To stop the nic
2412 struct XENA_dev_config __iomem *bar0 = nic->bar0; in stop_nic()
2423 val64 = readq(&bar0->adapter_control); in stop_nic()
2425 writeq(val64, &bar0->adapter_control); in stop_nic()
2429 * fill_rx_buffers - Allocates the Rx side skbs
2449 * SUCCESS on success or an appropriate -ve value on failure.
2465 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat; in fill_rx_buffers()
2467 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left; in fill_rx_buffers()
2469 block_no1 = ring->rx_curr_get_info.block_index; in fill_rx_buffers()
2471 block_no = ring->rx_curr_put_info.block_index; in fill_rx_buffers()
2473 off = ring->rx_curr_put_info.offset; in fill_rx_buffers()
2475 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr; in fill_rx_buffers()
2478 (off == ring->rx_curr_get_info.offset) && in fill_rx_buffers()
2479 (rxdp->Host_Control)) { in fill_rx_buffers()
2481 ring->dev->name); in fill_rx_buffers()
2484 if (off && (off == ring->rxd_count)) { in fill_rx_buffers()
2485 ring->rx_curr_put_info.block_index++; in fill_rx_buffers()
2486 if (ring->rx_curr_put_info.block_index == in fill_rx_buffers()
2487 ring->block_count) in fill_rx_buffers()
2488 ring->rx_curr_put_info.block_index = 0; in fill_rx_buffers()
2489 block_no = ring->rx_curr_put_info.block_index; in fill_rx_buffers()
2491 ring->rx_curr_put_info.offset = off; in fill_rx_buffers()
2492 rxdp = ring->rx_blocks[block_no].block_virt_addr; in fill_rx_buffers()
2494 ring->dev->name, rxdp); in fill_rx_buffers()
2498 if ((rxdp->Control_1 & RXD_OWN_XENA) && in fill_rx_buffers()
2499 ((ring->rxd_mode == RXD_MODE_3B) && in fill_rx_buffers()
2500 (rxdp->Control_2 & s2BIT(0)))) { in fill_rx_buffers()
2501 ring->rx_curr_put_info.offset = off; in fill_rx_buffers()
2505 size = ring->mtu + in fill_rx_buffers()
2508 if (ring->rxd_mode == RXD_MODE_1) in fill_rx_buffers()
2511 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4; in fill_rx_buffers()
2514 skb = netdev_alloc_skb(nic->dev, size); in fill_rx_buffers()
2517 ring->dev->name); in fill_rx_buffers()
2520 first_rxdp->Control_1 |= RXD_OWN_XENA; in fill_rx_buffers()
2522 swstats->mem_alloc_fail_cnt++; in fill_rx_buffers()
2524 return -ENOMEM ; in fill_rx_buffers()
2526 swstats->mem_allocated += skb->truesize; in fill_rx_buffers()
2528 if (ring->rxd_mode == RXD_MODE_1) { in fill_rx_buffers()
2529 /* 1 buffer mode - normal operation mode */ in fill_rx_buffers()
2533 rxdp1->Buffer0_ptr = in fill_rx_buffers()
2534 dma_map_single(&ring->pdev->dev, skb->data, in fill_rx_buffers()
2535 size - NET_IP_ALIGN, in fill_rx_buffers()
2537 if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr)) in fill_rx_buffers()
2540 rxdp->Control_2 = in fill_rx_buffers()
2541 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); in fill_rx_buffers()
2542 rxdp->Host_Control = (unsigned long)skb; in fill_rx_buffers()
2543 } else if (ring->rxd_mode == RXD_MODE_3B) { in fill_rx_buffers()
2545 * 2 buffer mode - in fill_rx_buffers()
2552 Buffer0_ptr = rxdp3->Buffer0_ptr; in fill_rx_buffers()
2553 Buffer1_ptr = rxdp3->Buffer1_ptr; in fill_rx_buffers()
2556 rxdp3->Buffer0_ptr = Buffer0_ptr; in fill_rx_buffers()
2557 rxdp3->Buffer1_ptr = Buffer1_ptr; in fill_rx_buffers()
2559 ba = &ring->ba[block_no][off]; in fill_rx_buffers()
2561 tmp = (u64)(unsigned long)skb->data; in fill_rx_buffers()
2564 skb->data = (void *) (unsigned long)tmp; in fill_rx_buffers()
2568 rxdp3->Buffer0_ptr = in fill_rx_buffers()
2569 dma_map_single(&ring->pdev->dev, in fill_rx_buffers()
2570 ba->ba_0, BUF0_LEN, in fill_rx_buffers()
2572 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr)) in fill_rx_buffers()
2575 dma_sync_single_for_device(&ring->pdev->dev, in fill_rx_buffers()
2576 (dma_addr_t)rxdp3->Buffer0_ptr, in fill_rx_buffers()
2580 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); in fill_rx_buffers()
2581 if (ring->rxd_mode == RXD_MODE_3B) { in fill_rx_buffers()
2588 rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev, in fill_rx_buffers()
2589 skb->data, in fill_rx_buffers()
2590 ring->mtu + 4, in fill_rx_buffers()
2593 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr)) in fill_rx_buffers()
2597 rxdp3->Buffer1_ptr = in fill_rx_buffers()
2598 dma_map_single(&ring->pdev->dev, in fill_rx_buffers()
2599 ba->ba_1, in fill_rx_buffers()
2603 if (dma_mapping_error(&nic->pdev->dev, in fill_rx_buffers()
2604 rxdp3->Buffer1_ptr)) { in fill_rx_buffers()
2605 dma_unmap_single(&ring->pdev->dev, in fill_rx_buffers()
2607 skb->data, in fill_rx_buffers()
2608 ring->mtu + 4, in fill_rx_buffers()
2613 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); in fill_rx_buffers()
2614 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 in fill_rx_buffers()
2615 (ring->mtu + 4); in fill_rx_buffers()
2617 rxdp->Control_2 |= s2BIT(0); in fill_rx_buffers()
2618 rxdp->Host_Control = (unsigned long) (skb); in fill_rx_buffers()
2620 if (alloc_tab & ((1 << rxsync_frequency) - 1)) in fill_rx_buffers()
2621 rxdp->Control_1 |= RXD_OWN_XENA; in fill_rx_buffers()
2623 if (off == (ring->rxd_count + 1)) in fill_rx_buffers()
2625 ring->rx_curr_put_info.offset = off; in fill_rx_buffers()
2627 rxdp->Control_2 |= SET_RXD_MARKER; in fill_rx_buffers()
2628 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { in fill_rx_buffers()
2631 first_rxdp->Control_1 |= RXD_OWN_XENA; in fill_rx_buffers()
2635 ring->rx_bufs_left += 1; in fill_rx_buffers()
2646 first_rxdp->Control_1 |= RXD_OWN_XENA; in fill_rx_buffers()
2652 swstats->pci_map_fail_cnt++; in fill_rx_buffers()
2653 swstats->mem_freed += skb->truesize; in fill_rx_buffers()
2655 return -ENOMEM; in fill_rx_buffers()
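fill_rx_buffers() hands descriptors back to the adapter in batches: the tests at lines 2620 and 2628 use alloc_tab & ((1 << rxsync_frequency) - 1), the usual power-of-two modulus, so the descriptor that begins a batch (saved in first_rxdp) gets its RXD_OWN_XENA bit set only when the next batch boundary is reached (lines 2628-2631), letting the adapter see a whole batch at once. A one-line sketch of the test:

/* Nonzero except when n is a multiple of 2^k, i.e. at batch boundaries. */
static int within_batch(unsigned long n, unsigned int k)
{
	return n & ((1UL << k) - 1);
}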
2660 struct net_device *dev = sp->dev; in free_rxd_blk()
2666 struct mac_info *mac_control = &sp->mac_control; in free_rxd_blk()
2667 struct stat_block *stats = mac_control->stats_info; in free_rxd_blk()
2668 struct swStat *swstats = &stats->sw_stat; in free_rxd_blk()
2670 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { in free_rxd_blk()
2671 rxdp = mac_control->rings[ring_no]. in free_rxd_blk()
2673 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); in free_rxd_blk()
2676 if (sp->rxd_mode == RXD_MODE_1) { in free_rxd_blk()
2678 dma_unmap_single(&sp->pdev->dev, in free_rxd_blk()
2679 (dma_addr_t)rxdp1->Buffer0_ptr, in free_rxd_blk()
2680 dev->mtu + in free_rxd_blk()
2685 } else if (sp->rxd_mode == RXD_MODE_3B) { in free_rxd_blk()
2687 dma_unmap_single(&sp->pdev->dev, in free_rxd_blk()
2688 (dma_addr_t)rxdp3->Buffer0_ptr, in free_rxd_blk()
2690 dma_unmap_single(&sp->pdev->dev, in free_rxd_blk()
2691 (dma_addr_t)rxdp3->Buffer1_ptr, in free_rxd_blk()
2693 dma_unmap_single(&sp->pdev->dev, in free_rxd_blk()
2694 (dma_addr_t)rxdp3->Buffer2_ptr, in free_rxd_blk()
2695 dev->mtu + 4, DMA_FROM_DEVICE); in free_rxd_blk()
2698 swstats->mem_freed += skb->truesize; in free_rxd_blk()
2700 mac_control->rings[ring_no].rx_bufs_left -= 1; in free_rxd_blk()
2705 * free_rx_buffers - Frees all Rx buffers
2715 struct net_device *dev = sp->dev; in free_rx_buffers()
2717 struct config_param *config = &sp->config; in free_rx_buffers()
2718 struct mac_info *mac_control = &sp->mac_control; in free_rx_buffers()
2720 for (i = 0; i < config->rx_ring_num; i++) { in free_rx_buffers()
2721 struct ring_info *ring = &mac_control->rings[i]; in free_rx_buffers()
2726 ring->rx_curr_put_info.block_index = 0; in free_rx_buffers()
2727 ring->rx_curr_get_info.block_index = 0; in free_rx_buffers()
2728 ring->rx_curr_put_info.offset = 0; in free_rx_buffers()
2729 ring->rx_curr_get_info.offset = 0; in free_rx_buffers()
2730 ring->rx_bufs_left = 0; in free_rx_buffers()
2732 dev->name, buf_cnt, i); in free_rx_buffers()
2738 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { in s2io_chk_rx_buffers()
2740 ring->dev->name); in s2io_chk_rx_buffers()
2746 * s2io_poll - Rx interrupt handler for NAPI support
2761 struct net_device *dev = ring->dev; in s2io_poll_msix()
2766 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_poll_msix()
2777 /* Re-enable MSI-X Rx vector */ in s2io_poll_msix()
2778 addr = (u8 __iomem *)&bar0->xmsi_mask_reg; in s2io_poll_msix()
2779 addr += 7 - ring->ring_no; in s2io_poll_msix()
2780 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf; in s2io_poll_msix()
2792 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_poll_inta()
2794 struct config_param *config = &nic->config; in s2io_poll_inta()
2795 struct mac_info *mac_control = &nic->mac_control; in s2io_poll_inta()
2800 for (i = 0; i < config->rx_ring_num; i++) { in s2io_poll_inta()
2801 struct ring_info *ring = &mac_control->rings[i]; in s2io_poll_inta()
2805 budget -= ring_pkts_processed; in s2io_poll_inta()
2812 writeq(0, &bar0->rx_traffic_mask); in s2io_poll_inta()
2813 readl(&bar0->rx_traffic_mask); in s2io_poll_inta()
2820 * s2io_netpoll - netpoll event handler entry point
2825 * specific in-kernel networking tasks, such as remote consoles and kernel
2831 const int irq = nic->pdev->irq; in s2io_netpoll()
2832 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_netpoll()
2835 struct config_param *config = &nic->config; in s2io_netpoll()
2836 struct mac_info *mac_control = &nic->mac_control; in s2io_netpoll()
2838 if (pci_channel_offline(nic->pdev)) in s2io_netpoll()
2843 writeq(val64, &bar0->rx_traffic_int); in s2io_netpoll()
2844 writeq(val64, &bar0->tx_traffic_int); in s2io_netpoll()
2850 for (i = 0; i < config->tx_fifo_num; i++) in s2io_netpoll()
2851 tx_intr_handler(&mac_control->fifos[i]); in s2io_netpoll()
2854 for (i = 0; i < config->rx_ring_num; i++) { in s2io_netpoll()
2855 struct ring_info *ring = &mac_control->rings[i]; in s2io_netpoll()
2860 for (i = 0; i < config->rx_ring_num; i++) { in s2io_netpoll()
2861 struct ring_info *ring = &mac_control->rings[i]; in s2io_netpoll()
2863 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { in s2io_netpoll()
2866 dev->name); in s2io_netpoll()
2875 * rx_intr_handler - Rx interrupt handler
2880 * receive ring contains fresh, as yet unprocessed frames, this function is
2901 get_info = ring_data->rx_curr_get_info; in rx_intr_handler()
2903 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info)); in rx_intr_handler()
2905 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr; in rx_intr_handler()
2915 ring_data->dev->name); in rx_intr_handler()
2918 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); in rx_intr_handler()
2921 ring_data->dev->name); in rx_intr_handler()
2924 if (ring_data->rxd_mode == RXD_MODE_1) { in rx_intr_handler()
2926 dma_unmap_single(&ring_data->pdev->dev, in rx_intr_handler()
2927 (dma_addr_t)rxdp1->Buffer0_ptr, in rx_intr_handler()
2928 ring_data->mtu + in rx_intr_handler()
2933 } else if (ring_data->rxd_mode == RXD_MODE_3B) { in rx_intr_handler()
2935 dma_sync_single_for_cpu(&ring_data->pdev->dev, in rx_intr_handler()
2936 (dma_addr_t)rxdp3->Buffer0_ptr, in rx_intr_handler()
2938 dma_unmap_single(&ring_data->pdev->dev, in rx_intr_handler()
2939 (dma_addr_t)rxdp3->Buffer2_ptr, in rx_intr_handler()
2940 ring_data->mtu + 4, DMA_FROM_DEVICE); in rx_intr_handler()
2942 prefetch(skb->data); in rx_intr_handler()
2945 ring_data->rx_curr_get_info.offset = get_info.offset; in rx_intr_handler()
2946 rxdp = ring_data->rx_blocks[get_block]. in rx_intr_handler()
2948 if (get_info.offset == rxd_count[ring_data->rxd_mode]) { in rx_intr_handler()
2950 ring_data->rx_curr_get_info.offset = get_info.offset; in rx_intr_handler()
2952 if (get_block == ring_data->block_count) in rx_intr_handler()
2954 ring_data->rx_curr_get_info.block_index = get_block; in rx_intr_handler()
2955 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; in rx_intr_handler()
2958 if (ring_data->nic->config.napi) { in rx_intr_handler()
2959 budget--; in rx_intr_handler()
2968 if (ring_data->lro) { in rx_intr_handler()
2971 struct lro *lro = &ring_data->lro0_n[i]; in rx_intr_handler()
2972 if (lro->in_use) { in rx_intr_handler()
2973 update_L3L4_header(ring_data->nic, lro); in rx_intr_handler()
2974 queue_rx_frame(lro->parent, lro->vlan_tag); in rx_intr_handler()
2983 * tx_intr_handler - Transmit interrupt handler
2996 struct s2io_nic *nic = fifo_data->nic; in tx_intr_handler()
3003 struct stat_block *stats = nic->mac_control.stats_info; in tx_intr_handler()
3004 struct swStat *swstats = &stats->sw_stat; in tx_intr_handler()
3006 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags)) in tx_intr_handler()
3009 get_info = fifo_data->tx_curr_get_info; in tx_intr_handler()
3010 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info)); in tx_intr_handler()
3011 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr; in tx_intr_handler()
3012 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && in tx_intr_handler()
3014 (txdlp->Host_Control)) { in tx_intr_handler()
3016 if (txdlp->Control_1 & TXD_T_CODE) { in tx_intr_handler()
3018 err = txdlp->Control_1 & TXD_T_CODE; in tx_intr_handler()
3020 swstats->parity_err_cnt++; in tx_intr_handler()
3027 swstats->tx_buf_abort_cnt++; in tx_intr_handler()
3031 swstats->tx_desc_abort_cnt++; in tx_intr_handler()
3035 swstats->tx_parity_err_cnt++; in tx_intr_handler()
3039 swstats->tx_link_loss_cnt++; in tx_intr_handler()
3043 swstats->tx_list_proc_err_cnt++; in tx_intr_handler()
3050 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); in tx_intr_handler()
3058 swstats->mem_freed += skb->truesize; in tx_intr_handler()
3064 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr; in tx_intr_handler()
3065 fifo_data->tx_curr_get_info.offset = get_info.offset; in tx_intr_handler()
3068 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq); in tx_intr_handler()
3070 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); in tx_intr_handler()
3074 * s2io_mdio_write - Function to write into MDIO registers
3088 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_mdio_write()
3094 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3096 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3105 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3107 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3114 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3116 writeq(val64, &bar0->mdio_control); in s2io_mdio_write()
3121 * s2io_mdio_read - Function to read from MDIO registers
3134 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_mdio_read()
3140 writeq(val64, &bar0->mdio_control); in s2io_mdio_read()
3142 writeq(val64, &bar0->mdio_control); in s2io_mdio_read()
3150 writeq(val64, &bar0->mdio_control); in s2io_mdio_read()
3152 writeq(val64, &bar0->mdio_control); in s2io_mdio_read()
3156 rval64 = readq(&bar0->mdio_control); in s2io_mdio_read()
3163 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3206 "Excessive laser output power may saturate far-end receiver.\n"); in s2io_chk_xpak_counter()
3223 * s2io_updt_xpak_counter - Function to update the xpak counters
3238 struct stat_block *stats = sp->mac_control.stats_info; in s2io_updt_xpak_counter()
3239 struct xpakStat *xstats = &stats->xpak_stat; in s2io_updt_xpak_counter()
3247 "ERR: MDIO slave access failed - Returned %llx\n", in s2io_updt_xpak_counter()
3254 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - " in s2io_updt_xpak_counter()
3255 "Returned: %llx- Expected: 0x%x\n", in s2io_updt_xpak_counter()
3272 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high, in s2io_updt_xpak_counter()
3273 &xstats->xpak_regs_stat, in s2io_updt_xpak_counter()
3277 xstats->alarm_transceiver_temp_low++; in s2io_updt_xpak_counter()
3281 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high, in s2io_updt_xpak_counter()
3282 &xstats->xpak_regs_stat, in s2io_updt_xpak_counter()
3286 xstats->alarm_laser_bias_current_low++; in s2io_updt_xpak_counter()
3290 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high, in s2io_updt_xpak_counter()
3291 &xstats->xpak_regs_stat, in s2io_updt_xpak_counter()
3295 xstats->alarm_laser_output_power_low++; in s2io_updt_xpak_counter()
3303 xstats->warn_transceiver_temp_high++; in s2io_updt_xpak_counter()
3306 xstats->warn_transceiver_temp_low++; in s2io_updt_xpak_counter()
3309 xstats->warn_laser_bias_current_high++; in s2io_updt_xpak_counter()
3312 xstats->warn_laser_bias_current_low++; in s2io_updt_xpak_counter()
3315 xstats->warn_laser_output_power_high++; in s2io_updt_xpak_counter()
3318 xstats->warn_laser_output_power_low++; in s2io_updt_xpak_counter()
3322 * wait_for_cmd_complete - waits for a command to complete.
3367 * check_pci_device_id - Checks if the device id is supported
3387 * s2io_reset - Resets the card.
3398 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_reset()
3409 __func__, pci_name(sp->pdev)); in s2io_reset()
3411 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */ in s2io_reset()
3412 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); in s2io_reset()
3415 writeq(val64, &bar0->sw_reset); in s2io_reset()
3416 if (strstr(sp->product_name, "CX4")) in s2io_reset()
3422 pci_restore_state(sp->pdev); in s2io_reset()
3423 pci_save_state(sp->pdev); in s2io_reset()
3424 pci_read_config_word(sp->pdev, 0x2, &val16); in s2io_reset()
3433 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); in s2io_reset()
3446 /* Clear certain PCI/PCI-X fields after reset */ in s2io_reset()
3447 if (sp->device_type == XFRAME_II_DEVICE) { in s2io_reset()
3449 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); in s2io_reset()
3452 pci_write_config_dword(sp->pdev, 0x68, 0x7C); in s2io_reset()
3455 writeq(s2BIT(62), &bar0->txpic_int_reg); in s2io_reset()
3459 memset(&sp->stats, 0, sizeof(struct net_device_stats)); in s2io_reset()
3461 stats = sp->mac_control.stats_info; in s2io_reset()
3462 swstats = &stats->sw_stat; in s2io_reset()
3465 up_cnt = swstats->link_up_cnt; in s2io_reset()
3466 down_cnt = swstats->link_down_cnt; in s2io_reset()
3467 up_time = swstats->link_up_time; in s2io_reset()
3468 down_time = swstats->link_down_time; in s2io_reset()
3469 reset_cnt = swstats->soft_reset_cnt; in s2io_reset()
3470 mem_alloc_cnt = swstats->mem_allocated; in s2io_reset()
3471 mem_free_cnt = swstats->mem_freed; in s2io_reset()
3472 watchdog_cnt = swstats->watchdog_timer_cnt; in s2io_reset()
3477 swstats->link_up_cnt = up_cnt; in s2io_reset()
3478 swstats->link_down_cnt = down_cnt; in s2io_reset()
3479 swstats->link_up_time = up_time; in s2io_reset()
3480 swstats->link_down_time = down_time; in s2io_reset()
3481 swstats->soft_reset_cnt = reset_cnt; in s2io_reset()
3482 swstats->mem_allocated = mem_alloc_cnt; in s2io_reset()
3483 swstats->mem_freed = mem_free_cnt; in s2io_reset()
3484 swstats->watchdog_timer_cnt = watchdog_cnt; in s2io_reset()
3486 /* SXE-002: Configure link and activity LED to turn it off */ in s2io_reset()
3487 subid = sp->pdev->subsystem_device; in s2io_reset()
3489 (sp->device_type == XFRAME_I_DEVICE)) { in s2io_reset()
3490 val64 = readq(&bar0->gpio_control); in s2io_reset()
3492 writeq(val64, &bar0->gpio_control); in s2io_reset()
3501 if (sp->device_type == XFRAME_II_DEVICE) { in s2io_reset()
3502 val64 = readq(&bar0->pcc_err_reg); in s2io_reset()
3503 writeq(val64, &bar0->pcc_err_reg); in s2io_reset()
3506 sp->device_enabled_once = false; in s2io_reset()
3510 	 * s2io_set_swapper - to set the swapper control on the card
3521 struct net_device *dev = sp->dev; in s2io_set_swapper()
3522 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_set_swapper()
3527 * the PIF Feed-back register. in s2io_set_swapper()
3530 val64 = readq(&bar0->pif_rd_swapper_fb); in s2io_set_swapper()
3541 writeq(value[i], &bar0->swapper_ctrl); in s2io_set_swapper()
3542 val64 = readq(&bar0->pif_rd_swapper_fb); in s2io_set_swapper()
3550 dev->name, (unsigned long long)val64); in s2io_set_swapper()
3555 valr = readq(&bar0->swapper_ctrl); in s2io_set_swapper()
3559 writeq(valt, &bar0->xmsi_address); in s2io_set_swapper()
3560 val64 = readq(&bar0->xmsi_address); in s2io_set_swapper()
3572 writeq((value[i] | valr), &bar0->swapper_ctrl); in s2io_set_swapper()
3573 writeq(valt, &bar0->xmsi_address); in s2io_set_swapper()
3574 val64 = readq(&bar0->xmsi_address); in s2io_set_swapper()
3586 val64 = readq(&bar0->swapper_ctrl); in s2io_set_swapper()
3605 if (sp->config.intr_type == INTA) in s2io_set_swapper()
3607 writeq(val64, &bar0->swapper_ctrl); in s2io_set_swapper()
3629 if (sp->config.intr_type == INTA) in s2io_set_swapper()
3631 writeq(val64, &bar0->swapper_ctrl); in s2io_set_swapper()
3633 val64 = readq(&bar0->swapper_ctrl); in s2io_set_swapper()
3639 val64 = readq(&bar0->pif_rd_swapper_fb); in s2io_set_swapper()
3644 dev->name, (unsigned long long)val64); in s2io_set_swapper()
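/*
 * Editorial sketch of the verification idea used above: the PIF feed-back
 * register returns a fixed 64-bit signature only when the byte swapper is
 * programmed correctly for this host's endianness, so the driver tries each
 * candidate swapper_ctrl value until the signature reads back.  The
 * signature constant is assumed from the surrounding checks.
 */
static bool swapper_ok(struct XENA_dev_config __iomem *bar0)
{
	return readq(&bar0->pif_rd_swapper_fb) == 0x0123456789ABCDEFULL;
}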
3653 struct XENA_dev_config __iomem *bar0 = nic->bar0; in wait_for_msix_trans()
3658 val64 = readq(&bar0->xmsi_access); in wait_for_msix_trans()
3674 struct XENA_dev_config __iomem *bar0 = nic->bar0; in restore_xmsi_data()
3678 if (nic->device_type == XFRAME_I_DEVICE) in restore_xmsi_data()
3682 msix_index = (i) ? ((i-1) * 8 + 1) : 0; in restore_xmsi_data()
3683 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); in restore_xmsi_data()
3684 writeq(nic->msix_info[i].data, &bar0->xmsi_data); in restore_xmsi_data()
3686 writeq(val64, &bar0->xmsi_access); in restore_xmsi_data()
3695 struct XENA_dev_config __iomem *bar0 = nic->bar0; in store_xmsi_data()
3699 if (nic->device_type == XFRAME_I_DEVICE) in store_xmsi_data()
3704 msix_index = (i) ? ((i-1) * 8 + 1) : 0; in store_xmsi_data()
3706 writeq(val64, &bar0->xmsi_access); in store_xmsi_data()
3712 addr = readq(&bar0->xmsi_address); in store_xmsi_data()
3713 data = readq(&bar0->xmsi_data); in store_xmsi_data()
3715 nic->msix_info[i].addr = addr; in store_xmsi_data()
3716 nic->msix_info[i].data = data; in store_xmsi_data()
3723 struct XENA_dev_config __iomem *bar0 = nic->bar0; in s2io_enable_msi_x()
3728 struct stat_block *stats = nic->mac_control.stats_info; in s2io_enable_msi_x()
3729 struct swStat *swstats = &stats->sw_stat; in s2io_enable_msi_x()
3731 size = nic->num_entries * sizeof(struct msix_entry); in s2io_enable_msi_x()
3732 nic->entries = kzalloc(size, GFP_KERNEL); in s2io_enable_msi_x()
3733 if (!nic->entries) { in s2io_enable_msi_x()
3736 swstats->mem_alloc_fail_cnt++; in s2io_enable_msi_x()
3737 return -ENOMEM; in s2io_enable_msi_x()
3739 swstats->mem_allocated += size; in s2io_enable_msi_x()
3741 size = nic->num_entries * sizeof(struct s2io_msix_entry); in s2io_enable_msi_x()
3742 nic->s2io_entries = kzalloc(size, GFP_KERNEL); in s2io_enable_msi_x()
3743 if (!nic->s2io_entries) { in s2io_enable_msi_x()
3746 swstats->mem_alloc_fail_cnt++; in s2io_enable_msi_x()
3747 kfree(nic->entries); in s2io_enable_msi_x()
3748 swstats->mem_freed in s2io_enable_msi_x()
3749 += (nic->num_entries * sizeof(struct msix_entry)); in s2io_enable_msi_x()
3750 return -ENOMEM; in s2io_enable_msi_x()
3752 swstats->mem_allocated += size; in s2io_enable_msi_x()
3754 nic->entries[0].entry = 0; in s2io_enable_msi_x()
3755 nic->s2io_entries[0].entry = 0; in s2io_enable_msi_x()
3756 nic->s2io_entries[0].in_use = MSIX_FLG; in s2io_enable_msi_x()
3757 nic->s2io_entries[0].type = MSIX_ALARM_TYPE; in s2io_enable_msi_x()
3758 nic->s2io_entries[0].arg = &nic->mac_control.fifos; in s2io_enable_msi_x()
3760 for (i = 1; i < nic->num_entries; i++) { in s2io_enable_msi_x()
3761 nic->entries[i].entry = ((i - 1) * 8) + 1; in s2io_enable_msi_x()
3762 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; in s2io_enable_msi_x()
3763 nic->s2io_entries[i].arg = NULL; in s2io_enable_msi_x()
3764 nic->s2io_entries[i].in_use = 0; in s2io_enable_msi_x()
3767 rx_mat = readq(&bar0->rx_mat); in s2io_enable_msi_x()
3768 for (j = 0; j < nic->config.rx_ring_num; j++) { in s2io_enable_msi_x()
3770 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; in s2io_enable_msi_x()
3771 nic->s2io_entries[j+1].type = MSIX_RING_TYPE; in s2io_enable_msi_x()
3772 nic->s2io_entries[j+1].in_use = MSIX_FLG; in s2io_enable_msi_x()
3775 writeq(rx_mat, &bar0->rx_mat); in s2io_enable_msi_x()
3776 readq(&bar0->rx_mat); in s2io_enable_msi_x()
3778 ret = pci_enable_msix_range(nic->pdev, nic->entries, in s2io_enable_msi_x()
3779 nic->num_entries, nic->num_entries); in s2io_enable_msi_x()
3782 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n"); in s2io_enable_msi_x()
3783 kfree(nic->entries); in s2io_enable_msi_x()
3784 swstats->mem_freed += nic->num_entries * in s2io_enable_msi_x()
3786 kfree(nic->s2io_entries); in s2io_enable_msi_x()
3787 swstats->mem_freed += nic->num_entries * in s2io_enable_msi_x()
3789 nic->entries = NULL; in s2io_enable_msi_x()
3790 nic->s2io_entries = NULL; in s2io_enable_msi_x()
3791 return -ENOMEM; in s2io_enable_msi_x()
3795 * To enable MSI-X, MSI also needs to be enabled, due to a bug in s2io_enable_msi_x()
3798 pci_read_config_word(nic->pdev, 0x42, &msi_control); in s2io_enable_msi_x()
3800 pci_write_config_word(nic->pdev, 0x42, msi_control); in s2io_enable_msi_x()
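/*
 * Usage note: pci_enable_msix_range() is called above with minvec == maxvec
 * (both nic->num_entries), which makes the allocation all-or-nothing -- the
 * kernel either grants every requested vector or returns a negative errno,
 * so no partial-vector fallback path is needed here.
 */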
3810 sp->msi_detected = 1; in s2io_test_intr()
3811 wake_up(&sp->msi_wait); in s2io_test_intr()
3819 struct pci_dev *pdev = sp->pdev; in s2io_test_msi()
3820 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_test_msi()
3824 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0, in s2io_test_msi()
3825 sp->name, sp); in s2io_test_msi()
3828 sp->dev->name, pci_name(pdev), pdev->irq); in s2io_test_msi()
3832 init_waitqueue_head(&sp->msi_wait); in s2io_test_msi()
3833 sp->msi_detected = 0; in s2io_test_msi()
3835 saved64 = val64 = readq(&bar0->scheduled_int_ctrl); in s2io_test_msi()
3839 writeq(val64, &bar0->scheduled_int_ctrl); in s2io_test_msi()
3841 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10); in s2io_test_msi()
3843 if (!sp->msi_detected) { in s2io_test_msi()
3847 sp->dev->name, pci_name(pdev)); in s2io_test_msi()
3849 err = -EOPNOTSUPP; in s2io_test_msi()
3852 free_irq(sp->entries[1].vector, sp); in s2io_test_msi()
3854 writeq(saved64, &bar0->scheduled_int_ctrl); in s2io_test_msi()
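/*
 * The self-test above follows a common MSI(-X) validation pattern: hook a
 * throwaway handler on the test vector, make the hardware fire a single
 * interrupt (here via the scheduled-interrupt control register), then
 * wait_event_timeout() on a flag the handler sets.  If the flag never
 * appears, the platform is not delivering MSI-X and the driver can fall
 * back to INTA.
 */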
3864 for (i = 0; i < sp->num_entries; i++) { in remove_msix_isr()
3865 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) { in remove_msix_isr()
3866 int vector = sp->entries[i].vector; in remove_msix_isr()
3867 void *arg = sp->s2io_entries[i].arg; in remove_msix_isr()
3872 kfree(sp->entries); in remove_msix_isr()
3873 kfree(sp->s2io_entries); in remove_msix_isr()
3874 sp->entries = NULL; in remove_msix_isr()
3875 sp->s2io_entries = NULL; in remove_msix_isr()
3877 pci_read_config_word(sp->pdev, 0x42, &msi_control); in remove_msix_isr()
3879 pci_write_config_word(sp->pdev, 0x42, msi_control); in remove_msix_isr()
3881 pci_disable_msix(sp->pdev); in remove_msix_isr()
3886 free_irq(sp->pdev->irq, sp->dev); in remove_inta_isr()
3894 * s2io_open - open entry point of the driver
3901 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3908 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; in s2io_open()
3916 sp->last_link_state = 0; in s2io_open()
3922 dev->name); in s2io_open()
3926 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) { in s2io_open()
3929 err = -ENODEV; in s2io_open()
3936 if (sp->config.intr_type == MSI_X) { in s2io_open()
3937 if (sp->entries) { in s2io_open()
3938 kfree(sp->entries); in s2io_open()
3939 swstats->mem_freed += sp->num_entries * in s2io_open()
3942 if (sp->s2io_entries) { in s2io_open()
3943 kfree(sp->s2io_entries); in s2io_open()
3944 swstats->mem_freed += sp->num_entries * in s2io_open()
3952 	 * s2io_close - close entry point of the driver
3960 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3967 struct config_param *config = &sp->config; in s2io_close()
3979 for (offset = 1; offset < config->max_mc_addr; offset++) { in s2io_close()
3991 	 * s2io_xmit - Tx entry point of the driver
4015 struct config_param *config = &sp->config; in s2io_xmit()
4016 struct mac_info *mac_control = &sp->mac_control; in s2io_xmit()
4017 struct stat_block *stats = mac_control->stats_info; in s2io_xmit()
4018 struct swStat *swstats = &stats->sw_stat; in s2io_xmit()
4020 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name); in s2io_xmit()
4022 if (unlikely(skb->len <= 0)) { in s2io_xmit()
4023 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name); in s2io_xmit()
4030 dev->name); in s2io_xmit()
4038 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) { in s2io_xmit()
4039 if (skb->protocol == htons(ETH_P_IP)) { in s2io_xmit()
4046 ip->ihl*4); in s2io_xmit()
4048 if (ip->protocol == IPPROTO_TCP) { in s2io_xmit()
4049 queue_len = sp->total_tcp_fifos; in s2io_xmit()
4050 queue = (ntohs(th->source) + in s2io_xmit()
4051 ntohs(th->dest)) & in s2io_xmit()
4052 sp->fifo_selector[queue_len - 1]; in s2io_xmit()
4054 queue = queue_len - 1; in s2io_xmit()
4055 } else if (ip->protocol == IPPROTO_UDP) { in s2io_xmit()
4056 queue_len = sp->total_udp_fifos; in s2io_xmit()
4057 queue = (ntohs(th->source) + in s2io_xmit()
4058 ntohs(th->dest)) & in s2io_xmit()
4059 sp->fifo_selector[queue_len - 1]; in s2io_xmit()
4061 queue = queue_len - 1; in s2io_xmit()
4062 queue += sp->udp_fifo_idx; in s2io_xmit()
4063 if (skb->len > 1024) in s2io_xmit()
4068 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING) in s2io_xmit()
4069 /* get fifo number based on skb->priority value */ in s2io_xmit()
4070 queue = config->fifo_mapping in s2io_xmit()
4071 [skb->priority & (MAX_TX_FIFOS - 1)]; in s2io_xmit()
4072 fifo = &mac_control->fifos[queue]; in s2io_xmit()
4074 spin_lock_irqsave(&fifo->tx_lock, flags); in s2io_xmit()
4076 if (sp->config.multiq) { in s2io_xmit()
4077 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { in s2io_xmit()
4078 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
4081 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) { in s2io_xmit()
4083 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
4088 put_off = (u16)fifo->tx_curr_put_info.offset; in s2io_xmit()
4089 get_off = (u16)fifo->tx_curr_get_info.offset; in s2io_xmit()
4090 txdp = fifo->list_info[put_off].list_virt_addr; in s2io_xmit()
4092 queue_len = fifo->tx_curr_put_info.fifo_len + 1; in s2io_xmit()
4094 if (txdp->Host_Control || in s2io_xmit()
4097 s2io_stop_tx_queue(sp, fifo->fifo_no); in s2io_xmit()
4099 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
4105 txdp->Control_1 |= TXD_TCP_LSO_EN; in s2io_xmit()
4106 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); in s2io_xmit()
4108 if (skb->ip_summed == CHECKSUM_PARTIAL) { in s2io_xmit()
4109 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN | in s2io_xmit()
4113 txdp->Control_1 |= TXD_GATHER_CODE_FIRST; in s2io_xmit()
4114 txdp->Control_1 |= TXD_LIST_OWN_XENA; in s2io_xmit()
4115 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no); in s2io_xmit()
4118 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST; in s2io_xmit()
4120 txdp->Control_2 |= TXD_VLAN_ENABLE; in s2io_xmit()
4121 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); in s2io_xmit()
4125 txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data, in s2io_xmit()
4127 if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer)) in s2io_xmit()
4130 txdp->Host_Control = (unsigned long)skb; in s2io_xmit()
4131 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); in s2io_xmit()
4133 frg_cnt = skb_shinfo(skb)->nr_frags; in s2io_xmit()
4136 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in s2io_xmit()
4141 txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev, in s2io_xmit()
4145 txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag)); in s2io_xmit()
4147 txdp->Control_1 |= TXD_GATHER_CODE_LAST; in s2io_xmit()
4149 tx_fifo = mac_control->tx_FIFO_start[queue]; in s2io_xmit()
4150 val64 = fifo->list_info[put_off].list_phy_addr; in s2io_xmit()
4151 writeq(val64, &tx_fifo->TxDL_Pointer); in s2io_xmit()
4158 writeq(val64, &tx_fifo->List_Control); in s2io_xmit()
4161 if (put_off == fifo->tx_curr_put_info.fifo_len + 1) in s2io_xmit()
4163 fifo->tx_curr_put_info.offset = put_off; in s2io_xmit()
4167 swstats->fifo_full_cnt++; in s2io_xmit()
4171 s2io_stop_tx_queue(sp, fifo->fifo_no); in s2io_xmit()
4173 swstats->mem_allocated += skb->truesize; in s2io_xmit()
4174 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
4176 if (sp->config.intr_type == MSI_X) in s2io_xmit()
4182 swstats->pci_map_fail_cnt++; in s2io_xmit()
4183 s2io_stop_tx_queue(sp, fifo->fifo_no); in s2io_xmit()
4184 swstats->mem_freed += skb->truesize; in s2io_xmit()
4186 spin_unlock_irqrestore(&fifo->tx_lock, flags); in s2io_xmit()
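/*
 * Editorial sketch of the default Tx steering computed earlier in
 * s2io_xmit(): both L4 ports are folded together and masked into the
 * protocol's fifo range, with the UDP fifos sitting at an offset.
 * fifo_selector[] is assumed to hold a power-of-two-minus-one mask; the
 * helper below is illustrative, not a driver function.
 */
static u16 default_tx_steering(u16 sport, u16 dport, u16 mask, u16 base_fifo)
{
	return base_fifo + ((sport + dport) & mask);	/* hash -> fifo no. */
}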
4194 struct net_device *dev = sp->dev; in s2io_alarm_handle()
4197 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); in s2io_alarm_handle()
4203 struct s2io_nic *sp = ring->nic; in s2io_msix_ring_handle()
4204 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_msix_ring_handle()
4209 if (sp->config.napi) { in s2io_msix_ring_handle()
4213 addr = (u8 __iomem *)&bar0->xmsi_mask_reg; in s2io_msix_ring_handle()
4214 addr += (7 - ring->ring_no); in s2io_msix_ring_handle()
4215 val8 = (ring->ring_no == 0) ? 0x7f : 0xff; in s2io_msix_ring_handle()
4218 napi_schedule(&ring->napi); in s2io_msix_ring_handle()
4231 struct s2io_nic *sp = fifos->nic; in s2io_msix_fifo_handle()
4232 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_msix_fifo_handle()
4233 struct config_param *config = &sp->config; in s2io_msix_fifo_handle()
4239 reason = readq(&bar0->general_int_status); in s2io_msix_fifo_handle()
4245 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); in s2io_msix_fifo_handle()
4251 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); in s2io_msix_fifo_handle()
4253 for (i = 0; i < config->tx_fifo_num; i++) in s2io_msix_fifo_handle()
4256 writeq(sp->general_int_mask, &bar0->general_int_mask); in s2io_msix_fifo_handle()
4257 readl(&bar0->general_int_status); in s2io_msix_fifo_handle()
4266 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_txpic_intr_handle()
4269 val64 = readq(&bar0->pic_int_status); in s2io_txpic_intr_handle()
4271 val64 = readq(&bar0->gpio_int_reg); in s2io_txpic_intr_handle()
4276 * interrupt and adapter to re-evaluate the link state. in s2io_txpic_intr_handle()
4280 writeq(val64, &bar0->gpio_int_reg); in s2io_txpic_intr_handle()
4281 val64 = readq(&bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4284 writeq(val64, &bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4286 val64 = readq(&bar0->adapter_status); in s2io_txpic_intr_handle()
4288 val64 = readq(&bar0->adapter_control); in s2io_txpic_intr_handle()
4290 writeq(val64, &bar0->adapter_control); in s2io_txpic_intr_handle()
4292 writeq(val64, &bar0->adapter_control); in s2io_txpic_intr_handle()
4293 if (!sp->device_enabled_once) in s2io_txpic_intr_handle()
4294 sp->device_enabled_once = 1; in s2io_txpic_intr_handle()
4298 * unmask link down interrupt and mask link-up in s2io_txpic_intr_handle()
4301 val64 = readq(&bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4304 writeq(val64, &bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4307 val64 = readq(&bar0->adapter_status); in s2io_txpic_intr_handle()
4310 val64 = readq(&bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4313 writeq(val64, &bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4316 val64 = readq(&bar0->adapter_control); in s2io_txpic_intr_handle()
4318 writeq(val64, &bar0->adapter_control); in s2io_txpic_intr_handle()
4321 val64 = readq(&bar0->gpio_int_mask); in s2io_txpic_intr_handle()
4325 	 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4331 * 1 - if alarm bit set
4332 * 0 - if alarm bit is not set
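/*
 * Editorial sketch of do_s2io_chk_alarm_bit(), inferred from the contract
 * documented above and its callers below; the write-back acknowledge is an
 * assumption based on the register style used elsewhere in this file.
 */
static int chk_alarm_bit_sketch(u64 alarm_bit, void __iomem *reg, u64 *cnt)
{
	if (readq(reg) & alarm_bit) {
		writeq(alarm_bit, reg);	/* acknowledge the alarm (assumed) */
		(*cnt)++;		/* counted for ethtool statistics */
		return 1;		/* alarm bit was set */
	}
	return 0;			/* alarm bit was not set */
}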
4349 * s2io_handle_errors - Xframe error indication handler
4360 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_handle_errors()
4364 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat; in s2io_handle_errors()
4365 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat; in s2io_handle_errors()
4370 if (pci_channel_offline(sp->pdev)) in s2io_handle_errors()
4373 memset(&sw_stat->ring_full_cnt, 0, in s2io_handle_errors()
4374 sizeof(sw_stat->ring_full_cnt)); in s2io_handle_errors()
4377 if (stats->xpak_timer_count < 72000) { in s2io_handle_errors()
4379 stats->xpak_timer_count++; in s2io_handle_errors()
4383 stats->xpak_timer_count = 0; in s2io_handle_errors()
4388 val64 = readq(&bar0->mac_rmac_err_reg); in s2io_handle_errors()
4389 writeq(val64, &bar0->mac_rmac_err_reg); in s2io_handle_errors()
4391 schedule_work(&sp->set_link_task); in s2io_handle_errors()
4395 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source, in s2io_handle_errors()
4396 &sw_stat->serious_err_cnt)) in s2io_handle_errors()
4400 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg, in s2io_handle_errors()
4401 &sw_stat->parity_err_cnt)) in s2io_handle_errors()
4405 if (sp->device_type == XFRAME_II_DEVICE) { in s2io_handle_errors()
4406 val64 = readq(&bar0->ring_bump_counter1); in s2io_handle_errors()
4409 temp64 >>= 64 - ((i+1)*16); in s2io_handle_errors()
4410 sw_stat->ring_full_cnt[i] += temp64; in s2io_handle_errors()
4413 val64 = readq(&bar0->ring_bump_counter2); in s2io_handle_errors()
4416 temp64 >>= 64 - ((i+1)*16); in s2io_handle_errors()
4417 sw_stat->ring_full_cnt[i+4] += temp64; in s2io_handle_errors()
4421 val64 = readq(&bar0->txdma_int_status); in s2io_handle_errors()
4427 &bar0->pfc_err_reg, in s2io_handle_errors()
4428 &sw_stat->pfc_err_cnt)) in s2io_handle_errors()
4431 &bar0->pfc_err_reg, in s2io_handle_errors()
4432 &sw_stat->pfc_err_cnt); in s2io_handle_errors()
4440 &bar0->tda_err_reg, in s2io_handle_errors()
4441 &sw_stat->tda_err_cnt)) in s2io_handle_errors()
4444 &bar0->tda_err_reg, in s2io_handle_errors()
4445 &sw_stat->tda_err_cnt); in s2io_handle_errors()
4454 &bar0->pcc_err_reg, in s2io_handle_errors()
4455 &sw_stat->pcc_err_cnt)) in s2io_handle_errors()
4458 &bar0->pcc_err_reg, in s2io_handle_errors()
4459 &sw_stat->pcc_err_cnt); in s2io_handle_errors()
4465 &bar0->tti_err_reg, in s2io_handle_errors()
4466 &sw_stat->tti_err_cnt)) in s2io_handle_errors()
4469 &bar0->tti_err_reg, in s2io_handle_errors()
4470 &sw_stat->tti_err_cnt); in s2io_handle_errors()
4477 &bar0->lso_err_reg, in s2io_handle_errors()
4478 &sw_stat->lso_err_cnt)) in s2io_handle_errors()
4481 &bar0->lso_err_reg, in s2io_handle_errors()
4482 &sw_stat->lso_err_cnt); in s2io_handle_errors()
4488 &bar0->tpa_err_reg, in s2io_handle_errors()
4489 &sw_stat->tpa_err_cnt)) in s2io_handle_errors()
4492 &bar0->tpa_err_reg, in s2io_handle_errors()
4493 &sw_stat->tpa_err_cnt); in s2io_handle_errors()
4499 &bar0->sm_err_reg, in s2io_handle_errors()
4500 &sw_stat->sm_err_cnt)) in s2io_handle_errors()
4504 val64 = readq(&bar0->mac_int_status); in s2io_handle_errors()
4507 &bar0->mac_tmac_err_reg, in s2io_handle_errors()
4508 &sw_stat->mac_tmac_err_cnt)) in s2io_handle_errors()
4513 &bar0->mac_tmac_err_reg, in s2io_handle_errors()
4514 &sw_stat->mac_tmac_err_cnt); in s2io_handle_errors()
4517 val64 = readq(&bar0->xgxs_int_status); in s2io_handle_errors()
4520 &bar0->xgxs_txgxs_err_reg, in s2io_handle_errors()
4521 &sw_stat->xgxs_txgxs_err_cnt)) in s2io_handle_errors()
4524 &bar0->xgxs_txgxs_err_reg, in s2io_handle_errors()
4525 &sw_stat->xgxs_txgxs_err_cnt); in s2io_handle_errors()
4528 val64 = readq(&bar0->rxdma_int_status); in s2io_handle_errors()
4534 &bar0->rc_err_reg, in s2io_handle_errors()
4535 &sw_stat->rc_err_cnt)) in s2io_handle_errors()
4539 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg, in s2io_handle_errors()
4540 &sw_stat->rc_err_cnt); in s2io_handle_errors()
4544 &bar0->prc_pcix_err_reg, in s2io_handle_errors()
4545 &sw_stat->prc_pcix_err_cnt)) in s2io_handle_errors()
4550 &bar0->prc_pcix_err_reg, in s2io_handle_errors()
4551 &sw_stat->prc_pcix_err_cnt); in s2io_handle_errors()
4556 &bar0->rpa_err_reg, in s2io_handle_errors()
4557 &sw_stat->rpa_err_cnt)) in s2io_handle_errors()
4560 &bar0->rpa_err_reg, in s2io_handle_errors()
4561 &sw_stat->rpa_err_cnt); in s2io_handle_errors()
4570 &bar0->rda_err_reg, in s2io_handle_errors()
4571 &sw_stat->rda_err_cnt)) in s2io_handle_errors()
4577 &bar0->rda_err_reg, in s2io_handle_errors()
4578 &sw_stat->rda_err_cnt); in s2io_handle_errors()
4583 &bar0->rti_err_reg, in s2io_handle_errors()
4584 &sw_stat->rti_err_cnt)) in s2io_handle_errors()
4587 &bar0->rti_err_reg, in s2io_handle_errors()
4588 &sw_stat->rti_err_cnt); in s2io_handle_errors()
4591 val64 = readq(&bar0->mac_int_status); in s2io_handle_errors()
4594 &bar0->mac_rmac_err_reg, in s2io_handle_errors()
4595 &sw_stat->mac_rmac_err_cnt)) in s2io_handle_errors()
4600 &bar0->mac_rmac_err_reg, in s2io_handle_errors()
4601 &sw_stat->mac_rmac_err_cnt); in s2io_handle_errors()
4604 val64 = readq(&bar0->xgxs_int_status); in s2io_handle_errors()
4607 &bar0->xgxs_rxgxs_err_reg, in s2io_handle_errors()
4608 &sw_stat->xgxs_rxgxs_err_cnt)) in s2io_handle_errors()
4612 val64 = readq(&bar0->mc_int_status); in s2io_handle_errors()
4615 &bar0->mc_err_reg, in s2io_handle_errors()
4616 &sw_stat->mc_err_cnt)) in s2io_handle_errors()
4621 writeq(val64, &bar0->mc_err_reg); in s2io_handle_errors()
4623 sw_stat->double_ecc_errs++; in s2io_handle_errors()
4624 if (sp->device_type != XFRAME_II_DEVICE) { in s2io_handle_errors()
4634 sw_stat->single_ecc_errs++; in s2io_handle_errors()
4641 schedule_work(&sp->rst_timer_task); in s2io_handle_errors()
4642 sw_stat->soft_reset_cnt++; in s2io_handle_errors()
4646 	 * s2io_isr - ISR handler of the device.
4662 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_isr()
4669 if (pci_channel_offline(sp->pdev)) in s2io_isr()
4675 config = &sp->config; in s2io_isr()
4676 mac_control = &sp->mac_control; in s2io_isr()
4685 reason = readq(&bar0->general_int_status); in s2io_isr()
4692 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); in s2io_isr()
4694 if (config->napi) { in s2io_isr()
4696 napi_schedule(&sp->napi); in s2io_isr()
4697 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); in s2io_isr()
4698 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); in s2io_isr()
4699 readl(&bar0->rx_traffic_int); in s2io_isr()
4708 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); in s2io_isr()
4710 for (i = 0; i < config->rx_ring_num; i++) { in s2io_isr()
4711 struct ring_info *ring = &mac_control->rings[i]; in s2io_isr()
4723 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); in s2io_isr()
4725 for (i = 0; i < config->tx_fifo_num; i++) in s2io_isr()
4726 tx_intr_handler(&mac_control->fifos[i]); in s2io_isr()
4734 if (!config->napi) { in s2io_isr()
4735 for (i = 0; i < config->rx_ring_num; i++) { in s2io_isr()
4736 struct ring_info *ring = &mac_control->rings[i]; in s2io_isr()
4741 writeq(sp->general_int_mask, &bar0->general_int_mask); in s2io_isr()
4742 readl(&bar0->general_int_status); in s2io_isr()
4755 	 * s2io_updt_stats - triggers the adapter to update the statistics block in host memory and polls for completion.
4759 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_updt_stats()
4767 writeq(val64, &bar0->stat_cfg); in s2io_updt_stats()
4770 val64 = readq(&bar0->stat_cfg); in s2io_updt_stats()
4781 * s2io_get_stats - Updates the device statistics structure.
4792 struct mac_info *mac_control = &sp->mac_control; in s2io_get_stats()
4793 struct stat_block *stats = mac_control->stats_info; in s2io_get_stats()
4799 	/* A device reset will cause the on-adapter statistics to be zeroed. in s2io_get_stats()
4806 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 | in s2io_get_stats()
4807 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets; in s2io_get_stats()
4808 sp->stats.rx_packets += delta; in s2io_get_stats()
4809 dev->stats.rx_packets += delta; in s2io_get_stats()
4811 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 | in s2io_get_stats()
4812 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets; in s2io_get_stats()
4813 sp->stats.tx_packets += delta; in s2io_get_stats()
4814 dev->stats.tx_packets += delta; in s2io_get_stats()
4816 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 | in s2io_get_stats()
4817 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes; in s2io_get_stats()
4818 sp->stats.rx_bytes += delta; in s2io_get_stats()
4819 dev->stats.rx_bytes += delta; in s2io_get_stats()
4821 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 | in s2io_get_stats()
4822 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes; in s2io_get_stats()
4823 sp->stats.tx_bytes += delta; in s2io_get_stats()
4824 dev->stats.tx_bytes += delta; in s2io_get_stats()
4826 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors; in s2io_get_stats()
4827 sp->stats.rx_errors += delta; in s2io_get_stats()
4828 dev->stats.rx_errors += delta; in s2io_get_stats()
4830 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 | in s2io_get_stats()
4831 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors; in s2io_get_stats()
4832 sp->stats.tx_errors += delta; in s2io_get_stats()
4833 dev->stats.tx_errors += delta; in s2io_get_stats()
4835 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped; in s2io_get_stats()
4836 sp->stats.rx_dropped += delta; in s2io_get_stats()
4837 dev->stats.rx_dropped += delta; in s2io_get_stats()
4839 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped; in s2io_get_stats()
4840 sp->stats.tx_dropped += delta; in s2io_get_stats()
4841 dev->stats.tx_dropped += delta; in s2io_get_stats()
4848 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 | in s2io_get_stats()
4849 le32_to_cpu(stats->rmac_vld_mcst_frms); in s2io_get_stats()
4850 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms); in s2io_get_stats()
4851 delta -= sp->stats.multicast; in s2io_get_stats()
4852 sp->stats.multicast += delta; in s2io_get_stats()
4853 dev->stats.multicast += delta; in s2io_get_stats()
4855 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 | in s2io_get_stats()
4856 le32_to_cpu(stats->rmac_usized_frms)) + in s2io_get_stats()
4857 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors; in s2io_get_stats()
4858 sp->stats.rx_length_errors += delta; in s2io_get_stats()
4859 dev->stats.rx_length_errors += delta; in s2io_get_stats()
4861 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors; in s2io_get_stats()
4862 sp->stats.rx_crc_errors += delta; in s2io_get_stats()
4863 dev->stats.rx_crc_errors += delta; in s2io_get_stats()
4865 return &dev->stats; in s2io_get_stats()
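/*
 * Editorial sketch of the delta pattern repeated above: each hardware
 * counter is a little-endian 32-bit value plus a 32-bit overflow word, so
 * the full count is rebuilt as (oflow << 32 | lo) and only the increment
 * since the previous readout is folded into the net_device stats (a device
 * reset zeroes the adapter counters but not the software copy).
 */
static u64 stat_delta(__le32 oflow, __le32 lo, u64 *prev)
{
	u64 cur = (u64)le32_to_cpu(oflow) << 32 | le32_to_cpu(lo);
	u64 delta = cur - *prev;	/* growth since the last readout */

	*prev = cur;			/* remember for the next call */
	return delta;
}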
4869 * s2io_set_multicast - entry point for multicast address enable/disable.
4886 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_set_multicast()
4891 struct config_param *config = &sp->config; in s2io_set_multicast()
4893 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) { in s2io_set_multicast()
4896 &bar0->rmac_addr_data0_mem); in s2io_set_multicast()
4898 &bar0->rmac_addr_data1_mem); in s2io_set_multicast()
4901 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1); in s2io_set_multicast()
4902 writeq(val64, &bar0->rmac_addr_cmd_mem); in s2io_set_multicast()
4904 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in s2io_set_multicast()
4908 sp->m_cast_flg = 1; in s2io_set_multicast()
4909 sp->all_multi_pos = config->max_mc_addr - 1; in s2io_set_multicast()
4910 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) { in s2io_set_multicast()
4913 &bar0->rmac_addr_data0_mem); in s2io_set_multicast()
4915 &bar0->rmac_addr_data1_mem); in s2io_set_multicast()
4918 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); in s2io_set_multicast()
4919 writeq(val64, &bar0->rmac_addr_cmd_mem); in s2io_set_multicast()
4921 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in s2io_set_multicast()
4925 sp->m_cast_flg = 0; in s2io_set_multicast()
4926 sp->all_multi_pos = 0; in s2io_set_multicast()
4929 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) { in s2io_set_multicast()
4931 add = &bar0->mac_cfg; in s2io_set_multicast()
4932 val64 = readq(&bar0->mac_cfg); in s2io_set_multicast()
4935 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in s2io_set_multicast()
4937 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in s2io_set_multicast()
4941 val64 = readq(&bar0->rx_pa_cfg); in s2io_set_multicast()
4943 writeq(val64, &bar0->rx_pa_cfg); in s2io_set_multicast()
4944 sp->vlan_strip_flag = 0; in s2io_set_multicast()
4947 val64 = readq(&bar0->mac_cfg); in s2io_set_multicast()
4948 sp->promisc_flg = 1; in s2io_set_multicast()
4950 dev->name); in s2io_set_multicast()
4951 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) { in s2io_set_multicast()
4953 add = &bar0->mac_cfg; in s2io_set_multicast()
4954 val64 = readq(&bar0->mac_cfg); in s2io_set_multicast()
4957 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in s2io_set_multicast()
4959 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); in s2io_set_multicast()
4963 val64 = readq(&bar0->rx_pa_cfg); in s2io_set_multicast()
4965 writeq(val64, &bar0->rx_pa_cfg); in s2io_set_multicast()
4966 sp->vlan_strip_flag = 1; in s2io_set_multicast()
4969 val64 = readq(&bar0->mac_cfg); in s2io_set_multicast()
4970 sp->promisc_flg = 0; in s2io_set_multicast()
4971 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name); in s2io_set_multicast()
4975 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) { in s2io_set_multicast()
4977 (config->max_mc_addr - config->max_mac_addr)) { in s2io_set_multicast()
4979 "%s: No more Rx filters can be added - " in s2io_set_multicast()
4981 dev->name); in s2io_set_multicast()
4985 prev_cnt = sp->mc_addr_count; in s2io_set_multicast()
4986 sp->mc_addr_count = netdev_mc_count(dev); in s2io_set_multicast()
4991 &bar0->rmac_addr_data0_mem); in s2io_set_multicast()
4993 &bar0->rmac_addr_data1_mem); in s2io_set_multicast()
4997 (config->mc_start_offset + i); in s2io_set_multicast()
4998 writeq(val64, &bar0->rmac_addr_cmd_mem); in s2io_set_multicast()
5001 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in s2io_set_multicast()
5006 dev->name); in s2io_set_multicast()
5016 mac_addr |= ha->addr[j]; in s2io_set_multicast()
5021 &bar0->rmac_addr_data0_mem); in s2io_set_multicast()
5023 &bar0->rmac_addr_data1_mem); in s2io_set_multicast()
5027 (i + config->mc_start_offset); in s2io_set_multicast()
5028 writeq(val64, &bar0->rmac_addr_cmd_mem); in s2io_set_multicast()
5031 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in s2io_set_multicast()
5036 dev->name); in s2io_set_multicast()
5051 struct config_param *config = &sp->config; in do_s2io_store_unicast_mc()
5054 for (offset = 0; offset < config->max_mc_addr; offset++) { in do_s2io_store_unicast_mc()
5067 struct config_param *config = &sp->config; in do_s2io_restore_unicast_mc()
5069 for (offset = 0; offset < config->max_mac_addr; offset++) in do_s2io_restore_unicast_mc()
5070 do_s2io_prog_unicast(sp->dev, in do_s2io_restore_unicast_mc()
5071 sp->def_mac_addr[offset].mac_addr); in do_s2io_restore_unicast_mc()
5074 for (offset = config->mc_start_offset; in do_s2io_restore_unicast_mc()
5075 offset < config->max_mc_addr; offset++) in do_s2io_restore_unicast_mc()
5076 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr); in do_s2io_restore_unicast_mc()
5084 struct config_param *config = &sp->config; in do_s2io_add_mc()
5094 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) { in do_s2io_add_mc()
5103 if (i == config->max_mc_addr) { in do_s2io_add_mc()
5118 struct XENA_dev_config __iomem *bar0 = sp->bar0; in do_s2io_add_mac()
5121 &bar0->rmac_addr_data0_mem); in do_s2io_add_mac()
5125 writeq(val64, &bar0->rmac_addr_cmd_mem); in do_s2io_add_mac()
5128 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in do_s2io_add_mac()
5141 struct config_param *config = &sp->config; in do_s2io_delete_unicast_mc()
5144 offset < config->max_mc_addr; offset++) { in do_s2io_delete_unicast_mc()
5164 struct XENA_dev_config __iomem *bar0 = sp->bar0; in do_s2io_read_unicast_mc()
5169 writeq(val64, &bar0->rmac_addr_cmd_mem); in do_s2io_read_unicast_mc()
5172 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, in do_s2io_read_unicast_mc()
5178 tmp64 = readq(&bar0->rmac_addr_data0_mem); in do_s2io_read_unicast_mc()
5184 * s2io_set_mac_addr - driver entry point
5191 if (!is_valid_ether_addr(addr->sa_data)) in s2io_set_mac_addr()
5192 return -EADDRNOTAVAIL; in s2io_set_mac_addr()
5194 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); in s2io_set_mac_addr()
5197 return do_s2io_prog_unicast(dev, dev->dev_addr); in s2io_set_mac_addr()
5200 * do_s2io_prog_unicast - Programs the Xframe mac address
5205 * Return value: SUCCESS on success and an appropriate (-)ve integer
5215 struct config_param *config = &sp->config; in do_s2io_prog_unicast()
5226 perm_addr |= sp->def_mac_addr[0].mac_addr[i]; in do_s2io_prog_unicast()
5234 for (i = 1; i < config->max_mac_addr; i++) { in do_s2io_prog_unicast()
5246 if (i == config->max_mac_addr) { in do_s2io_prog_unicast()
5257 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5273 if ((cmd->base.autoneg == AUTONEG_ENABLE) || in s2io_ethtool_set_link_ksettings()
5274 (cmd->base.speed != SPEED_10000) || in s2io_ethtool_set_link_ksettings()
5275 (cmd->base.duplex != DUPLEX_FULL)) in s2io_ethtool_set_link_ksettings()
5276 return -EINVAL; in s2io_ethtool_set_link_ksettings()
5278 s2io_close(sp->dev); in s2io_ethtool_set_link_ksettings()
5279 s2io_open(sp->dev); in s2io_ethtool_set_link_ksettings()
5286 	 * s2io_ethtool_get_link_ksettings - Returns link specific information.
5310 cmd->base.port = PORT_FIBRE; in s2io_ethtool_get_link_ksettings()
5312 if (netif_carrier_ok(sp->dev)) { in s2io_ethtool_get_link_ksettings()
5313 cmd->base.speed = SPEED_10000; in s2io_ethtool_get_link_ksettings()
5314 cmd->base.duplex = DUPLEX_FULL; in s2io_ethtool_get_link_ksettings()
5316 cmd->base.speed = SPEED_UNKNOWN; in s2io_ethtool_get_link_ksettings()
5317 cmd->base.duplex = DUPLEX_UNKNOWN; in s2io_ethtool_get_link_ksettings()
5320 cmd->base.autoneg = AUTONEG_DISABLE; in s2io_ethtool_get_link_ksettings()
5325 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5340 strlcpy(info->driver, s2io_driver_name, sizeof(info->driver)); in s2io_ethtool_gdrvinfo()
5341 strlcpy(info->version, s2io_driver_version, sizeof(info->version)); in s2io_ethtool_gdrvinfo()
5342 strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); in s2io_ethtool_gdrvinfo()
5346 	 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
5366 regs->len = XENA_REG_SPACE; in s2io_ethtool_gregs()
5367 regs->version = sp->pdev->subsystem_device; in s2io_ethtool_gregs()
5369 for (i = 0; i < regs->len; i += 8) { in s2io_ethtool_gregs()
5370 reg = readq(sp->bar0 + i); in s2io_ethtool_gregs()
5376 	 * s2io_set_led - control the NIC LED
5380 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_set_led()
5381 u16 subid = sp->pdev->subsystem_device; in s2io_set_led()
5384 if ((sp->device_type == XFRAME_II_DEVICE) || in s2io_set_led()
5386 val64 = readq(&bar0->gpio_control); in s2io_set_led()
5392 writeq(val64, &bar0->gpio_control); in s2io_set_led()
5394 val64 = readq(&bar0->adapter_control); in s2io_set_led()
5400 writeq(val64, &bar0->adapter_control); in s2io_set_led()
5406 * s2io_ethtool_set_led - To physically identify the nic on the system.
5421 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_ethtool_set_led()
5422 u16 subid = sp->pdev->subsystem_device; in s2io_ethtool_set_led()
5424 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) { in s2io_ethtool_set_led()
5425 u64 val64 = readq(&bar0->adapter_control); in s2io_ethtool_set_led()
5428 return -EAGAIN; in s2io_ethtool_set_led()
5434 sp->adapt_ctrl_org = readq(&bar0->gpio_control); in s2io_ethtool_set_led()
5446 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) in s2io_ethtool_set_led()
5447 writeq(sp->adapt_ctrl_org, &bar0->gpio_control); in s2io_ethtool_set_led()
5459 if (sp->rxd_mode == RXD_MODE_1) { in s2io_ethtool_gringparam()
5460 ering->rx_max_pending = MAX_RX_DESC_1; in s2io_ethtool_gringparam()
5461 ering->rx_jumbo_max_pending = MAX_RX_DESC_1; in s2io_ethtool_gringparam()
5463 ering->rx_max_pending = MAX_RX_DESC_2; in s2io_ethtool_gringparam()
5464 ering->rx_jumbo_max_pending = MAX_RX_DESC_2; in s2io_ethtool_gringparam()
5467 ering->tx_max_pending = MAX_TX_DESC; in s2io_ethtool_gringparam()
5469 for (i = 0; i < sp->config.rx_ring_num; i++) in s2io_ethtool_gringparam()
5470 rx_desc_count += sp->config.rx_cfg[i].num_rxd; in s2io_ethtool_gringparam()
5471 ering->rx_pending = rx_desc_count; in s2io_ethtool_gringparam()
5472 ering->rx_jumbo_pending = rx_desc_count; in s2io_ethtool_gringparam()
5474 for (i = 0; i < sp->config.tx_fifo_num; i++) in s2io_ethtool_gringparam()
5475 tx_desc_count += sp->config.tx_cfg[i].fifo_len; in s2io_ethtool_gringparam()
5476 ering->tx_pending = tx_desc_count; in s2io_ethtool_gringparam()
5477 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds); in s2io_ethtool_gringparam()
5481 	 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5494 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_ethtool_getpause_data()
5496 val64 = readq(&bar0->rmac_pause_cfg); in s2io_ethtool_getpause_data()
5498 ep->tx_pause = true; in s2io_ethtool_getpause_data()
5500 ep->rx_pause = true; in s2io_ethtool_getpause_data()
5501 ep->autoneg = false; in s2io_ethtool_getpause_data()
5505 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5520 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_ethtool_setpause_data()
5522 val64 = readq(&bar0->rmac_pause_cfg); in s2io_ethtool_setpause_data()
5523 if (ep->tx_pause) in s2io_ethtool_setpause_data()
5527 if (ep->rx_pause) in s2io_ethtool_setpause_data()
5531 writeq(val64, &bar0->rmac_pause_cfg); in s2io_ethtool_setpause_data()
5537 	 * read_eeprom - reads 4 bytes of data from a user-given offset.
5549 * -1 on failure and 0 on success.
5553 int ret = -1; in read_eeprom()
5556 struct XENA_dev_config __iomem *bar0 = sp->bar0; in read_eeprom()
5558 if (sp->device_type == XFRAME_I_DEVICE) { in read_eeprom()
5564 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); in read_eeprom()
5567 val64 = readq(&bar0->i2c_control); in read_eeprom()
5578 if (sp->device_type == XFRAME_II_DEVICE) { in read_eeprom()
5582 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); in read_eeprom()
5584 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); in read_eeprom()
5586 val64 = readq(&bar0->spi_control); in read_eeprom()
5591 *data = readq(&bar0->spi_data); in read_eeprom()
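/*
 * Hypothetical usage sketch: read_eeprom() returns 4 bytes per call, so a
 * dump walks the offset in steps of 4, exactly as s2io_ethtool_geeprom()
 * does further down.  dump_eeprom() is illustrative, not a driver symbol.
 */
static int dump_eeprom(struct s2io_nic *sp, int off, int len, u64 *buf)
{
	int i;

	for (i = 0; i < len; i += 4)
		if (read_eeprom(sp, off + i, &buf[i / 4]))
			return -EFAULT;	/* propagate the read failure */
	return 0;
}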
5604 * write_eeprom - actually writes the relevant part of the data value.
5615 * 0 on success, -1 on failure.
5620 int exit_cnt = 0, ret = -1; in write_eeprom()
5622 struct XENA_dev_config __iomem *bar0 = sp->bar0; in write_eeprom()
5624 if (sp->device_type == XFRAME_I_DEVICE) { in write_eeprom()
5630 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); in write_eeprom()
5633 val64 = readq(&bar0->i2c_control); in write_eeprom()
5644 if (sp->device_type == XFRAME_II_DEVICE) { in write_eeprom()
5646 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data); in write_eeprom()
5651 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); in write_eeprom()
5653 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); in write_eeprom()
5655 val64 = readq(&bar0->spi_control); in write_eeprom()
5675 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat; in s2io_vpd_read()
5677 if (nic->device_type == XFRAME_II_DEVICE) { in s2io_vpd_read()
5678 strcpy(nic->product_name, "Xframe II 10GbE network adapter"); in s2io_vpd_read()
5681 strcpy(nic->product_name, "Xframe I 10GbE network adapter"); in s2io_vpd_read()
5684 strcpy(nic->serial_num, "NOT AVAILABLE"); in s2io_vpd_read()
5688 swstats->mem_alloc_fail_cnt++; in s2io_vpd_read()
5691 swstats->mem_allocated += 256; in s2io_vpd_read()
5694 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); in s2io_vpd_read()
5695 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); in s2io_vpd_read()
5696 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0); in s2io_vpd_read()
5699 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data); in s2io_vpd_read()
5708 pci_read_config_dword(nic->pdev, (vpd_addr + 4), in s2io_vpd_read()
5718 if (len < min(VPD_STRING_LEN, 256-cnt-2)) { in s2io_vpd_read()
5719 memcpy(nic->serial_num, in s2io_vpd_read()
5722 memset(nic->serial_num+len, in s2io_vpd_read()
5724 VPD_STRING_LEN-len); in s2io_vpd_read()
5733 memcpy(nic->product_name, &vpd_data[3], len); in s2io_vpd_read()
5734 nic->product_name[len] = 0; in s2io_vpd_read()
5737 swstats->mem_freed += 256; in s2io_vpd_read()
5741 	 * s2io_ethtool_geeprom - reads the value stored in the EEPROM.
5760 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); in s2io_ethtool_geeprom()
5762 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE)) in s2io_ethtool_geeprom()
5763 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset; in s2io_ethtool_geeprom()
5765 for (i = 0; i < eeprom->len; i += 4) { in s2io_ethtool_geeprom()
5766 if (read_eeprom(sp, (eeprom->offset + i), &data)) { in s2io_ethtool_geeprom()
5768 return -EFAULT; in s2io_ethtool_geeprom()
5777 	 * s2io_ethtool_seeprom - tries to write the user-provided value into the EEPROM
5786 * 0 on success, -EFAULT on failure.
5793 int len = eeprom->len, cnt = 0; in s2io_ethtool_seeprom()
5797 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { in s2io_ethtool_seeprom()
5801 (sp->pdev->vendor | (sp->pdev->device << 16)), in s2io_ethtool_seeprom()
5802 eeprom->magic); in s2io_ethtool_seeprom()
5803 return -EFAULT; in s2io_ethtool_seeprom()
5813 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) { in s2io_ethtool_seeprom()
5817 return -EFAULT; in s2io_ethtool_seeprom()
5820 len--; in s2io_ethtool_seeprom()
5827 * s2io_register_test - reads and writes into all clock domains.
5841 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_register_test()
5845 val64 = readq(&bar0->pif_rd_swapper_fb); in s2io_register_test()
5851 val64 = readq(&bar0->rmac_pause_cfg); in s2io_register_test()
5857 val64 = readq(&bar0->rx_queue_cfg); in s2io_register_test()
5858 if (sp->device_type == XFRAME_II_DEVICE) in s2io_register_test()
5867 val64 = readq(&bar0->xgxs_efifo_cfg); in s2io_register_test()
5874 writeq(val64, &bar0->xmsi_data); in s2io_register_test()
5875 val64 = readq(&bar0->xmsi_data); in s2io_register_test()
5882 writeq(val64, &bar0->xmsi_data); in s2io_register_test()
5883 val64 = readq(&bar0->xmsi_data); in s2io_register_test()
5894 	 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed. in s2io_eeprom_test()
5900 	 * Verify that the EEPROM in the Xena can be programmed using I2C_CONTROL in s2io_eeprom_test()
5911 struct net_device *dev = sp->dev; in s2io_eeprom_test()
5917 if (sp->device_type == XFRAME_I_DEVICE) in s2io_eeprom_test()
5936 dev->name, (unsigned long long)0x12345, in s2io_eeprom_test()
5945 if (sp->device_type == XFRAME_I_DEVICE) in s2io_eeprom_test()
5958 dev->name, (unsigned long long)0x12345, in s2io_eeprom_test()
5966 if (sp->device_type == XFRAME_I_DEVICE) { in s2io_eeprom_test()
5995 	 * s2io_bist_test - invokes the MemBist test of the card.
6005 * 0 on success and -1 on failure.
6011 int cnt = 0, ret = -1; in s2io_bist_test()
6013 pci_read_config_byte(sp->pdev, PCI_BIST, &bist); in s2io_bist_test()
6015 pci_write_config_word(sp->pdev, PCI_BIST, bist); in s2io_bist_test()
6018 pci_read_config_byte(sp->pdev, PCI_BIST, &bist); in s2io_bist_test()
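/*
 * Background for the handshake above (PCI spec behaviour, not driver
 * specifics): bit 7 of the BIST config register advertises self-test
 * capability, setting bit 6 starts the test, and the low four bits hold
 * the completion code once bit 6 self-clears -- 0 means every internal
 * test passed.
 */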
6032 * s2io_link_test - verifies the link state of the nic
6046 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_link_test()
6049 val64 = readq(&bar0->adapter_status); in s2io_link_test()
6059 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6073 struct XENA_dev_config __iomem *bar0 = sp->bar0; in s2io_rldram_test()
6077 val64 = readq(&bar0->adapter_control); in s2io_rldram_test()
6079 writeq(val64, &bar0->adapter_control); in s2io_rldram_test()
6081 val64 = readq(&bar0->mc_rldram_test_ctrl); in s2io_rldram_test()
6083 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); in s2io_rldram_test()
6085 val64 = readq(&bar0->mc_rldram_mrs); in s2io_rldram_test()
6087 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); in s2io_rldram_test()
6090 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); in s2io_rldram_test()
6096 writeq(val64, &bar0->mc_rldram_test_d0); in s2io_rldram_test()
6101 writeq(val64, &bar0->mc_rldram_test_d1); in s2io_rldram_test()
6106 writeq(val64, &bar0->mc_rldram_test_d2); in s2io_rldram_test()
6109 writeq(val64, &bar0->mc_rldram_test_add); in s2io_rldram_test()
6114 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); in s2io_rldram_test()
6117 val64 = readq(&bar0->mc_rldram_test_ctrl); in s2io_rldram_test()
6127 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); in s2io_rldram_test()
6130 val64 = readq(&bar0->mc_rldram_test_ctrl); in s2io_rldram_test()
6139 val64 = readq(&bar0->mc_rldram_test_ctrl); in s2io_rldram_test()
6149 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF); in s2io_rldram_test()
6155 	 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6173 int orig_state = netif_running(sp->dev); in s2io_ethtool_test()
6175 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { in s2io_ethtool_test()
6178 s2io_close(sp->dev); in s2io_ethtool_test()
6181 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
6186 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
6191 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
6194 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
6197 s2io_open(sp->dev); in s2io_ethtool_test()
6204 dev->name); in s2io_ethtool_test()
6205 data[0] = -1; in s2io_ethtool_test()
6206 data[1] = -1; in s2io_ethtool_test()
6207 data[2] = -1; in s2io_ethtool_test()
6208 data[3] = -1; in s2io_ethtool_test()
6209 data[4] = -1; in s2io_ethtool_test()
6213 ethtest->flags |= ETH_TEST_FL_FAILED; in s2io_ethtool_test()
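/*
 * From userspace these results surface via ethtool's self-test command,
 * e.g. "ethtool -t eth0 offline" for the full set (which, as the code
 * above shows, closes and reopens the interface) or "ethtool -t eth0
 * online" for the subset that is safe while traffic flows.  "eth0" is of
 * course a placeholder device name.
 */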
6228 struct stat_block *stats = sp->mac_control.stats_info; in s2io_get_ethtool_stats()
6229 struct swStat *swstats = &stats->sw_stat; in s2io_get_ethtool_stats()
6230 struct xpakStat *xstats = &stats->xpak_stat; in s2io_get_ethtool_stats()
6234 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6235 le32_to_cpu(stats->tmac_frms); in s2io_get_ethtool_stats()
6237 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 | in s2io_get_ethtool_stats()
6238 le32_to_cpu(stats->tmac_data_octets); in s2io_get_ethtool_stats()
6239 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms); in s2io_get_ethtool_stats()
6241 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6242 le32_to_cpu(stats->tmac_mcst_frms); in s2io_get_ethtool_stats()
6244 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6245 le32_to_cpu(stats->tmac_bcst_frms); in s2io_get_ethtool_stats()
6246 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms); in s2io_get_ethtool_stats()
6248 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 | in s2io_get_ethtool_stats()
6249 le32_to_cpu(stats->tmac_ttl_octets); in s2io_get_ethtool_stats()
6251 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6252 le32_to_cpu(stats->tmac_ucst_frms); in s2io_get_ethtool_stats()
6254 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6255 le32_to_cpu(stats->tmac_nucst_frms); in s2io_get_ethtool_stats()
6257 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6258 le32_to_cpu(stats->tmac_any_err_frms); in s2io_get_ethtool_stats()
6259 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets); in s2io_get_ethtool_stats()
6260 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets); in s2io_get_ethtool_stats()
6262 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6263 le32_to_cpu(stats->tmac_vld_ip); in s2io_get_ethtool_stats()
6265 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6266 le32_to_cpu(stats->tmac_drop_ip); in s2io_get_ethtool_stats()
6268 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 | in s2io_get_ethtool_stats()
6269 le32_to_cpu(stats->tmac_icmp); in s2io_get_ethtool_stats()
6271 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 | in s2io_get_ethtool_stats()
6272 le32_to_cpu(stats->tmac_rst_tcp); in s2io_get_ethtool_stats()
6273 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp); in s2io_get_ethtool_stats()
6274 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 | in s2io_get_ethtool_stats()
6275 le32_to_cpu(stats->tmac_udp); in s2io_get_ethtool_stats()
6277 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6278 le32_to_cpu(stats->rmac_vld_frms); in s2io_get_ethtool_stats()
6280 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 | in s2io_get_ethtool_stats()
6281 le32_to_cpu(stats->rmac_data_octets); in s2io_get_ethtool_stats()
6282 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms); in s2io_get_ethtool_stats()
6283 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms); in s2io_get_ethtool_stats()
6285 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6286 le32_to_cpu(stats->rmac_vld_mcst_frms); in s2io_get_ethtool_stats()
6288 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6289 le32_to_cpu(stats->rmac_vld_bcst_frms); in s2io_get_ethtool_stats()
6290 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms); in s2io_get_ethtool_stats()
6291 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms); in s2io_get_ethtool_stats()
6292 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms); in s2io_get_ethtool_stats()
6293 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms); in s2io_get_ethtool_stats()
6294 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms); in s2io_get_ethtool_stats()
6296 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 | in s2io_get_ethtool_stats()
6297 le32_to_cpu(stats->rmac_ttl_octets); in s2io_get_ethtool_stats()
6299 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32 in s2io_get_ethtool_stats()
6300 | le32_to_cpu(stats->rmac_accepted_ucst_frms); in s2io_get_ethtool_stats()
6302 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow) in s2io_get_ethtool_stats()
6303 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms); in s2io_get_ethtool_stats()
6305 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6306 le32_to_cpu(stats->rmac_discarded_frms); in s2io_get_ethtool_stats()
6308 (u64)le32_to_cpu(stats->rmac_drop_events_oflow) in s2io_get_ethtool_stats()
6309 << 32 | le32_to_cpu(stats->rmac_drop_events); in s2io_get_ethtool_stats()
6310 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets); in s2io_get_ethtool_stats()
6311 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms); in s2io_get_ethtool_stats()
6313 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6314 le32_to_cpu(stats->rmac_usized_frms); in s2io_get_ethtool_stats()
6316 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6317 le32_to_cpu(stats->rmac_osized_frms); in s2io_get_ethtool_stats()
6319 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6320 le32_to_cpu(stats->rmac_frag_frms); in s2io_get_ethtool_stats()
6322 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 | in s2io_get_ethtool_stats()
6323 le32_to_cpu(stats->rmac_jabber_frms); in s2io_get_ethtool_stats()
6324 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms); in s2io_get_ethtool_stats()
6325 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms); in s2io_get_ethtool_stats()
6326 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms); in s2io_get_ethtool_stats()
6327 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms); in s2io_get_ethtool_stats()
6328 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms); in s2io_get_ethtool_stats()
6329 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms); in s2io_get_ethtool_stats()
6331 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6332 le32_to_cpu(stats->rmac_ip); in s2io_get_ethtool_stats()
6333 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets); in s2io_get_ethtool_stats()
6334 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip); in s2io_get_ethtool_stats()
6336 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6337 le32_to_cpu(stats->rmac_drop_ip); in s2io_get_ethtool_stats()
6339 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 | in s2io_get_ethtool_stats()
6340 le32_to_cpu(stats->rmac_icmp); in s2io_get_ethtool_stats()
6341 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp); in s2io_get_ethtool_stats()
6343 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 | in s2io_get_ethtool_stats()
6344 le32_to_cpu(stats->rmac_udp); in s2io_get_ethtool_stats()
6346 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 | in s2io_get_ethtool_stats()
6347 le32_to_cpu(stats->rmac_err_drp_udp); in s2io_get_ethtool_stats()
6348 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym); in s2io_get_ethtool_stats()
6349 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0); in s2io_get_ethtool_stats()
6350 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1); in s2io_get_ethtool_stats()
6351 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2); in s2io_get_ethtool_stats()
6352 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3); in s2io_get_ethtool_stats()
6353 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4); in s2io_get_ethtool_stats()
6354 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5); in s2io_get_ethtool_stats()
6355 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6); in s2io_get_ethtool_stats()
6356 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7); in s2io_get_ethtool_stats()
6357 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0); in s2io_get_ethtool_stats()
6358 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1); in s2io_get_ethtool_stats()
6359 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2); in s2io_get_ethtool_stats()
6360 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3); in s2io_get_ethtool_stats()
6361 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4); in s2io_get_ethtool_stats()
6362 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5); in s2io_get_ethtool_stats()
6363 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6); in s2io_get_ethtool_stats()
6364 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7); in s2io_get_ethtool_stats()
6366 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 | in s2io_get_ethtool_stats()
6367 le32_to_cpu(stats->rmac_pause_cnt); in s2io_get_ethtool_stats()
6368 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt); in s2io_get_ethtool_stats()
6369 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt); in s2io_get_ethtool_stats()
6371 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 | in s2io_get_ethtool_stats()
6372 le32_to_cpu(stats->rmac_accepted_ip); in s2io_get_ethtool_stats()
6373 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp); in s2io_get_ethtool_stats()
6374 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt); in s2io_get_ethtool_stats()
6375 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt); in s2io_get_ethtool_stats()
6376 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt); in s2io_get_ethtool_stats()
6377 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt); in s2io_get_ethtool_stats()
6378 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt); in s2io_get_ethtool_stats()
6379 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt); in s2io_get_ethtool_stats()
6380 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt); in s2io_get_ethtool_stats()
6381 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt); in s2io_get_ethtool_stats()
6382 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt); in s2io_get_ethtool_stats()
6383 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt); in s2io_get_ethtool_stats()
6384 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt); in s2io_get_ethtool_stats()
6385 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt); in s2io_get_ethtool_stats()
6386 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt); in s2io_get_ethtool_stats()
6387 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt); in s2io_get_ethtool_stats()
6388 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt); in s2io_get_ethtool_stats()
6389 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt); in s2io_get_ethtool_stats()
6390 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt); in s2io_get_ethtool_stats()
6391 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt); in s2io_get_ethtool_stats()
    /* Enhanced statistics exist only for Hercules (Xframe II) */
    if (sp->device_type == XFRAME_II_DEVICE) {
        tmp_stats[i++] =
            le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
        tmp_stats[i++] =
            le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
        tmp_stats[i++] =
            le64_to_cpu(stats->rmac_ttl_8192_max_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
        tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
    }
    tmp_stats[i++] = swstats->single_ecc_errs;
    tmp_stats[i++] = swstats->double_ecc_errs;
    tmp_stats[i++] = swstats->parity_err_cnt;
    tmp_stats[i++] = swstats->serious_err_cnt;
    tmp_stats[i++] = swstats->soft_reset_cnt;
    tmp_stats[i++] = swstats->fifo_full_cnt;
    for (k = 0; k < MAX_RX_RINGS; k++)
        tmp_stats[i++] = swstats->ring_full_cnt[k];
    tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
    tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
    tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
    tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
    tmp_stats[i++] = xstats->alarm_laser_output_power_high;
    tmp_stats[i++] = xstats->alarm_laser_output_power_low;
    tmp_stats[i++] = xstats->warn_transceiver_temp_high;
    tmp_stats[i++] = xstats->warn_transceiver_temp_low;
    tmp_stats[i++] = xstats->warn_laser_bias_current_high;
    tmp_stats[i++] = xstats->warn_laser_bias_current_low;
    tmp_stats[i++] = xstats->warn_laser_output_power_high;
    tmp_stats[i++] = xstats->warn_laser_output_power_low;
    tmp_stats[i++] = swstats->clubbed_frms_cnt;
    tmp_stats[i++] = swstats->sending_both;
    tmp_stats[i++] = swstats->outof_sequence_pkts;
    tmp_stats[i++] = swstats->flush_max_pkts;
    if (swstats->num_aggregations) {
        u64 tmp = swstats->sum_avg_pkts_aggregated;
        int count = 0;
        /*
         * Since 64-bit divide does not work on all platforms,
         * do repeated subtraction.
         */
        while (tmp >= swstats->num_aggregations) {
            tmp -= swstats->num_aggregations;
            count++;
        }
        tmp_stats[i++] = count;
    } else
        tmp_stats[i++] = 0;
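    /*
     * Equivalent arithmetic: the loop computes
     * sum_avg_pkts_aggregated / num_aggregations. Where a 64-bit
     * divide helper is acceptable, do_div() would produce the same
     * quotient without iterating.
     */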
    tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
    tmp_stats[i++] = swstats->pci_map_fail_cnt;
    tmp_stats[i++] = swstats->watchdog_timer_cnt;
    tmp_stats[i++] = swstats->mem_allocated;
    tmp_stats[i++] = swstats->mem_freed;
    tmp_stats[i++] = swstats->link_up_cnt;
    tmp_stats[i++] = swstats->link_down_cnt;
    tmp_stats[i++] = swstats->link_up_time;
    tmp_stats[i++] = swstats->link_down_time;

    tmp_stats[i++] = swstats->tx_buf_abort_cnt;
    tmp_stats[i++] = swstats->tx_desc_abort_cnt;
    tmp_stats[i++] = swstats->tx_parity_err_cnt;
    tmp_stats[i++] = swstats->tx_link_loss_cnt;
    tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

    tmp_stats[i++] = swstats->rx_parity_err_cnt;
    tmp_stats[i++] = swstats->rx_abort_cnt;
    tmp_stats[i++] = swstats->rx_parity_abort_cnt;
    tmp_stats[i++] = swstats->rx_rda_fail_cnt;
    tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
    tmp_stats[i++] = swstats->rx_fcs_err_cnt;
    tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
    tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
    tmp_stats[i++] = swstats->rx_unkn_err_cnt;
    tmp_stats[i++] = swstats->tda_err_cnt;
    tmp_stats[i++] = swstats->pfc_err_cnt;
    tmp_stats[i++] = swstats->pcc_err_cnt;
    tmp_stats[i++] = swstats->tti_err_cnt;
    tmp_stats[i++] = swstats->tpa_err_cnt;
    tmp_stats[i++] = swstats->sm_err_cnt;
    tmp_stats[i++] = swstats->lso_err_cnt;
    tmp_stats[i++] = swstats->mac_tmac_err_cnt;
    tmp_stats[i++] = swstats->mac_rmac_err_cnt;
    tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
    tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
    tmp_stats[i++] = swstats->rc_err_cnt;
    tmp_stats[i++] = swstats->prc_pcix_err_cnt;
    tmp_stats[i++] = swstats->rpa_err_cnt;
    tmp_stats[i++] = swstats->rda_err_cnt;
    tmp_stats[i++] = swstats->rti_err_cnt;
    tmp_stats[i++] = swstats->mc_err_cnt;
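    /*
     * Every slot written above must stay in step with the string table
     * emitted by s2io_ethtool_get_strings(); ethtool pairs values and
     * names purely by index.
     */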
}

/* s2io_get_sset_count() */
    switch (sp->device_type) {
    case XFRAME_I_DEVICE:
        return XFRAME_I_STAT_LEN;
    case XFRAME_II_DEVICE:
        return XFRAME_II_STAT_LEN;
    default:
        return 0;
    }
    /* ... */
    return -EOPNOTSUPP;

/* s2io_ethtool_get_strings() */
    if (sp->device_type == XFRAME_II_DEVICE) {
        /* ... */

/* s2io_set_features() */
    netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
    /* ... toggling LRO restarts the card so the rings pick it up ... */
    dev->features = features;

/**
 * s2io_ioctl - Entry point for the Ioctl
 * Description: no private ioctls are implemented, so every command is
 * answered with -EOPNOTSUPP.
 */
    return -EOPNOTSUPP;

/**
 * s2io_change_mtu - entry point to change MTU size for the device.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
    dev->mtu = new_mtu;
    /* ... when the interface is down, the new size is programmed
     * straight into the RMAC maximum-payload-length register: */
    struct XENA_dev_config __iomem *bar0 = sp->bar0;

    writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
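    /*
     * vBIT(val, loc, sz) (from the register header) shifts a value into
     * a sz-bit field that begins loc bits from the MSB of the 64-bit
     * register image; here the MTU lands in the 14-bit payload-length
     * field of rmac_max_pyld_len.
     */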
/**
 * s2io_set_link - Set the Link status
 * ...
 */
    struct net_device *dev = nic->dev;
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    /* ... */
    if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
        /* The card is being reset, no point doing anything */
        return;
    }

    subid = nic->pdev->subsystem_device;
    /* ... */
    val64 = readq(&bar0->adapter_status);
    if (LINK_IS_UP(val64)) {
        if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
            if (verify_xena_quiescence(nic)) {
                val64 = readq(&bar0->adapter_control);
                val64 |= ADAPTER_CNTL_EN;
                writeq(val64, &bar0->adapter_control);
                if (CARDS_WITH_FAULTY_LINK_INDICATORS(
                            nic->device_type, subid)) {
                    val64 = readq(&bar0->gpio_control);
                    val64 |= GPIO_CTRL_GPIO_0;
                    writeq(val64, &bar0->gpio_control);
                    val64 = readq(&bar0->gpio_control);
                } else {
                    val64 |= ADAPTER_LED_ON;
                    writeq(val64, &bar0->adapter_control);
                }
                nic->device_enabled_once = true;
            } else {
                DBG_PRINT(ERR_DBG,
                          "%s: Error: device is not Quiescent\n",
                          dev->name);
                s2io_stop_all_tx_queue(nic);
            }
        }
        val64 = readq(&bar0->adapter_control);
        val64 |= ADAPTER_LED_ON;
        writeq(val64, &bar0->adapter_control);
        s2io_link(nic, LINK_UP);
    } else {
        if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                                              subid)) {
            val64 = readq(&bar0->gpio_control);
            val64 &= ~GPIO_CTRL_GPIO_0;
            writeq(val64, &bar0->gpio_control);
            val64 = readq(&bar0->gpio_control);
        }
        /* turn off the LED */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_LED_ON;
        writeq(val64, &bar0->adapter_control);
        s2io_link(nic, LINK_DOWN);
    }
    clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
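    /*
     * The __S2IO_STATE_LINK_TASK bit serialises this worker against the
     * reset path: whichever side loses the test_and_set_bit() race above
     * simply backs off instead of touching the adapter concurrently.
     */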
/* set_rxd_buffer_pointer() */
    struct net_device *dev = sp->dev;
    struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

    if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
        struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;

        if (*skb) {
            /* reuse the address already mapped for this RxD */
            rxdp1->Buffer0_ptr = *temp0;
        } else {
            *skb = netdev_alloc_skb(dev, size);
            if (!(*skb)) {
                DBG_PRINT(INFO_DBG,
                          "%s: Out of memory to allocate %s\n",
                          dev->name, "1 buf mode SKBs");
                stats->mem_alloc_fail_cnt++;
                return -ENOMEM;
            }
            stats->mem_allocated += (*skb)->truesize;
            /* keep the mapped address so it can be reused for the
             * next RxD whose Host_Control is NULL */
            rxdp1->Buffer0_ptr = *temp0 =
                dma_map_single(&sp->pdev->dev, (*skb)->data,
                               size - NET_IP_ALIGN,
                               DMA_FROM_DEVICE);
            if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
                goto memalloc_failed;
            rxdp->Host_Control = (unsigned long) (*skb);
        }
    } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
        struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;

        if (*skb) {
            rxdp3->Buffer2_ptr = *temp2;
            rxdp3->Buffer0_ptr = *temp0;
            rxdp3->Buffer1_ptr = *temp1;
        } else {
            *skb = netdev_alloc_skb(dev, size);
            if (!(*skb)) {
                DBG_PRINT(INFO_DBG,
                          "%s: Out of memory to allocate %s\n",
                          dev->name,
                          "2 buf mode SKBs");
                stats->mem_alloc_fail_cnt++;
                return -ENOMEM;
            }
            stats->mem_allocated += (*skb)->truesize;
            rxdp3->Buffer2_ptr = *temp2 =
                dma_map_single(&sp->pdev->dev, (*skb)->data,
                               dev->mtu + 4, DMA_FROM_DEVICE);
            if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
                goto memalloc_failed;
            rxdp3->Buffer0_ptr = *temp0 =
                dma_map_single(&sp->pdev->dev, ba->ba_0,
                               BUF0_LEN, DMA_FROM_DEVICE);
            if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
                dma_unmap_single(&sp->pdev->dev,
                                 (dma_addr_t)rxdp3->Buffer2_ptr,
                                 dev->mtu + 4,
                                 DMA_FROM_DEVICE);
                goto memalloc_failed;
            }
            rxdp->Host_Control = (unsigned long) (*skb);

            /* Buffer-1 will be a dummy buffer, not used */
            rxdp3->Buffer1_ptr = *temp1 =
                dma_map_single(&sp->pdev->dev, ba->ba_1,
                               BUF1_LEN, DMA_FROM_DEVICE);
            if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
                dma_unmap_single(&sp->pdev->dev,
                                 (dma_addr_t)rxdp3->Buffer0_ptr,
                                 BUF0_LEN, DMA_FROM_DEVICE);
                dma_unmap_single(&sp->pdev->dev,
                                 (dma_addr_t)rxdp3->Buffer2_ptr,
                                 dev->mtu + 4,
                                 DMA_FROM_DEVICE);
                goto memalloc_failed;
            }
        }
    }
    return 0;

memalloc_failed:
    stats->pci_map_fail_cnt++;
    stats->mem_freed += (*skb)->truesize;
    dev_kfree_skb(*skb);
    return -ENOMEM;
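    /*
     * On any mapping failure the buffers mapped so far are unmapped in
     * reverse order before the shared memalloc_failed path frees the skb
     * and reports -ENOMEM, so no stale DMA address is left behind in the
     * descriptor.
     */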
/* set_rxd_buffer_size() */
    struct net_device *dev = sp->dev;

    if (sp->rxd_mode == RXD_MODE_1) {
        rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
    } else if (sp->rxd_mode == RXD_MODE_3B) {
        rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
        rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
        rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
    }
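    /*
     * 2-buffer mode layout, per the sizes above: Buffer0 holds the frame
     * header (BUF0_LEN bytes), Buffer1 is a one-byte dummy required by
     * the hardware, and Buffer2 receives the payload (MTU + 4 bytes).
     */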
/* rxd_owner_bit_reset() */
    struct config_param *config = &sp->config;
    struct mac_info *mac_control = &sp->mac_control;
    struct net_device *dev = sp->dev;
    /* ... */

    /* Calculate the size based on ring mode */
    size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
        HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
    if (sp->rxd_mode == RXD_MODE_1)
        size += NET_IP_ALIGN;
    else if (sp->rxd_mode == RXD_MODE_3B)
        size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

    for (i = 0; i < config->rx_ring_num; i++) {
        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
        struct ring_info *ring = &mac_control->rings[i];

        blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

        for (j = 0; j < blk_cnt; j++) {
            for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
                rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
                if (sp->rxd_mode == RXD_MODE_3B)
                    ba = &ring->ba[j][k];
                if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
                                           &temp0_64,
                                           &temp1_64,
                                           &temp2_64,
                                           size) == -ENOMEM) {
                    return 0;
                }

                set_rxd_buffer_size(sp, rxdp, size);

                /* flip the Ownership bit to Hardware */
                rxdp->Control_1 |= RXD_OWN_XENA;
            }
        }
    }
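    /*
     * Setting RXD_OWN_XENA above hands each descriptor back to the NIC;
     * the buffer pointers and sizes must already be in place when the
     * bit flips.
     */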
/* s2io_add_isr() */
    struct net_device *dev = sp->dev;

    if (sp->config.intr_type == MSI_X)
        ret = s2io_enable_msi_x(sp);
    if (ret) {
        DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
        sp->config.intr_type = INTA;
    }
    /* ... */

    /* After proper initialization of H/W, register the ISR(s) */
    if (sp->config.intr_type == MSI_X) {
        int i, msix_rx_cnt = 0;

        for (i = 0; i < sp->num_entries; i++) {
            if (sp->s2io_entries[i].in_use == MSIX_FLG) {
                if (sp->s2io_entries[i].type ==
                    MSIX_RING_TYPE) {
                    snprintf(sp->desc[i],
                             sizeof(sp->desc[i]),
                             "%s:MSI-X-%d-RX",
                             dev->name, i);
                    err = request_irq(sp->entries[i].vector,
                                      s2io_msix_ring_handle,
                                      0,
                                      sp->desc[i],
                                      sp->s2io_entries[i].arg);
                } else if (sp->s2io_entries[i].type ==
                           MSIX_ALARM_TYPE) {
                    snprintf(sp->desc[i],
                             sizeof(sp->desc[i]),
                             "%s:MSI-X-%d-TX",
                             dev->name, i);
                    err = request_irq(sp->entries[i].vector,
                                      s2io_msix_fifo_handle,
                                      0,
                                      sp->desc[i],
                                      sp->s2io_entries[i].arg);
                }
                /* if either data or addr is zero, print it */
                if (!(sp->msix_info[i].addr &&
                      sp->msix_info[i].data)) {
                    DBG_PRINT(ERR_DBG,
                              "%s @Addr:0x%llx Data:0x%llx\n",
                              sp->desc[i],
                              (unsigned long long)
                              sp->msix_info[i].addr,
                              (unsigned long long)
                              ntohl(sp->msix_info[i].data));
                } else
                    msix_rx_cnt++;
                if (err) {
                    remove_msix_isr(sp);
                    DBG_PRINT(ERR_DBG,
                              "%s:MSI-X-%d registration "
                              "failed\n", dev->name, i);
                    DBG_PRINT(ERR_DBG,
                              "%s: Defaulting to INTA\n",
                              dev->name);
                    sp->config.intr_type = INTA;
                    break;
                }
                sp->s2io_entries[i].in_use =
                    MSIX_REGISTERED_SUCCESS;
            }
        }
        if (!err) {
            pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
            DBG_PRINT(INFO_DBG,
                      "MSI-X-TX entries enabled through alarm vector\n");
        }
    }
    if (sp->config.intr_type == INTA) {
        err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
                          sp->name, dev);
        if (err) {
            DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
                      dev->name);
            return -1;
        }
    }
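    /*
     * Note the degradation path: any failure while enabling or
     * registering MSI-X vectors falls back to legacy INTA rather than
     * failing the open; only a failed INTA request_irq() is fatal.
     */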
/* s2io_rem_isr() */
    if (sp->config.intr_type == MSI_X)
        remove_msix_isr(sp);
    else
        remove_inta_isr(sp);

/* do_s2io_card_down() */
    struct XENA_dev_config __iomem *bar0 = sp->bar0;
    struct config_param *config;

    config = &sp->config;
    /* ... */
    del_timer_sync(&sp->alarm_timer);
    /* If the s2io_set_link task is executing, wait till it completes. */
    while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
        msleep(50);
    clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

    /* Disable napi */
    if (sp->config.napi) {
        int off = 0;
        if (config->intr_type == MSI_X) {
            for (; off < sp->config.rx_ring_num; off++)
                napi_disable(&sp->mac_control.rings[off].napi);
        } else
            napi_disable(&sp->napi);
    }

    /* ... stop Tx/Rx traffic, remove the ISR, announce link down,
     * then poll until the adapter goes quiescent: */
    while (do_io) {
        /* ... */
        val64 = readq(&bar0->adapter_status);
        if (verify_xena_quiescence(sp)) {
            if (verify_pcc_quiescent(sp, sp->device_enabled_once))
                break;
        }
        msleep(50);
        cnt++;
        if (cnt == 10) {
            DBG_PRINT(ERR_DBG, "Device not Quiescent - "
                      "adapter status reads 0x%llx\n",
                      (unsigned long long)val64);
            break;
        }
    }
    /* ... free Tx/Rx buffers and reset the device ... */
    clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
/* s2io_card_up() */
    struct net_device *dev = sp->dev;

    /* Initialize the H/W I/O registers */
    ret = init_nic(sp);
    if (ret != 0) {
        DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
                  dev->name);
        if (ret != -EIO)
            s2io_reset(sp);
        return ret;
    }

    config = &sp->config;
    mac_control = &sp->mac_control;

    /* Initialize the Rx buffers of every configured ring */
    for (i = 0; i < config->rx_ring_num; i++) {
        struct ring_info *ring = &mac_control->rings[i];

        ring->mtu = dev->mtu;
        ring->lro = !!(dev->features & NETIF_F_LRO);
        ret = fill_rx_buffers(sp, ring, 1);
        if (ret) {
            DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                      dev->name);
            s2io_reset(sp);
            free_rx_buffers(sp);
            return -ENOMEM;
        }
        DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
                  ring->rx_bufs_left);
    }

    /* Initialise napi */
    if (config->napi) {
        if (config->intr_type == MSI_X) {
            for (i = 0; i < sp->config.rx_ring_num; i++)
                napi_enable(&sp->mac_control.rings[i].napi);
        } else {
            napi_enable(&sp->napi);
        }
    }

    /* Maintain the state prior to the open of the device instead. */
    if (sp->promisc_flg)
        sp->promisc_flg = 0;
    if (sp->m_cast_flg) {
        sp->m_cast_flg = 0;
        sp->all_multi_pos = 0;
    }
    /* ... */
    if (dev->features & NETIF_F_LRO) {
        /* Initialize max aggregatable pkts per session based on MTU */
        sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
        /* Check if we can use (if specified) user provided value */
        if (lro_max_pkts < sp->lro_max_aggr_per_sess)
            sp->lro_max_aggr_per_sess = lro_max_pkts;
    }

    /* Enable Rx Traffic and interrupts on the NIC */
    if (start_nic(sp)) {
        DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
        s2io_reset(sp);
        free_rx_buffers(sp);
        return -ENODEV;
    }

    /* Add interrupt service routine */
    if (s2io_add_isr(sp) != 0) {
        if (sp->config.intr_type == MSI_X)
            s2io_rem_isr(sp);
        s2io_reset(sp);
        free_rx_buffers(sp);
        return -ENODEV;
    }

    timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
    mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
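    /* The alarm timer polls for alarm conditions every half second
     * (HZ / 2), independent of the interrupt path. */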
    set_bit(__S2IO_STATE_CARD_UP, &sp->state);

    /* Enable select interrupts */
    en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
    if (sp->config.intr_type != INTA) {
        interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
        en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
    }
    /* ... */

/**
 * s2io_restart_nic - Resets the NIC.
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC.
 */
    struct net_device *dev = sp->dev;

    /* ... */
    s2io_card_down(sp);
    if (s2io_card_up(sp))
        DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
    s2io_wake_all_tx_queue(sp);
    DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);

/**
 * s2io_tx_watchdog - Watchdog for transmit side.
 * Description:
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
    struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

    if (netif_carrier_ok(dev)) {
        swstats->watchdog_timer_cnt++;
        schedule_work(&sp->rst_timer_task);
        swstats->soft_reset_cnt++;
    }
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * ...
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
    struct s2io_nic *sp = ring_data->nic;
    struct net_device *dev = ring_data->dev;
    struct sk_buff *skb = (struct sk_buff *)
        ((unsigned long)rxdp->Host_Control);
    int ring_no = ring_data->ring_no;
    unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
    struct lro *lro = NULL;
    struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

    skb->dev = dev;

    if (err) {
        /* Check for parity error */
        if (err & 0x1)
            swstats->parity_err_cnt++;

        err_mask = err >> 48;
        switch (err_mask) {
        case 1:  swstats->rx_parity_err_cnt++;    break;
        case 2:  swstats->rx_abort_cnt++;         break;
        case 3:  swstats->rx_parity_abort_cnt++;  break;
        case 4:  swstats->rx_rda_fail_cnt++;      break;
        case 5:  swstats->rx_unkn_prot_cnt++;     break;
        case 6:  swstats->rx_fcs_err_cnt++;       break;
        case 7:  swstats->rx_buf_size_err_cnt++;  break;
        case 8:  swstats->rx_rxd_corrupt_cnt++;   break;
        case 15: swstats->rx_unkn_err_cnt++;      break;
        }
        /*
         * Drop the packet on a bad transfer code, except for code 5,
         * which may just be an unsupported IPv6 extension header; in
         * that case the stack handles (and revalidates) the packet.
         */
        if (err_mask != 0x5) {
            DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
                      dev->name, err_mask);
            dev->stats.rx_crc_errors++;
            swstats->mem_freed
                += skb->truesize;
            dev_kfree_skb(skb);
            ring_data->rx_bufs_left -= 1;
            rxdp->Host_Control = 0;
            return 0;
        }
    }

    rxdp->Host_Control = 0;
    if (sp->rxd_mode == RXD_MODE_1) {
        int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

        skb_put(skb, len);
    } else if (sp->rxd_mode == RXD_MODE_3B) {
        int get_block = ring_data->rx_curr_get_info.block_index;
        int get_off = ring_data->rx_curr_get_info.offset;
        int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
        int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
        unsigned char *buff = skb_push(skb, buf0_len);

        struct buffAdd *ba = &ring_data->ba[get_block][get_off];
        memcpy(buff, ba->ba_0, buf0_len);
        skb_put(skb, buf2_len);
    }

    if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
        ((!ring_data->lro) ||
         (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
        (dev->features & NETIF_F_RXCSUM)) {
        l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
        l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
        if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
            /* the NIC verified the checksum; the stack can skip it */
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            if (ring_data->lro) {
                u32 tcp_len;
                u8 *tcp;
                int ret = s2io_club_tcp_session(ring_data,
                                                skb->data, &tcp,
                                                &tcp_len, &lro,
                                                rxdp, sp);
                switch (ret) {
                case 3: /* Begin anew */
                    lro->parent = skb;
                    goto aggregate;
                case 1: /* Aggregate */
                    lro_append_pkt(sp, lro, skb, tcp_len);
                    goto aggregate;
                case 4: /* Flush session */
                    lro_append_pkt(sp, lro, skb, tcp_len);
                    queue_rx_frame(lro->parent,
                                   lro->vlan_tag);
                    clear_lro_session(lro);
                    swstats->flush_max_pkts++;
                    goto aggregate;
                case 2: /* Flush both */
                    lro->parent->data_len = lro->frags_len;
                    swstats->sending_both++;
                    queue_rx_frame(lro->parent,
                                   lro->vlan_tag);
                    clear_lro_session(lro);
                    goto send_up;
                case 0: /* sessions exceeded */
                case -1: /* non-TCP or not L2 aggregatable */
                case 5: /* frame not eligible for LRO */
                    break;
                default:
                    /* ... */
                    BUG();
                }
            }
        } else
            skb_checksum_none_assert(skb);
    } else
        skb_checksum_none_assert(skb);

    swstats->mem_freed += skb->truesize;
send_up:
    queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
    sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
    return SUCCESS;
/**
 * s2io_link - stops/starts the Tx queue.
 * Description:
 * Stops or starts the Tx queue depending on whether the link state of
 * the NIC is down or up.
 */
    struct net_device *dev = sp->dev;
    struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

    if (link != sp->last_link_state) {
        /* ... */
        if (link == LINK_DOWN) {
            DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
            s2io_stop_all_tx_queue(sp);
            netif_carrier_off(dev);
            if (swstats->link_up_cnt)
                swstats->link_up_time =
                    jiffies - sp->start_time;
            swstats->link_down_cnt++;
        } else {
            DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
            if (swstats->link_down_cnt)
                swstats->link_down_time =
                    jiffies - sp->start_time;
            swstats->link_up_cnt++;
            netif_carrier_on(dev);
            s2io_wake_all_tx_queue(sp);
        }
    }
    sp->last_link_state = link;
    sp->start_time = jiffies;
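    /*
     * link_up_time/link_down_time thus record how long the previous
     * link state lasted: start_time is stamped at every transition and
     * the jiffies delta is taken at the next one.
     */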
/**
 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 * Description:
 * This function initializes a few of the PCI and PCI-X configuration
 * registers with recommended values.
 */
    /* Enable Data Parity Error Recovery in the PCI-X command register. */
    pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                         &(pcix_cmd));
    pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                          (pcix_cmd | 1));
    pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                         &(pcix_cmd));

    /* Set the PErr Response bit in the PCI command register. */
    pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
    pci_write_config_word(sp->pdev, PCI_COMMAND,
                          (pci_cmd | PCI_COMMAND_PARITY));
    pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/* s2io_verify_parm() */
    if ((*dev_intr_type == MSI_X) &&
        ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
        /* only Hercules (Xframe II) parts support MSI-X */
        *dev_intr_type = INTA;
    }
    /* ... */
    DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
/**
 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or
 * Traffic class respectively.
 * Return value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
    struct XENA_dev_config __iomem *bar0 = nic->bar0;

    /* ... program the target ring for this DS codepoint ... */
    writeq(val64, &bar0->rts_ds_mem_data);
    /* ... */
    writeq(val64, &bar0->rts_ds_mem_ctrl);

    return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
                                 S2IO_BIT_RESET);
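    /*
     * Usage sketch (hypothetical): to steer DSCP 46 (EF) traffic to
     * ring 2, a caller would invoke rts_ds_steer(nic, 46, 2) once per
     * desired codepoint; the hardware then resolves the ring for every
     * received IPv4 TOS / IPv6 traffic-class value.
     */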
/**
 * s2io_init_nic - Initialization of the adapter.
 * ...
 */
    if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
        DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
        dma_flag = true;
        if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
            DBG_PRINT(ERR_DBG,
                      "Unable to obtain 64bit DMA for coherent allocations\n");
            pci_disable_device(pdev);
            return -ENOMEM;
        }
    } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
        DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
    } else {
        pci_disable_device(pdev);
        return -ENOMEM;
    }
    if (pci_request_regions(pdev, s2io_driver_name)) {
        DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
                  __func__, ret);
        pci_disable_device(pdev);
        return -ENODEV;
    }
    /* ... allocate the multiqueue net_device ... */
    if (dev == NULL) {
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        return -ENODEV;
    }
    /* ... */
    SET_NETDEV_DEV(dev, &pdev->dev);

    /* Private member variable initialized to the s2io NIC structure */
    sp = netdev_priv(dev);
    sp->dev = dev;
    sp->pdev = pdev;
    sp->high_dma_flag = dma_flag;
    sp->device_enabled_once = false;
    if (rx_ring_mode == 1)
        sp->rxd_mode = RXD_MODE_1;
    if (rx_ring_mode == 2)
        sp->rxd_mode = RXD_MODE_3B;

    sp->config.intr_type = dev_intr_type;

    if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
        (pdev->device == PCI_DEVICE_ID_HERC_UNI))
        sp->device_type = XFRAME_II_DEVICE;
    else
        sp->device_type = XFRAME_I_DEVICE;

    /* Initialize some PCI/PCI-X fields of the NIC. */
    s2io_init_pci(sp);

    /*
     * Setting the device configuration parameters.
     * Most of these can be supplied by the user as module parameters;
     * anything not specified at load time takes its default value.
     */
    config = &sp->config;
    mac_control = &sp->mac_control;

    config->napi = napi;
    config->tx_steering_type = tx_steering_type;

    /* Tx side parameters. */
    if (config->tx_steering_type == TX_PRIORITY_STEERING)
        config->tx_fifo_num = MAX_TX_FIFOS;
    else
        config->tx_fifo_num = tx_fifo_num;

    /* Initialize the fifos used for tx steering */
    if (config->tx_fifo_num < 5) {
        if (config->tx_fifo_num == 1)
            sp->total_tcp_fifos = 1;
        else
            sp->total_tcp_fifos = config->tx_fifo_num - 1;
        sp->udp_fifo_idx = config->tx_fifo_num - 1;
        sp->total_udp_fifos = 1;
        sp->other_fifo_idx = sp->total_tcp_fifos - 1;
    } else {
        sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
                               FIFO_OTHER_MAX_NUM);
        sp->udp_fifo_idx = sp->total_tcp_fifos;
        sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
        sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
    }

    config->multiq = dev_multiq;
    for (i = 0; i < config->tx_fifo_num; i++) {
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        tx_cfg->fifo_len = tx_fifo_len[i];
        tx_cfg->fifo_priority = i;
    }

    /* mapping the QoS priority to the configured fifos */
    for (i = 0; i < MAX_TX_FIFOS; i++)
        config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

    /* map the hashing selector table to the configured fifos */
    for (i = 0; i < config->tx_fifo_num; i++)
        sp->fifo_selector[i] = fifo_selector[i];

    config->tx_intr_type = TXD_INT_TYPE_UTILZ;
    for (i = 0; i < config->tx_fifo_num; i++) {
        struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

        tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
        if (tx_cfg->fifo_len < 65) {
            config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
            break;
        }
    }
    /* + 2 because one Txd for skb->data and one Txd for UFO */
    config->max_txds = MAX_SKB_FRAGS + 2;

    /* Rx side parameters. */
    config->rx_ring_num = rx_ring_num;
    for (i = 0; i < config->rx_ring_num; i++) {
        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
        struct ring_info *ring = &mac_control->rings[i];

        rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
        rx_cfg->ring_priority = i;
        ring->rx_bufs_left = 0;
        ring->rxd_mode = sp->rxd_mode;
        ring->rxd_count = rxd_count[sp->rxd_mode];
        ring->pdev = sp->pdev;
        ring->dev = sp->dev;
    }

    for (i = 0; i < rx_ring_num; i++) {
        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

        rx_cfg->ring_org = RING_ORG_BUFF1;
        rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
    }
    /* Setting Mac Control parameters */
    mac_control->rmac_pause_time = rmac_pause_time;
    mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
    mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

    /* initialize the shared memory used by the NIC and the host */
    if (init_shared_mem(sp)) {
        DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
        ret = -ENOMEM;
        goto mem_alloc_failed;
    }

    sp->bar0 = pci_ioremap_bar(pdev, 0);
    if (!sp->bar0) {
        DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
                  dev->name);
        ret = -ENOMEM;
        goto bar0_remap_failed;
    }

    sp->bar1 = pci_ioremap_bar(pdev, 2);
    if (!sp->bar1) {
        DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
                  dev->name);
        ret = -ENOMEM;
        goto bar1_remap_failed;
    }

    /* Initializing the BAR1 address as the start of the FIFO pointer. */
    for (j = 0; j < MAX_TX_FIFOS; j++)
        mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);

    /* Driver entry points */
    dev->netdev_ops = &s2io_netdev_ops;
    dev->ethtool_ops = &netdev_ethtool_ops;
    dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
        NETIF_F_TSO | NETIF_F_TSO6 |
        NETIF_F_RXCSUM | NETIF_F_LRO;
    dev->features |= dev->hw_features |
        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
    if (sp->high_dma_flag == true)
        dev->features |= NETIF_F_HIGHDMA;
    dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
    INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
    INIT_WORK(&sp->set_link_task, s2io_set_link);

    pci_save_state(sp->pdev);

    /* Setting swapper control on the NIC, for proper reset operation */
    if (s2io_set_swapper(sp)) {
        DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
                  dev->name);
        ret = -EAGAIN;
        goto set_swap_failed;
    }

    /* Verify if the Herc works on the slot it is placed into */
    if (sp->device_type & XFRAME_II_DEVICE) {
        mode = s2io_verify_pci_mode(sp);
        if (mode < 0) {
            DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
                      __func__);
            ret = -EBADSLT;
            goto set_swap_failed;
        }
    }

    if (sp->config.intr_type == MSI_X) {
        sp->num_entries = config->rx_ring_num + 1;
        ret = s2io_enable_msi_x(sp);

        if (!ret) {
            ret = s2io_test_msi(sp);
            /* rollback MSI-X, will re-enable during add_isr() */
            remove_msix_isr(sp);
        }
        if (ret) {
            DBG_PRINT(ERR_DBG,
                      "MSI-X requested but failed to enable\n");
            sp->config.intr_type = INTA;
        }
    }

    if (config->intr_type == MSI_X) {
        for (i = 0; i < config->rx_ring_num; i++) {
            struct ring_info *ring = &mac_control->rings[i];

            netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
        }
    } else {
        netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
    }

    /* Not needed for Herc */
    if (sp->device_type & XFRAME_I_DEVICE) {
        /* fix for the "all FFs" MAC address problem seen on Alpha */
        fix_mac_address(sp);
        s2io_reset(sp);
    }

    /*
     * MAC address initialization.
     * For now only one MAC address will be read and used.
     */
    bar0 = sp->bar0;
    val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
        RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
    writeq(val64, &bar0->rmac_addr_cmd_mem);
    wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                          RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                          S2IO_BIT_RESET);
    tmp64 = readq(&bar0->rmac_addr_data0_mem);
    mac_down = (u32)tmp64;
    mac_up = (u32) (tmp64 >> 32);

    sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
    sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
    sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
    sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
    sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
    sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

    /* Set the factory defined MAC address initially */
    dev->addr_len = ETH_ALEN;
    memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
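    /*
     * The factory-programmed station address is read from offset 0 of
     * the RMAC address command memory above; additional unicast and
     * multicast entries use the same command/data register pair with
     * different offsets.
     */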
    /* initialize the number of multicast & unicast MAC entries */
    if (sp->device_type == XFRAME_I_DEVICE) {
        config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
        config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
        config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
    } else if (sp->device_type == XFRAME_II_DEVICE) {
        config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
        config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
        config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
    }

    /* MTU range: 46 - 9600 */
    dev->min_mtu = MIN_MTU;
    dev->max_mtu = S2IO_JUMBO_SIZE;
    /* ... */

    /* MSI-X needs one vector per configured ring plus one for alarms */
    if ((sp->device_type == XFRAME_II_DEVICE) &&
        (config->intr_type == MSI_X))
        sp->num_entries = config->rx_ring_num + 1;
    /* ... */

    /*
     * Initialize link state flags
     * and the card state parameter
     */
    sp->state = 0;

    /* Initialize spinlocks */
    for (i = 0; i < sp->config.tx_fifo_num; i++) {
        struct fifo_info *fifo = &mac_control->fifos[i];

        spin_lock_init(&fifo->tx_lock);
    }

    /*
     * SXE-002: Configure link and activity LED to init state
     * on driver load.
     */
    subid = sp->pdev->subsystem_device;
    if ((subid & 0xFF) >= 0x07) {
        val64 = readq(&bar0->gpio_control);
        /* ... set the LED init pattern ... */
        writeq(val64, &bar0->gpio_control);
        /* ... */
        val64 = readq(&bar0->gpio_control);
    }

    sp->rx_csum = 1;    /* Rx chksum verify enabled by default */

    if (register_netdev(dev)) {
        ret = -ENODEV;
        goto register_failed;
    }
    /* ... */
    DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
    DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
              sp->product_name, pdev->revision);
    DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
              s2io_driver_version);
    DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
    DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
    if (sp->device_type & XFRAME_II_DEVICE) {
        mode = s2io_print_pci_mode(sp);
        if (mode < 0) {
            ret = -EBADSLT;
            unregister_netdev(dev);
            goto set_swap_failed;
        }
    }
    switch (sp->rxd_mode) {
    case RXD_MODE_1:
        DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
                  dev->name);
        break;
    case RXD_MODE_3B:
        DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
                  dev->name);
        break;
    }

    switch (sp->config.napi) {
    case 0:
        DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
        break;
    case 1:
        DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
        break;
    }

    DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
              sp->config.tx_fifo_num);

    DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
              sp->config.rx_ring_num);

    switch (sp->config.intr_type) {
    case INTA:
        DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
        break;
    case MSI_X:
        DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
        break;
    }
    if (sp->config.multiq) {
        for (i = 0; i < sp->config.tx_fifo_num; i++) {
            struct fifo_info *fifo = &mac_control->fifos[i];

            fifo->multiq = config->multiq;
        }
        DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
                  dev->name);
    } else
        DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
                  dev->name);

    switch (sp->config.tx_steering_type) {
    case NO_STEERING:
        DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
                  dev->name);
        break;
    case TX_PRIORITY_STEERING:
        DBG_PRINT(ERR_DBG,
                  "%s: Priority steering enabled for transmit\n",
                  dev->name);
        break;
    case TX_DEFAULT_STEERING:
        DBG_PRINT(ERR_DBG,
                  "%s: Default steering enabled for transmit\n",
                  dev->name);
    }

    snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
             sp->product_name);

    if (vlan_tag_strip)
        sp->vlan_strip_flag = 1;
    else
        sp->vlan_strip_flag = 0;
    /* ... */
bar1_remap_failed:
    iounmap(sp->bar1);
bar0_remap_failed:
    iounmap(sp->bar0);
/**
 * s2io_rem_nic - Free the PCI device
 * ...
 */
    cancel_work_sync(&sp->rst_timer_task);
    cancel_work_sync(&sp->set_link_task);
    /* ... */
    iounmap(sp->bar0);
    iounmap(sp->bar1);
/* check_L2_lro_capable() */
    u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

    if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
        DBG_PRINT(INIT_DBG,
                  "%s: Non-TCP frames not supported for LRO\n",
                  __func__);
        return -1;
    }

    /* checking for DIX type or DIX type with VLAN */
    if ((l2_type == 0) || (l2_type == 4)) {
        ip_off = HEADER_ETHERNET_II_802_3_SIZE;
        /* if VLAN stripping is off and the frame is tagged,
         * shift past the VLAN header */
        if ((!sp->vlan_strip_flag) &&
            (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
            ip_off += HEADER_VLAN_SIZE;
    } else {
        /* LLC, SNAP etc are considered non-mergeable */
        return -1;
    }

    *ip = (struct iphdr *)(buffer + ip_off);
    ip_len = (u8)((*ip)->ihl);
    ip_len <<= 2;
    *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

    return 0;

/* check_for_socket_match() */
    if ((lro->iph->saddr != ip->saddr) ||
        (lro->iph->daddr != ip->daddr) ||
        (lro->tcph->source != tcp->source) ||
        (lro->tcph->dest != tcp->dest))
        return -1;
    return 0;

/* get_l4_pyld_length() */
    return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
/* initiate_new_session() */
    lro->l2h = l2h;
    lro->iph = ip;
    lro->tcph = tcp;
    lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
    lro->tcp_ack = tcp->ack_seq;
    lro->sg_num = 1;
    lro->total_len = ntohs(ip->tot_len);
    lro->frags_len = 0;
    lro->vlan_tag = vlan_tag;
    /*
     * Check if we saw a TCP timestamp.
     * Other consistency checks have already been done.
     */
    if (tcp->doff == 8) {
        __be32 *ptr;
        ptr = (__be32 *)(tcp + 1);
        lro->saw_ts = 1;
        lro->cur_tsval = ntohl(*(ptr+1));
        lro->cur_tsecr = *(ptr+2);
    }
    lro->in_use = 1;
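    /*
     * doff == 8 means a 32-byte TCP header: 20 bytes base plus 12 bytes
     * of options, which this driver only accepts as the aligned
     * NOP/NOP/TIMESTAMP pattern (see verify_l3_l4_lro_capable()), so
     * the tsval/tsecr words sit at fixed offsets behind the header.
     */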
/* update_L3L4_header() */
    struct iphdr *ip = lro->iph;
    struct tcphdr *tcp = lro->tcph;
    struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

    /* Update L3 header */
    csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
    ip->tot_len = htons(lro->total_len);

    /* Update L4 header */
    tcp->ack_seq = lro->tcp_ack;
    tcp->window = lro->window;

    /* Update tsecr field if this session has timestamps enabled */
    if (lro->saw_ts) {
        __be32 *ptr = (__be32 *)(tcp + 1);
        *(ptr+2) = lro->cur_tsecr;
    }

    /* Update counters required for the calculation of
     * the average number of packets aggregated.
     */
    swstats->sum_avg_pkts_aggregated += lro->sg_num;
    swstats->num_aggregations++;
/* aggregate_new_rx() */
    lro->total_len += l4_pyld;
    lro->frags_len += l4_pyld;
    lro->tcp_next_seq += l4_pyld;
    lro->sg_num++;

    /* Update the ack sequence number and window from this packet */
    lro->tcp_ack = tcp->ack_seq;
    lro->window = tcp->window;

    if (lro->saw_ts) {
        __be32 *ptr;
        /* Update tsecr and tsval from this packet */
        ptr = (__be32 *)(tcp + 1);
        lro->cur_tsval = ntohl(*(ptr+1));
        lro->cur_tsecr = *(ptr + 2);
    }
/* verify_l3_l4_lro_capable() */
    if (!tcp_pyld_len) {
        /* Runt frame or a pure ack */
        return -1;
    }

    if (ip->ihl != 5) /* IP has options */
        return -1;

    /* a CE codepoint in the IP header makes the packet non-mergeable */
    if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
        return -1;

    if (tcp->urg || tcp->psh || tcp->rst ||
        tcp->syn || tcp->fin ||
        tcp->ece || tcp->cwr || !tcp->ack) {
        /* only the plain ack control word is recognized; any other
         * control flag flushes the LRO session */
        return -1;
    }

    /* allow only the single TCP timestamp option */
    if (tcp->doff != 5 && tcp->doff != 8)
        return -1;

    if (tcp->doff == 8) {
        ptr = (u8 *)(tcp + 1);
        while (*ptr == TCPOPT_NOP)
            ptr++;
        if (*ptr != TCPOPT_TIMESTAMP || *(ptr + 1) != TCPOLEN_TIMESTAMP)
            return -1;

        /* ensure the timestamp value increases monotonically */
        if (l_lro)
            if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr + 2))))
                return -1;

        /* timestamp echo reply should be non-zero */
        if (*((__be32 *)(ptr + 6)) == 0)
            return -1;
    }
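    /*
     * In short, a frame joins an LRO session only if it is a plain
     * in-order TCP segment: payload present, no IP options, no CE mark,
     * no TCP flags besides ACK, and at most the single aligned
     * timestamp option with a non-zero echo reply.
     */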
/* s2io_club_tcp_session() */
    struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
    /* ... */
    DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

    vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
    for (i = 0; i < MAX_LRO_SESSIONS; i++) {
        struct lro *l_lro = &ring_data->lro0_n[i];
        if (l_lro->in_use) {
            if (check_for_socket_match(l_lro, ip, tcph))
                continue;
            /* Sock pair matched */
            *lro = l_lro;

            if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                DBG_PRINT(INFO_DBG,
                          "%s: Out of sequence. expected "
                          "0x%x, actual 0x%x\n", __func__,
                          (*lro)->tcp_next_seq,
                          ntohl(tcph->seq));

                swstats->outof_sequence_pkts++;
                break;
            }
            /* ... */
        }
    }
    /* ... otherwise look for a free session slot: */
    for (i = 0; i < MAX_LRO_SESSIONS; i++) {
        struct lro *l_lro = &ring_data->lro0_n[i];
        if (!(l_lro->in_use)) {
            *lro = l_lro;
            break;
        }
    }
    /* ... flush once the session reaches the aggregation limit: */
    if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
/* queue_rx_frame() */
    struct net_device *dev = skb->dev;
    struct s2io_nic *sp = netdev_priv(dev);

    skb->protocol = eth_type_trans(skb, dev);
    if (vlan_tag && sp->vlan_strip_flag)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
    if (sp->config.napi)
        netif_receive_skb(skb);
    else
        netif_rx(skb);
/* lro_append_pkt() */
    struct sk_buff *first = lro->parent;
    struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

    first->len += tcp_len;
    first->data_len = lro->frags_len;
    skb_pull(skb, (skb->len - tcp_len));
    if (skb_shinfo(first)->frag_list)
        lro->last_frag->next = skb;
    else
        skb_shinfo(first)->frag_list = skb;
    first->truesize += skb->truesize;
    lro->last_frag = skb;
    swstats->clubbed_frms_cnt++;
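    /*
     * Aggregated segments are chained on the parent skb's frag_list:
     * the first fragment hangs off skb_shinfo(first)->frag_list, later
     * ones are linked through last_frag->next, and len/data_len/
     * truesize on the parent are kept consistent as each one is added.
     */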
/**
 * s2io_io_error_detected - called when PCI error is detected
 * Description: invoked by the PCI error-recovery core after a bus error
 * affecting this device; the device is taken offline until recovery.
 */
    /* ... */

/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * Description:
 * Restart the card from scratch, as if from a cold-boot.
 */
    if (pci_enable_device(pdev)) {
        pr_err("Cannot re-enable PCI device after reset.\n");
        return PCI_ERS_RESULT_DISCONNECT;
    }
    /* ... */

/**
 * s2io_io_resume - called when traffic can start flowing again.
 * Description: bring the interface back up, reprogramming the stored
 * unicast address before restarting traffic.
 */
    if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
        /* ... */