Lines matching refs:adapter, grouped by function:
In ixgbe_cache_ring_dcb_sriov():
   42  static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
   45          struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
   47          struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
   50          u8 tcs = netdev_get_num_tc(adapter->netdev);
   57          if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
   62          for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
   66                  adapter->rx_ring[i]->reg_idx = reg_idx;
   70          for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
   74                  adapter->tx_ring[i]->reg_idx = reg_idx;
   79          if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
   89                  u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
   92                  for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
   94                          adapter->rx_ring[i]->reg_idx = reg_idx;
   99                  for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
  101                          adapter->tx_ring[i]->reg_idx = reg_idx;
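The loops at lines 62-74 hand out hardware register indices to every ring, jumping to the next VMDq pool boundary once the per-pool traffic-class count is used up. A minimal userspace sketch of that indexing pattern, with an assumed 4-slot pool and 2 TCs (the driver derives both from vmdq->mask and netdev_get_num_tc(); this is not the driver code itself):

#include <stdio.h>

/* Align x up to the next multiple of (mask + 1), like the kernel's __ALIGN_MASK(). */
#define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(unsigned int)(mask))

int main(void)
{
        const unsigned int pool_mask = 0x3;   /* assumed: 4 queue slots per VMDq pool */
        const unsigned int tcs = 2;           /* assumed: 2 traffic classes in use */
        const unsigned int num_rx_queues = 8;
        unsigned int reg_idx = 0;
        unsigned int i;

        for (i = 0; i < num_rx_queues; i++, reg_idx++) {
                /* Past the active TCs of this pool: jump to the next pool
                 * boundary and leave the remaining slots unused. */
                if ((reg_idx & pool_mask) >= tcs)
                        reg_idx = ALIGN_MASK(reg_idx, pool_mask);
                printf("rx_ring[%u] -> reg_idx %u\n", i, reg_idx);
        }
        return 0;
}

With these assumed values the walk prints indices 0, 1, 4, 5, 8, 9, 12, 13: two used slots followed by a two-slot hole in every pool.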
In ixgbe_get_first_reg_idx():
  111  static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
  114          struct net_device *dev = adapter->netdev;
  115          struct ixgbe_hw *hw = &adapter->hw;
In ixgbe_cache_ring_dcb():
  169  static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
  171          struct net_device *dev = adapter->netdev;
  180          rss_i = adapter->ring_feature[RING_F_RSS].indices;
  183                  ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
  185                          adapter->tx_ring[offset + i]->reg_idx = tx_idx;
  186                          adapter->rx_ring[offset + i]->reg_idx = rx_idx;
  187                          adapter->tx_ring[offset + i]->dcb_tc = tc;
  188                          adapter->rx_ring[offset + i]->dcb_tc = tc;
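Lines 183-188 give each traffic class a contiguous block of rss_i rings, starting at a first register index that ixgbe_get_first_reg_idx() derives from the MAC type. A sketch of the blocking, with hypothetical strides standing in for the hardware tables:

#include <stdio.h>

/* Hypothetical stand-in for ixgbe_get_first_reg_idx(); in the driver the
 * first register index of a TC depends on the MAC type and TC count. */
static void first_reg_idx(unsigned int tc, unsigned int *tx, unsigned int *rx)
{
        *tx = tc * 32; /* assumed per-TC Tx stride */
        *rx = tc * 16; /* assumed per-TC Rx stride */
}

int main(void)
{
        const unsigned int num_tcs = 4, rss_i = 2; /* assumed sizes */
        unsigned int offset = 0;
        unsigned int tc, i;

        for (tc = 0; tc < num_tcs; tc++) {
                unsigned int tx_idx, rx_idx;

                first_reg_idx(tc, &tx_idx, &rx_idx);
                /* each TC owns rss_i consecutive rings and register slots */
                for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++, offset++)
                        printf("ring[%u]: tc %u, tx reg %u, rx reg %u\n",
                               offset, tc, tx_idx, rx_idx);
        }
        return 0;
}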
In ixgbe_cache_ring_sriov():
  204  static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
  207          struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
  209          struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
  210          struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
  215          if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
  220          for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
  229                  adapter->rx_ring[i]->reg_idx = reg_idx;
  234          for (; i < adapter->num_rx_queues; i++, reg_idx++)
  235                  adapter->rx_ring[i]->reg_idx = reg_idx;
  239          for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
  248                  adapter->tx_ring[i]->reg_idx = reg_idx;
  253          for (; i < adapter->num_tx_queues; i++, reg_idx++)
  254                  adapter->tx_ring[i]->reg_idx = reg_idx;
In ixgbe_cache_ring_rss():
  268  static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
  272          for (i = 0; i < adapter->num_rx_queues; i++)
  273                  adapter->rx_ring[i]->reg_idx = i;
  274          for (i = 0; i < adapter->num_tx_queues; i++)
  275                  adapter->tx_ring[i]->reg_idx = i;
In ixgbe_cache_ring_register():
  291  static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
  294          adapter->rx_ring[0]->reg_idx = 0;
  295          adapter->tx_ring[0]->reg_idx = 0;
  298          if (ixgbe_cache_ring_dcb_sriov(adapter))
  301          if (ixgbe_cache_ring_dcb(adapter))
  305          if (ixgbe_cache_ring_sriov(adapter))
  308          ixgbe_cache_ring_rss(adapter);
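The early returns at lines 298-308 encode a strict precedence: DCB with SR-IOV first, then plain DCB, then SR-IOV, with RSS as the unconditional fallback; ixgbe_set_num_queues() (lines 696-706 below) has the same shape. A toy model of the fallthrough, with made-up predicates in place of the adapter->flags tests:

#include <stdbool.h>
#include <stdio.h>

/* Assumed example state; the driver tests adapter->flags and the TC count. */
static bool dcb_on = true, sriov_on = false;

static bool cache_dcb_sriov(void) { return dcb_on && sriov_on; }
static bool cache_dcb(void)       { return dcb_on; }
static bool cache_sriov(void)     { return sriov_on; }

int main(void)
{
        /* Each helper returns true only when its mode is active, so the
         * first match wins and plain RSS handles everything else. */
        if (cache_dcb_sriov()) { puts("DCB + SR-IOV layout"); return 0; }
        if (cache_dcb())       { puts("DCB layout");          return 0; }
        if (cache_sriov())     { puts("SR-IOV layout");       return 0; }
        puts("RSS layout");
        return 0;
}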
In ixgbe_set_dcb_sriov_queues():
  328  static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
  331          u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
  336          u8 tcs = netdev_get_num_tc(adapter->netdev);
  343          if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  347          vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
  365          vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
  368          adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
  369          adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
  375          adapter->ring_feature[RING_F_RSS].indices = 1;
  376          adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
  379          adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  381          adapter->num_rx_pools = vmdq_i;
  382          adapter->num_rx_queues_per_pool = tcs;
  384          adapter->num_tx_queues = vmdq_i * tcs;
  385          adapter->num_rx_queues = vmdq_i * tcs;
  388          if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  391                  fcoe = &adapter->ring_feature[RING_F_FCOE];
  402                          adapter->num_tx_queues += fcoe_i;
  403                          adapter->num_rx_queues += fcoe_i;
  407                          fcoe->offset = ixgbe_fcoe_get_tc(adapter);
  409                          adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
  419                  netdev_set_tc_queue(adapter->netdev, i, 1, i);
In ixgbe_set_dcb_queues():
  424  static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
  426          struct net_device *dev = adapter->netdev;
  440          if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  455          f = &adapter->ring_feature[RING_F_RSS];
  461          adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  469          if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  470                  u8 tc = ixgbe_fcoe_get_tc(adapter);
  472                  f = &adapter->ring_feature[RING_F_FCOE];
  481          adapter->num_tx_queues = rss_i * tcs;
  482          adapter->num_rx_queues = rss_i * tcs;
In ixgbe_set_sriov_queues():
  497  static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
  499          u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
  501          u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
  506          bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
  509          if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  513          vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
  537          vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
  540          adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
  541          adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
  544          adapter->ring_feature[RING_F_RSS].indices = rss_i;
  545          adapter->ring_feature[RING_F_RSS].mask = rss_m;
  547          adapter->num_rx_pools = vmdq_i;
  548          adapter->num_rx_queues_per_pool = rss_i;
  550          adapter->num_rx_queues = vmdq_i * rss_i;
  551          adapter->num_tx_queues = vmdq_i * rss_i;
  554          adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  562          if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  565                  fcoe = &adapter->ring_feature[RING_F_FCOE];
  579                          if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
  590                  adapter->num_tx_queues += fcoe_i;
  591                  adapter->num_rx_queues += fcoe_i;
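Lines 381-385 and 547-551 show the two pool-based queue budgets: under DCB with SR-IOV every pool carries one queue per traffic class (RSS is forced down to a single index at line 375), while under plain SR-IOV every pool carries a full RSS set. A sketch of the arithmetic with assumed pool and feature limits:

#include <stdio.h>

int main(void)
{
        /* Assumed limits; the driver reads these from ring_feature[]. */
        const unsigned int vmdq_i = 4; /* VMDq/SR-IOV pools */
        const unsigned int rss_i = 2;  /* RSS queues per pool */
        const unsigned int tcs = 4;    /* traffic classes (DCB case) */

        /* ixgbe_set_sriov_queues(): one RSS set per pool. */
        printf("SR-IOV: %u rx/tx queues (%u pools x %u rss)\n",
               vmdq_i * rss_i, vmdq_i, rss_i);

        /* ixgbe_set_dcb_sriov_queues(): one queue per TC per pool; RSS is
         * effectively disabled (indices forced to 1 at line 375). */
        printf("DCB+SR-IOV: %u rx/tx queues (%u pools x %u tcs)\n",
               vmdq_i * tcs, vmdq_i, tcs);
        return 0;
}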
In ixgbe_set_rss_queues():
  606  static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  608          struct ixgbe_hw *hw = &adapter->hw;
  613          f = &adapter->ring_feature[RING_F_RSS];
  624          adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  631          if (rss_i > 1 && adapter->atr_sample_rate) {
  632                  f = &adapter->ring_feature[RING_F_FDIR];
  636                  if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
  637                          adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
  649          if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  650                  struct net_device *dev = adapter->netdev;
  653                  f = &adapter->ring_feature[RING_F_FCOE];
  660                  if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
  670          adapter->num_rx_queues = rss_i;
  671          adapter->num_tx_queues = rss_i;
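Lines 631-637 couple RSS to Flow Director: with more than one RSS queue and ATR sampling active, the queue count is apparently raised to the Flow Director limit and hash-based filtering is flagged on, unless perfect filters already claim the hardware. A sketch of that flag logic under assumed limits:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        unsigned int rss_i = 4;              /* assumed RSS queue count */
        const unsigned int fdir_limit = 64;  /* assumed RING_F_FDIR limit */
        const unsigned int atr_sample_rate = 20;
        const bool perfect_filters = false;  /* IXGBE_FLAG_FDIR_PERFECT_CAPABLE */
        bool fdir_hash = false;              /* IXGBE_FLAG_FDIR_HASH_CAPABLE */

        if (rss_i > 1 && atr_sample_rate) {
                /* grow the queue count to the Flow Director limit */
                rss_i = fdir_limit;
                if (!perfect_filters)
                        fdir_hash = true;
        }
        printf("rss_i=%u, fdir hash %s\n", rss_i, fdir_hash ? "on" : "off");
        return 0;
}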
In ixgbe_set_num_queues():
  687  static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
  690          adapter->num_rx_queues = 1;
  691          adapter->num_tx_queues = 1;
  692          adapter->num_rx_pools = adapter->num_rx_queues;
  693          adapter->num_rx_queues_per_pool = 1;
  696          if (ixgbe_set_dcb_sriov_queues(adapter))
  699          if (ixgbe_set_dcb_queues(adapter))
  703          if (ixgbe_set_sriov_queues(adapter))
  706          ixgbe_set_rss_queues(adapter);
In ixgbe_acquire_msix_vectors():
  717  static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
  719          struct ixgbe_hw *hw = &adapter->hw;
  723          vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
  748          adapter->msix_entries = kcalloc(vectors,
  751          if (!adapter->msix_entries)
  755                  adapter->msix_entries[i].entry = i;
  757          vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
  767                  adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
  768                  kfree(adapter->msix_entries);
  769                  adapter->msix_entries = NULL;
  777          adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
  783          adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
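Lines 723-783 size the vector request as max(rx, tx) queues plus non-queue overhead, allocate the msix_entry table, and let pci_enable_msix_range() negotiate anything between a minimum and the full request; a negative return means MSI-X is abandoned entirely (lines 767-769). A userspace model with a stubbed negotiation, assuming one non-queue vector and a minimum of 2:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct msix_entry { unsigned int vector, entry; };

#define NON_Q_VECTORS 1 /* assumed overhead: the non-queue-cause vector */

/* Stub with pci_enable_msix_range() semantics: on success it returns a
 * count somewhere in [minvec, maxvec]; this platform grants only 6. */
static int fake_enable_msix_range(struct msix_entry *entries,
                                  int minvec, int maxvec)
{
        int granted = 6;

        (void)entries;
        if (granted < minvec)
                return -ENOSPC;
        return granted < maxvec ? granted : maxvec;
}

int main(void)
{
        int num_rx = 10, num_tx = 10, max_q_vectors = 8; /* assumed config */
        int vectors = (num_rx > num_tx ? num_rx : num_tx) + NON_Q_VECTORS;
        struct msix_entry *entries = calloc(vectors, sizeof(*entries));
        int i, num_q_vectors;

        if (!entries)
                return 1;
        for (i = 0; i < vectors; i++)
                entries[i].entry = i;

        vectors = fake_enable_msix_range(entries, 2, vectors);
        if (vectors < 0) {
                /* the driver clears IXGBE_FLAG_MSIX_ENABLED and frees the
                 * table here, then falls back to MSI or legacy IRQs */
                free(entries);
                return 1;
        }

        /* lines 777-783: keep the grant, minus overhead, capped by policy */
        num_q_vectors = vectors - NON_Q_VECTORS;
        if (num_q_vectors > max_q_vectors)
                num_q_vectors = max_q_vectors;
        printf("granted %d vectors -> %d queue vectors\n", vectors, num_q_vectors);
        free(entries);
        return 0;
}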
In ixgbe_alloc_q_vector():
  808  static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
  818          u8 tcs = netdev_get_num_tc(adapter->netdev);
  825          if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
  826                  u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
  827                  if (rss_i > 1 && adapter->atr_sample_rate) {
  853          netif_napi_add(adapter->netdev, &q_vector->napi,
  862          adapter->q_vector[v_idx] = q_vector;
  863          q_vector->adapter = adapter;
  867          q_vector->tx.work_limit = adapter->tx_work_limit;
  875          if (adapter->tx_itr_setting == 1)
  878                  q_vector->itr = adapter->tx_itr_setting;
  881          if (adapter->rx_itr_setting == 1)
  884                  q_vector->itr = adapter->rx_itr_setting;
  889                  ring->dev = &adapter->pdev->dev;
  890                  ring->netdev = adapter->netdev;
  899                  ring->count = adapter->tx_ring_count;
  900                  if (adapter->num_rx_pools > 1)
  902                                  txr_idx % adapter->num_rx_queues_per_pool;
  907                  adapter->tx_ring[txr_idx] = ring;
  919                  ring->dev = &adapter->pdev->dev;
  920                  ring->netdev = adapter->netdev;
  932                  if (adapter->hw.mac.type == ixgbe_mac_82599EB)
  936                  if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
  938                          f = &adapter->ring_feature[RING_F_FCOE];
  946                  ring->count = adapter->rx_ring_count;
  947                  if (adapter->num_rx_pools > 1)
  949                                  rxr_idx % adapter->num_rx_queues_per_pool;
  954                  adapter->rx_ring[rxr_idx] = ring;
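Lines 899-902 and 946-949 show the netdev-visible queue index being wrapped per pool whenever more than one Rx pool exists, so each pool's rings look like queue 0..n-1 to their own traffic. A sketch of the mapping under assumed pool counts:

#include <stdio.h>

int main(void)
{
        const int num_rx_pools = 4, num_rx_queues_per_pool = 2; /* assumed */
        const int num_tx_queues = num_rx_pools * num_rx_queues_per_pool;
        int txr_idx;

        for (txr_idx = 0; txr_idx < num_tx_queues; txr_idx++) {
                /* with multiple pools, the queue index wraps per pool
                 * instead of running 0..N-1 across the whole device */
                int queue_index = num_rx_pools > 1 ?
                                  txr_idx % num_rx_queues_per_pool : txr_idx;
                printf("tx ring %d -> netdev queue %d\n", txr_idx, queue_index);
        }
        return 0;
}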
In ixgbe_free_q_vector():
  976  static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
  978          struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
  982                  adapter->tx_ring[ring->queue_index] = NULL;
  985                  adapter->rx_ring[ring->queue_index] = NULL;
  987          adapter->q_vector[v_idx] = NULL;
In ixgbe_alloc_q_vectors():
 1005  static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 1007          int q_vectors = adapter->num_q_vectors;
 1008          int rxr_remaining = adapter->num_rx_queues;
 1009          int txr_remaining = adapter->num_tx_queues;
 1014          if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 1019                  err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
 1034                  err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
 1051          adapter->num_tx_queues = 0;
 1052          adapter->num_rx_queues = 0;
 1053          adapter->num_q_vectors = 0;
 1056                  ixgbe_free_q_vector(adapter, v_idx);
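The two allocation calls at lines 1019 and 1034 spread the remaining rings over the remaining vectors; dividing what is left by the vectors still unassigned, rounding up, lets early vectors absorb the remainder (and on failure, lines 1051-1056 unwind every vector already built). A standalone model of that distribution for the shared case, assuming 8 vectors serving 10+10 rings:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int q_vectors = 8, rxr_remaining = 10, txr_remaining = 10; /* assumed */
        int rxr_idx = 0, txr_idx = 0;
        int v_idx;

        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                /* spread what is left evenly over the vectors that are
                 * left, so early vectors take the rounding remainder */
                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

                printf("q_vector %d: %d tx rings from %d, %d rx rings from %d\n",
                       v_idx, tqpv, txr_idx, rqpv, rxr_idx);

                rxr_remaining -= rqpv;
                txr_remaining -= tqpv;
                rxr_idx += rqpv;
                txr_idx += tqpv;
        }
        return 0;
}

(The driver also has a simpler path when there are at least as many vectors as rings, giving each ring its own vector; the loop above models only the shared case.)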
In ixgbe_free_q_vectors():
 1069  static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
 1071          int v_idx = adapter->num_q_vectors;
 1073          adapter->num_tx_queues = 0;
 1074          adapter->num_rx_queues = 0;
 1075          adapter->num_q_vectors = 0;
 1078                  ixgbe_free_q_vector(adapter, v_idx);
In ixgbe_reset_interrupt_capability():
 1081  static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
 1083          if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 1084                  adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 1085                  pci_disable_msix(adapter->pdev);
 1086                  kfree(adapter->msix_entries);
 1087                  adapter->msix_entries = NULL;
 1088          } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 1089                  adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
 1090                  pci_disable_msi(adapter->pdev);
In ixgbe_set_interrupt_capability():
 1101  static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 1106          if (!ixgbe_acquire_msix_vectors(adapter))
 1115          if (netdev_get_num_tc(adapter->netdev) > 1) {
 1117                  netdev_reset_tc(adapter->netdev);
 1119                  if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 1120                          adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
 1122                  adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 1123                  adapter->temp_dcb_cfg.pfc_mode_enable = false;
 1124                  adapter->dcb_cfg.pfc_mode_enable = false;
 1127          adapter->dcb_cfg.num_tcs.pg_tcs = 1;
 1128          adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
 1132          ixgbe_disable_sriov(adapter);
 1136          adapter->ring_feature[RING_F_RSS].limit = 1;
 1141          ixgbe_set_num_queues(adapter);
 1142          adapter->num_q_vectors = 1;
 1144          err = pci_enable_msi(adapter->pdev);
 1149                  adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
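Lines 1106-1149 form a fallback ladder: if MSI-X acquisition fails, DCB and SR-IOV are unwound and the device is squeezed down to a single queue and q_vector (lines 1115-1142) before MSI, and finally legacy INTx, is attempted. A toy model of the ladder with stubbed acquisition calls:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real acquisition calls; outcomes are assumed. */
static bool acquire_msix(void) { return false; } /* pretend MSI-X fails */
static int  enable_msi(void)   { return 0; }     /* pretend MSI succeeds */

int main(void)
{
        if (acquire_msix()) {
                puts("MSI-X: one vector per queue pair");
                return 0;
        }

        /* MSI-X failed: the driver disables DCB and SR-IOV, limits RSS
         * to one queue, and recomputes the queue counts before retrying. */
        puts("MSI-X unavailable: single queue, one q_vector");
        if (enable_msi() == 0)
                puts("MSI enabled");
        else
                puts("falling back to legacy INTx");
        return 0;
}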
In ixgbe_init_interrupt_scheme():
 1162  int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 1167          ixgbe_set_num_queues(adapter);
 1170          ixgbe_set_interrupt_capability(adapter);
 1172          err = ixgbe_alloc_q_vectors(adapter);
 1178          ixgbe_cache_ring_register(adapter);
 1181                     (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
 1182                     adapter->num_rx_queues, adapter->num_tx_queues);
 1184          set_bit(__IXGBE_DOWN, &adapter->state);
 1189          ixgbe_reset_interrupt_capability(adapter);
In ixgbe_clear_interrupt_scheme():
 1200  void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
 1202          adapter->num_tx_queues = 0;
 1203          adapter->num_rx_queues = 0;
 1205          ixgbe_free_q_vectors(adapter);
 1206          ixgbe_reset_interrupt_capability(adapter);