Searched refs:vmdq (Results 1 – 13 of 13) sorted by relevance

/drivers/net/ethernet/intel/ixgbe/
ixgbe_lib.c
47 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_cache_ring_dcb_sriov() local
61 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
64 if ((reg_idx & ~vmdq->mask) >= tcs) in ixgbe_cache_ring_dcb_sriov()
65 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
69 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
72 if ((reg_idx & ~vmdq->mask) >= tcs) in ixgbe_cache_ring_dcb_sriov()
73 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
88 u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
91 reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; in ixgbe_cache_ring_dcb_sriov()
93 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; in ixgbe_cache_ring_dcb_sriov()
[all …]
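
Every ixgbe hit above leans on the same idiom, which recurs in the ixgbe_sriov.c and ixgbe_main.c hits below: __ALIGN_MASK(1, ~vmdq->mask) rounds 1 up to the pool stride, yielding the number of queues per VMDq pool, and a pool offset is then scaled by that stride to get an absolute hardware register index. A minimal standalone sketch of the arithmetic follows; __ALIGN_MASK is the kernel's align-up helper, and the mask value 0x7C (ixgbe's 4-queues-per-pool VMDq mask) and the offset of 3 are assumptions for illustration.

/* Sketch of the queues-per-pool arithmetic from the hits above. */
#include <stdio.h>

#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned int mask = 0x7C;  /* ring_feature[RING_F_VMDQ].mask; assumed 4Q layout */
	unsigned int offset = 3;   /* first pool owned by this feature; assumed */

	/* Rounding 1 up to the pool stride gives the queue count per pool. */
	unsigned int q_per_pool = __ALIGN_MASK(1, ~mask);

	/* Base register index for that pool's first queue. */
	unsigned int reg_idx = offset * q_per_pool;

	printf("q_per_pool=%u reg_idx=%u\n", q_per_pool, reg_idx); /* 4, 12 */
	return 0;
}
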
ixgbe_common.h
72 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
90 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
91 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
92 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
ixgbe_sriov.c
640 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_write_qde() local
641 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_write_qde()
659 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_reset_msg() local
665 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_vf_reset_msg()
927 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_get_vf_queues() local
942 msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_get_vf_queues()
943 msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_get_vf_queues()
1335 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_set_vf_rate_limit() local
1374 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_set_vf_rate_limit()
ixgbe_common.c
1782 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, in ixgbe_set_rar_generic() argument
1795 hw->mac.ops.set_vmdq(hw, index, vmdq); in ixgbe_set_rar_generic()
2881 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) in ixgbe_clear_vmdq_generic() argument
2901 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { in ixgbe_clear_vmdq_generic()
2910 } else if (vmdq < 32) { in ixgbe_clear_vmdq_generic()
2911 mpsar_lo &= ~(1 << vmdq); in ixgbe_clear_vmdq_generic()
2914 mpsar_hi &= ~(1 << (vmdq - 32)); in ixgbe_clear_vmdq_generic()
2930 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) in ixgbe_set_vmdq_generic() argument
2941 if (vmdq < 32) { in ixgbe_set_vmdq_generic()
2943 mpsar |= 1 << vmdq; in ixgbe_set_vmdq_generic()
[all …]
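
The ixgbe_set_vmdq_generic()/ixgbe_clear_vmdq_generic() hits above maintain a 64-bit pool bitmap per receive-address register (RAR), split across two 32-bit MPSAR registers: pools 0-31 live in the low dword, pools 32-63 in the high dword. A standalone sketch of that bit selection follows; plain struct fields stand in for the MMIO register reads and writes.

/* Sketch of the MPSAR pool-bitmap update shown in ixgbe_common.c. */
#include <stdint.h>
#include <stdio.h>

struct mpsar { uint32_t lo, hi; };

/* Associate pool `vmdq` with the RAR: mirrors ixgbe_set_vmdq_generic(). */
static void set_vmdq(struct mpsar *m, uint32_t vmdq)
{
	if (vmdq < 32)
		m->lo |= 1u << vmdq;
	else
		m->hi |= 1u << (vmdq - 32);
}

/* Drop pool `vmdq` from the RAR: mirrors ixgbe_clear_vmdq_generic(). */
static void clear_vmdq(struct mpsar *m, uint32_t vmdq)
{
	if (vmdq < 32)
		m->lo &= ~(1u << vmdq);
	else
		m->hi &= ~(1u << (vmdq - 32));
}

int main(void)
{
	struct mpsar m = { 0, 0 };

	set_vmdq(&m, 5);    /* pool 5 -> low-dword bit 5   */
	set_vmdq(&m, 40);   /* pool 40 -> high-dword bit 8 */
	clear_vmdq(&m, 5);
	printf("lo=%#x hi=%#x\n", m.lo, m.hi); /* lo=0 hi=0x100 */
	return 0;
}
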
ixgbe_82598.c
831 static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) in ixgbe_set_vmdq_82598() argument
844 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); in ixgbe_set_vmdq_82598()
855 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) in ixgbe_clear_vmdq_82598() argument
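
The older 82598 part has no MPSAR bitmap; as the hit above shows, ixgbe_set_vmdq_82598() instead encodes a single pool index in the VIND field of the RAR high dword. A small sketch of that encoding, assuming the VIND shift/mask values from ixgbe_type.h (field in bits 21:18) and the RAH address-valid bit 0x80000000:

/* Sketch of the 82598-style single-pool encoding in the RAH register. */
#include <stdint.h>
#include <stdio.h>

#define IXGBE_RAH_VIND_SHIFT 18          /* assumed, per ixgbe_type.h */
#define IXGBE_RAH_VIND_MASK  0x003C0000  /* assumed, per ixgbe_type.h */

int main(void)
{
	uint32_t rar_high = 0x80000000;  /* address-valid bit set; assumed */
	uint32_t vmdq = 5;               /* pool index to encode */

	/* Clear any previous index, then merge in the new one. */
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= (vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK;

	printf("RAH = %#010x\n", rar_high); /* 0x80140000 */
	return 0;
}
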
ixgbe_type.h
3221 u32 *vmdq);
ixgbe_main.c
6578 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_tx_pending() local
6579 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_vf_tx_pending()
/drivers/net/ethernet/intel/ixgbevf/
vf.h
44 u32 *vmdq);
vf.c
390 u32 vmdq) in ixgbevf_set_rar_vf() argument
/drivers/net/ethernet/intel/i40evf/
i40e_type.h
244 bool vmdq; member
/drivers/net/ethernet/intel/i40e/
i40e_type.h
244 bool vmdq; member
i40e_common.c
2863 p->vmdq = true; in i40e_parse_discover_capabilities()
i40e_main.c
8243 if (pf->hw.func_caps.vmdq) { in i40e_sw_init()