
Searched refs:vsi (Results 1 – 25 of 67) sorted by relevance


/drivers/net/ethernet/intel/ice/
ice_lib.c
16 struct ice_vsi *vsi = ring->vsi; in ice_setup_rx_ctx() local
17 struct ice_hw *hw = &vsi->back->hw; in ice_setup_rx_ctx()
25 pf_q = vsi->rxq_map[ring->q_index]; in ice_setup_rx_ctx()
37 rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; in ice_setup_rx_ctx()
63 rlan_ctx.rxmax = min_t(u16, vsi->max_frame, in ice_setup_rx_ctx()
64 ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); in ice_setup_rx_ctx()
72 if (vsi->type != ICE_VSI_VF) { in ice_setup_rx_ctx()
90 dev_err(&vsi->back->pdev->dev, in ice_setup_rx_ctx()
96 if (vsi->type == ICE_VSI_VF) in ice_setup_rx_ctx()
118 struct ice_vsi *vsi = ring->vsi; in ice_setup_tx_ctx() local
[all …]
ice_lib.h
23 ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
28 void ice_update_eth_stats(struct ice_vsi *vsi);
30 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
32 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
34 void ice_vsi_cfg_msix(struct ice_vsi *vsi);
38 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
41 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
44 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
48 void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
51 int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
[all …]
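
The ice_lib.h hits above list the per-VSI queue setup entry points. As a hedged sketch (not taken from the tree), a caller could chain them roughly as follows; the ordering, the helper name, and the assumption that both int-returning functions report 0 on success are illustrative only.

/* Illustrative sketch only: wire up a VSI's queues using the prototypes
 * listed above (ice_vsi_cfg_rxqs / ice_vsi_cfg_lan_txqs return int,
 * ice_vsi_cfg_msix returns void). The helper name is hypothetical.
 */
static int example_cfg_vsi_queues(struct ice_vsi *vsi)
{
	int err;

	err = ice_vsi_cfg_rxqs(vsi);
	if (err)
		return err;

	err = ice_vsi_cfg_lan_txqs(vsi);
	if (err)
		return err;

	ice_vsi_cfg_msix(vsi);
	return 0;
}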
ice_main.c
73 struct ice_vsi *vsi = NULL; in ice_check_for_hang_subtask() local
80 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { in ice_check_for_hang_subtask()
81 vsi = pf->vsi[v]; in ice_check_for_hang_subtask()
85 if (!vsi || test_bit(__ICE_DOWN, vsi->state)) in ice_check_for_hang_subtask()
88 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) in ice_check_for_hang_subtask()
91 hw = &vsi->back->hw; in ice_check_for_hang_subtask()
93 for (i = 0; i < vsi->num_txq; i++) { in ice_check_for_hang_subtask()
94 struct ice_ring *tx_ring = vsi->tx_rings[i]; in ice_check_for_hang_subtask()
133 struct ice_vsi *vsi; in ice_init_mac_fltr() local
135 vsi = ice_get_main_vsi(pf); in ice_init_mac_fltr()
[all …]
ice_virtchnl_pf.c
162 ice_vsi_release(pf->vsi[vf->lan_vsi_idx]); in ice_free_vf_res()
186 struct ice_vsi *vsi; in ice_dis_vf_mappings() local
191 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_dis_vf_mappings()
208 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) in ice_dis_vf_mappings()
214 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) in ice_dis_vf_mappings()
277 struct ice_vsi *vsi; in ice_dis_vf_qs() local
279 vsi = pf->vsi[vf->lan_vsi_idx]; in ice_dis_vf_qs()
281 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); in ice_dis_vf_qs()
282 ice_vsi_stop_rx_rings(vsi); in ice_dis_vf_qs()
459 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) in ice_vsi_manage_pvid() argument
[all …]
ice_ethtool.c
31 return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * in ice_q_stats_len()
167 struct ice_vsi *vsi = np->vsi; in ice_get_drvinfo() local
168 struct ice_pf *pf = vsi->back; in ice_get_drvinfo()
188 struct ice_pf *pf = np->vsi->back; in ice_get_regs()
202 struct ice_pf *pf = np->vsi->back; in ice_get_msglevel()
216 struct ice_pf *pf = np->vsi->back; in ice_set_msglevel()
231 struct ice_pf *pf = np->vsi->back; in ice_get_eeprom_len()
242 struct ice_vsi *vsi = np->vsi; in ice_get_eeprom() local
243 struct ice_pf *pf = vsi->back; in ice_get_eeprom()
308 status = ice_get_link_status(np->vsi->port_info, &link_up); in ice_link_test()
[all …]
ice.h
97 #define ice_for_each_txq(vsi, i) \ argument
98 for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
100 #define ice_for_each_rxq(vsi, i) \ argument
101 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
104 #define ice_for_each_alloc_txq(vsi, i) \ argument
105 for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
107 #define ice_for_each_alloc_rxq(vsi, i) \ argument
108 for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
110 #define ice_for_each_q_vector(vsi, i) \ argument
111 for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
[all …]
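
The ice.h hits above include the full definitions of the per-queue iteration macros. A minimal usage sketch follows; the helper name and the pr_info output are assumptions for illustration, not code from the driver.

/* Hypothetical helper: walk a VSI's Tx queues with ice_for_each_txq,
 * which (per the definition above) iterates i from 0 to vsi->num_txq - 1.
 */
static void example_dump_txq_indices(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_txq(vsi, i)
		pr_info("vsi txq %d of %d\n", i, vsi->num_txq);
}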
ice_dcb_lib.c
11 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) in ice_vsi_cfg_netdev_tc() argument
13 struct net_device *netdev = vsi->netdev; in ice_vsi_cfg_netdev_tc()
14 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_netdev_tc()
27 if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) in ice_vsi_cfg_netdev_tc()
33 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_cfg_netdev_tc()
35 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_cfg_netdev_tc()
36 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_cfg_netdev_tc()
37 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_cfg_netdev_tc()
43 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; in ice_vsi_cfg_netdev_tc()
106 void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) in ice_vsi_cfg_dcb_rings() argument
[all …]
/drivers/media/platform/mtk-vcodec/vdec/
vdec_vp9_if.c
197 struct vdec_vp9_vsi *vsi; member
205 struct vdec_vp9_vsi *vsi = inst->vsi; in vp9_is_sf_ref_fb() local
207 for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) { in vp9_is_sf_ref_fb()
208 if (fb == &vsi->sf_ref_fb[i].fb) in vp9_is_sf_ref_fb()
260 struct vdec_vp9_vsi *vsi = inst->vsi; in vp9_ref_cnt_fb() local
263 if (ref_idx >= 0 && vsi->frm_bufs[ref_idx].ref_cnt > 0) { in vp9_ref_cnt_fb()
264 vsi->frm_bufs[ref_idx].ref_cnt--; in vp9_ref_cnt_fb()
266 if (vsi->frm_bufs[ref_idx].ref_cnt == 0) { in vp9_ref_cnt_fb()
268 vsi->frm_bufs[ref_idx].buf.fb)) { in vp9_ref_cnt_fb()
272 vsi->frm_bufs[ref_idx].buf.fb->base_y.va); in vp9_ref_cnt_fb()
[all …]
vdec_h264_if.c
132 struct vdec_h264_vsi *vsi; member
151 inst->vsi->pred_buf_dma = inst->pred_buf.dma_addr; in allocate_predication_buf()
161 inst->vsi->pred_buf_dma = 0; in free_predication_buf()
184 inst->vsi->mv_buf_dma[i] = mem->dma_addr; in alloc_mv_buf()
196 inst->vsi->mv_buf_dma[i] = 0; in free_mv_buf()
207 list = disp_list ? &inst->vsi->list_disp : &inst->vsi->list_free; in check_list_validity()
229 list = &inst->vsi->list_free; in put_fb_to_free()
248 *pic = inst->vsi->pic; in get_pic_info()
257 cr->left = inst->vsi->crop.left; in get_crop_info()
258 cr->top = inst->vsi->crop.top; in get_crop_info()
[all …]
vdec_vp8_if.c
165 struct vdec_vp8_vsi *vsi; member
185 struct vdec_vp8_vsi *vsi = inst->vsi; in write_hw_segmentation_data() local
189 for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) { in write_hw_segmentation_data()
190 for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) { in write_hw_segmentation_data()
194 val = vsi->segment_buf[i][j]; in write_hw_segmentation_data()
206 struct vdec_vp8_vsi *vsi = inst->vsi; in read_hw_segmentation_data() local
210 for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) { in read_hw_segmentation_data()
211 for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) { in read_hw_segmentation_data()
216 vsi->segment_buf[i][j] = val; in read_hw_segmentation_data()
254 u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET]; in store_dec_table()
[all …]
/drivers/net/ethernet/intel/i40e/
i40e_main.c
38 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
40 static int i40e_add_vsi(struct i40e_vsi *vsi);
41 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
276 if (pf->vsi[i] && (pf->vsi[i]->id == id)) in i40e_find_vsi_from_id()
277 return pf->vsi[i]; in i40e_find_vsi_from_id()
307 struct i40e_vsi *vsi = np->vsi; in i40e_tx_timeout() local
308 struct i40e_pf *pf = vsi->back; in i40e_tx_timeout()
334 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_tx_timeout()
335 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { in i40e_tx_timeout()
337 vsi->tx_rings[i]->queue_index) { in i40e_tx_timeout()
[all …]
i40e_debugfs.c
26 if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) in i40e_dbg_find_vsi()
27 return pf->vsi[i]; in i40e_dbg_find_vsi()
83 pf->vsi[pf->lan_vsi]->netdev->name, in i40e_dbg_command_read()
113 struct i40e_vsi *vsi; in i40e_dbg_dump_vsi_seid() local
116 vsi = i40e_dbg_find_vsi(pf, seid); in i40e_dbg_dump_vsi_seid()
117 if (!vsi) { in i40e_dbg_dump_vsi_seid()
123 if (vsi->netdev) { in i40e_dbg_dump_vsi_seid()
124 struct net_device *nd = vsi->netdev; in i40e_dbg_dump_vsi_seid()
137 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); in i40e_dbg_dump_vsi_seid()
141 i, vsi->state[i]); in i40e_dbg_dump_vsi_seid()
[all …]
i40e_virtchnl_pf.c
176 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); in i40e_vc_isvalid_vsi_id() local
178 return (vsi && (vsi->vf_id == vf->vf_id)); in i40e_vc_isvalid_vsi_id()
193 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); in i40e_vc_isvalid_queue_id() local
195 return (vsi && (qid < vsi->alloc_queue_pairs)); in i40e_vc_isvalid_queue_id()
226 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); in i40e_vc_get_pf_queue_id() local
229 if (!vsi) in i40e_vc_get_pf_queue_id()
232 if (le16_to_cpu(vsi->info.mapping_flags) & in i40e_vc_get_pf_queue_id()
235 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); in i40e_vc_get_pf_queue_id()
237 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + in i40e_vc_get_pf_queue_id()
550 struct i40e_vsi *vsi; in i40e_config_vsi_tx_queue() local
[all …]
i40e_ethtool.c
1049 struct i40e_pf *pf = np->vsi->back; in i40e_get_link_ksettings()
1140 struct i40e_pf *pf = np->vsi->back; in i40e_set_link_ksettings()
1141 struct i40e_vsi *vsi = np->vsi; in i40e_set_link_ksettings() local
1156 if (vsi != pf->vsi[pf->lan_vsi]) in i40e_set_link_ksettings()
1339 i40e_print_link_message(vsi, false); in i40e_set_link_ksettings()
1376 struct i40e_pf *pf = np->vsi->back; in i40e_set_fec_cfg()
1437 struct i40e_pf *pf = np->vsi->back; in i40e_get_fec_param()
1479 struct i40e_pf *pf = np->vsi->back; in i40e_set_fec_param()
1523 struct i40e_pf *pf = np->vsi->back; in i40e_nway_reset()
1550 struct i40e_pf *pf = np->vsi->back; in i40e_get_pauseparam()
[all …]
i40e.h
593 struct i40e_vsi **vsi; member
851 struct i40e_vsi *vsi; member
856 struct i40e_vsi *vsi; member
934 struct i40e_vsi *vsi = np->vsi; in i40e_netdev_to_pf() local
936 return vsi->back; in i40e_netdev_to_pf()
939 static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi, in i40e_vsi_setup_irqhandler() argument
942 vsi->irq_handler = irq_handler; in i40e_vsi_setup_irqhandler()
992 int i40e_up(struct i40e_vsi *vsi);
993 void i40e_down(struct i40e_vsi *vsi);
998 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
[all …]
i40e_xsk.c
19 static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem) in i40e_xsk_umem_dma_map() argument
21 struct i40e_pf *pf = vsi->back; in i40e_xsk_umem_dma_map()
53 static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem) in i40e_xsk_umem_dma_unmap() argument
55 struct i40e_pf *pf = vsi->back; in i40e_xsk_umem_dma_unmap()
77 static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, in i40e_xsk_umem_enable() argument
80 struct net_device *netdev = vsi->netdev; in i40e_xsk_umem_enable()
85 if (vsi->type != I40E_VSI_MAIN) in i40e_xsk_umem_enable()
88 if (qid >= vsi->num_queue_pairs) in i40e_xsk_umem_enable()
95 reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count); in i40e_xsk_umem_enable()
101 err = i40e_xsk_umem_dma_map(vsi, umem); in i40e_xsk_umem_enable()
[all …]
i40e_client.c
47 int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) in i40e_client_get_params() argument
49 struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config; in i40e_client_get_params()
57 if (!(vsi->tc_config.enabled_tc & BIT(tc))) in i40e_client_get_params()
60 qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]); in i40e_client_get_params()
64 dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n", in i40e_client_get_params()
65 tc, vsi->id); in i40e_client_get_params()
70 params->mtu = vsi->netdev->mtu; in i40e_client_get_params()
84 i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len) in i40e_notify_client_of_vf_msg() argument
86 struct i40e_pf *pf = vsi->back; in i40e_notify_client_of_vf_msg()
110 void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) in i40e_notify_client_of_l2_param_changes() argument
[all …]
i40e_txrx.c
25 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
50 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) << in i40e_fdir()
95 struct i40e_vsi *vsi; in i40e_program_fdir_filter() local
102 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_program_fdir_filter()
103 if (!vsi) in i40e_program_fdir_filter()
106 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
173 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, in i40e_add_del_fdir_udpv4() argument
177 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_udpv4()
245 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, in i40e_add_del_fdir_tcpv4() argument
249 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_tcpv4()
[all …]
i40e_dcb_nl.c
145 void i40e_dcbnl_set_all(struct i40e_vsi *vsi) in i40e_dcbnl_set_all() argument
147 struct net_device *dev = vsi->netdev; in i40e_dcbnl_set_all()
171 if (tc_map & vsi->tc_config.enabled_tc) { in i40e_dcbnl_set_all()
191 static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, in i40e_dcbnl_vsi_del_app() argument
194 struct net_device *dev = vsi->netdev; in i40e_dcbnl_vsi_del_app()
219 if (pf->vsi[v] && pf->vsi[v]->netdev) { in i40e_dcbnl_del_app()
220 err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); in i40e_dcbnl_del_app()
222 pf->vsi[v]->seid, err, app->selector, in i40e_dcbnl_del_app()
284 void i40e_dcbnl_setup(struct i40e_vsi *vsi) in i40e_dcbnl_setup() argument
286 struct net_device *dev = vsi->netdev; in i40e_dcbnl_setup()
[all …]
i40e_xsk.h
11 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
12 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
13 int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
19 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
/drivers/net/ethernet/intel/iavf/
iavf_client.c
36 void iavf_client_get_params(struct iavf_vsi *vsi, struct iavf_params *params) in iavf_client_get_params() argument
41 params->mtu = vsi->netdev->mtu; in iavf_client_get_params()
42 params->link_up = vsi->back->link_up; in iavf_client_get_params()
46 params->qos.prio_qos[i].qs_handle = vsi->qs_handle; in iavf_client_get_params()
58 void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len) in iavf_notify_client_message() argument
62 if (!vsi) in iavf_notify_client_message()
65 cinst = vsi->back->cinst; in iavf_notify_client_message()
68 dev_dbg(&vsi->back->pdev->dev, in iavf_notify_client_message()
82 void iavf_notify_client_l2_params(struct iavf_vsi *vsi) in iavf_notify_client_l2_params() argument
87 if (!vsi) in iavf_notify_client_l2_params()
[all …]
/drivers/media/platform/mtk-vcodec/venc/
venc_vp8_if.c
132 struct venc_vp8_vsi *vsi; member
161 struct venc_vp8_vpu_buf *wb = inst->vsi->work_bufs; in vp8_enc_alloc_work_buf()
262 ac_tag[6] = inst->vsi->config.pic_w; in vp8_enc_compose_one_frame()
263 ac_tag[7] = inst->vsi->config.pic_w >> 8; in vp8_enc_compose_one_frame()
264 ac_tag[8] = inst->vsi->config.pic_h; in vp8_enc_compose_one_frame()
265 ac_tag[9] = inst->vsi->config.pic_h >> 8; in vp8_enc_compose_one_frame()
345 inst->vsi = (struct venc_vp8_vsi *)inst->vpu_inst.vsi; in vp8_enc_init()
405 inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt; in vp8_enc_set_param()
406 inst->vsi->config.bitrate = enc_prm->bitrate; in vp8_enc_set_param()
407 inst->vsi->config.pic_w = enc_prm->width; in vp8_enc_set_param()
[all …]
venc_h264_if.c
144 struct venc_h264_vsi *vsi; member
237 struct venc_h264_vpu_buf *wb = inst->vsi->work_bufs; in h264_enc_alloc_work_buf()
480 inst->vsi = (struct venc_h264_vsi *)inst->vpu_inst.vsi; in h264_enc_init()
598 inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt; in h264_enc_set_param()
599 inst->vsi->config.bitrate = enc_prm->bitrate; in h264_enc_set_param()
600 inst->vsi->config.pic_w = enc_prm->width; in h264_enc_set_param()
601 inst->vsi->config.pic_h = enc_prm->height; in h264_enc_set_param()
602 inst->vsi->config.buf_w = enc_prm->buf_width; in h264_enc_set_param()
603 inst->vsi->config.buf_h = enc_prm->buf_height; in h264_enc_set_param()
604 inst->vsi->config.gop_size = enc_prm->gop_size; in h264_enc_set_param()
[all …]
/drivers/media/platform/mtk-mdp/
mtk_mdp_regs.c
51 struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer; in mtk_mdp_hw_set_input_addr()
61 struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer; in mtk_mdp_hw_set_output_addr()
71 struct mdp_config *config = &ctx->vpu.vsi->src_config; in mtk_mdp_hw_set_in_size()
92 struct mdp_config *config = &ctx->vpu.vsi->src_config; in mtk_mdp_hw_set_in_image_format()
93 struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer; in mtk_mdp_hw_set_in_image_format()
107 struct mdp_config *config = &ctx->vpu.vsi->dst_config; in mtk_mdp_hw_set_out_size()
123 struct mdp_config *config = &ctx->vpu.vsi->dst_config; in mtk_mdp_hw_set_out_image_format()
124 struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer; in mtk_mdp_hw_set_out_image_format()
136 struct mdp_config_misc *misc = &ctx->vpu.vsi->misc; in mtk_mdp_hw_set_rotation()
145 struct mdp_config_misc *misc = &ctx->vpu.vsi->misc; in mtk_mdp_hw_set_global_alpha()
/drivers/infiniband/hw/i40iw/
i40iw_puda.c
45 static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
47 static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
294 rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq; in i40iw_puda_poll_completion()
337 rsrc->receive(rsrc->vsi, buf); in i40iw_puda_poll_completion()
347 rsrc->xmit_complete(rsrc->vsi, sqwrid); in i40iw_puda_poll_completion()
583 qp->vsi = rsrc->vsi; in i40iw_puda_qp_create()
682 cq->vsi = rsrc->vsi; in i40iw_puda_cq_create()
789 void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi, in i40iw_puda_dele_resources() argument
793 struct i40iw_sc_dev *dev = vsi->dev; in i40iw_puda_dele_resources()
801 rsrc = vsi->ilq; in i40iw_puda_dele_resources()
[all …]
