Searched for refs:vf (results 1 – 25 of 142), sorted by relevance

/drivers/net/ethernet/sfc/
siena_sriov.c
193 static unsigned abs_index(struct siena_vf *vf, unsigned index) in abs_index() argument
195 return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; in abs_index()
303 static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf) in efx_siena_sriov_reset_tx_filter() argument
305 struct efx_nic *efx = vf->efx; in efx_siena_sriov_reset_tx_filter()
310 if (vf->tx_filter_id != -1) { in efx_siena_sriov_reset_tx_filter()
312 vf->tx_filter_id); in efx_siena_sriov_reset_tx_filter()
314 vf->pci_name, vf->tx_filter_id); in efx_siena_sriov_reset_tx_filter()
315 vf->tx_filter_id = -1; in efx_siena_sriov_reset_tx_filter()
318 if (is_zero_ether_addr(vf->addr.mac_addr)) in efx_siena_sriov_reset_tx_filter()
324 if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2) in efx_siena_sriov_reset_tx_filter()
[all …]
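
The abs_index() hit above shows how the siena PF driver maps a VF-relative virtual-interface index to an absolute one: a fixed base, plus the VF's slot times the per-VF VI count, plus the local index. Below is a minimal userspace sketch of that arithmetic; the base and per-VF count are stand-in values, not the driver's real EFX_VI_BASE or efx_vf_size() result.

#include <stdio.h>

/* Stand-in values; the real base and per-VF VI count come from the
 * sfc hardware configuration (EFX_VI_BASE, efx_vf_size()). */
#define VI_BASE    128u
#define VIS_PER_VF 4u

struct vf_sketch {
    unsigned index;    /* this VF's slot number under the PF */
};

/* Same shape as the abs_index() formula in the hit above. */
static unsigned abs_vi_index(const struct vf_sketch *vf, unsigned index)
{
    return VI_BASE + vf->index * VIS_PER_VF + index;
}

int main(void)
{
    struct vf_sketch vf = { .index = 2 };

    /* VI 1 of VF 2 -> 128 + 2 * 4 + 1 = 137 with these stand-ins */
    printf("absolute VI index: %u\n", abs_vi_index(&vf, 1));
    return 0;
}
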
ef10_sriov.c
121 if (!nic_data->vf) in efx_ef10_sriov_free_vf_vports()
125 struct ef10_vf *vf = nic_data->vf + i; in efx_ef10_sriov_free_vf_vports() local
128 if (vf->pci_dev && in efx_ef10_sriov_free_vf_vports()
129 vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) in efx_ef10_sriov_free_vf_vports()
132 if (vf->vport_assigned) { in efx_ef10_sriov_free_vf_vports()
134 vf->vport_assigned = 0; in efx_ef10_sriov_free_vf_vports()
137 if (!is_zero_ether_addr(vf->mac)) { in efx_ef10_sriov_free_vf_vports()
138 efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac); in efx_ef10_sriov_free_vf_vports()
139 eth_zero_addr(vf->mac); in efx_ef10_sriov_free_vf_vports()
142 if (vf->vport_id) { in efx_ef10_sriov_free_vf_vports()
[all …]
/drivers/net/ethernet/intel/i40e/
i40e_virtchnl_pf.c
47 struct i40e_vf *vf = pf->vf; in i40e_vc_vf_broadcast() local
50 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { in i40e_vc_vf_broadcast()
51 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; in i40e_vc_vf_broadcast()
53 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && in i40e_vc_vf_broadcast()
54 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) in i40e_vc_vf_broadcast()
71 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) in i40e_vc_notify_vf_link_state() argument
74 struct i40e_pf *pf = vf->pf; in i40e_vc_notify_vf_link_state()
77 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; in i40e_vc_notify_vf_link_state()
81 if (vf->link_forced) { in i40e_vc_notify_vf_link_state()
82 pfe.event_data.link_event.link_status = vf->link_up; in i40e_vc_notify_vf_link_state()
[all …]
/drivers/net/ethernet/intel/ixgbe/
ixgbe_sriov.c
67 mv_list[i].vf = -1; in ixgbe_alloc_vf_macvlans()
150 int vf = 0; in ixgbe_get_vfs() local
165 if (vf >= adapter->num_vfs) in ixgbe_get_vfs()
168 adapter->vfinfo[vf].vfdev = vfdev; in ixgbe_get_vfs()
169 ++vf; in ixgbe_get_vfs()
229 unsigned int num_vfs = adapter->num_vfs, vf; in ixgbe_disable_sriov() local
239 for (vf = 0; vf < num_vfs; ++vf) { in ixgbe_disable_sriov()
240 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; in ixgbe_disable_sriov()
244 adapter->vfinfo[vf].vfdev = NULL; in ixgbe_disable_sriov()
403 u32 *msgbuf, u32 vf) in ixgbe_set_vf_multicasts() argument
[all …]
ixgbe_sriov.h
51 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
53 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
54 int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
56 int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
58 int vf, struct ifla_vf_info *ivi);
67 u16 vid, u16 qos, u32 vf) in ixgbe_set_vmvir() argument
72 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); in ixgbe_set_vmvir()
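
The ixgbe_sriov.h prototypes above are the PF driver's implementations of the netdev VF-management hooks; a driver publishes them through its struct net_device_ops so the core can service "ip link set <pf> vf N ..." requests. Below is a minimal kernel-side sketch of that wiring; the my_* names and the 64-VF bound are placeholders rather than ixgbe's code, and it only builds inside a kernel tree.

#include <linux/errno.h>
#include <linux/netdevice.h>

#define MY_NUM_VFS 64    /* placeholder bound for this sketch */

static int my_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
                                  bool setting)
{
    if (vf < 0 || vf >= MY_NUM_VFS)
        return -EINVAL;
    /* program the per-VF anti-spoof filter in hardware here */
    return 0;
}

static int my_ndo_set_vf_trust(struct net_device *netdev, int vf,
                               bool setting)
{
    if (vf < 0 || vf >= MY_NUM_VFS)
        return -EINVAL;
    /* record the trust flag so privileged VF requests are honoured */
    return 0;
}

static const struct net_device_ops my_netdev_ops = {
    /* the usual ndo_open/ndo_start_xmit/... entries are omitted */
    .ndo_set_vf_spoofchk = my_ndo_set_vf_spoofchk,
    .ndo_set_vf_trust    = my_ndo_set_vf_trust,
    /* .ndo_set_vf_rate, .ndo_get_vf_config, etc. are wired up the same way */
};
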
/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_sriov.c
30 struct bnx2x_virtf **vf,
78 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_igu_ack_sb() argument
86 u32 func_encode = vf->abs_vfid; in bnx2x_vf_igu_ack_sb()
114 struct bnx2x_virtf *vf, in bnx2x_validate_vf_sp_objs() argument
117 if (!bnx2x_leading_vfq(vf, sp_initialized)) { in bnx2x_validate_vf_sp_objs()
128 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_tx() argument
135 vf->abs_vfid, in bnx2x_vfop_qctor_dump_tx()
144 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_rx() argument
153 vf->abs_vfid, in bnx2x_vfop_qctor_dump_rx()
169 struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_prep() argument
[all …]
bnx2x_sriov.h
164 #define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs) argument
165 #define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs) argument
166 #define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs) argument
167 #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) argument
168 #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) argument
169 #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) argument
177 #define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) argument
178 #define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) argument
221 #define for_each_vfq(vf, var) \ argument
222 for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)
[all …]
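
The bnx2x_sriov.h hits above are field accessors over a per-VF resource-allocation struct, plus a for_each_vfq() helper that iterates a VF's queues. Below is a small userspace sketch of that macro pattern; the struct layout and counts are trimmed-down stand-ins, not bnx2x's real definitions.

#include <stdio.h>

struct vf_resc_sketch {
    int num_rxqs;
    int num_txqs;
};

struct virtf_sketch {
    struct vf_resc_sketch alloc_resc;
};

/* same shape as the vf_rxq_count()/vf_txq_count() accessors above */
#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs)
#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs)

/* same shape as for_each_vfq(): walk a VF's queue indices */
#define for_each_vfq(vf, var) \
    for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)

int main(void)
{
    struct virtf_sketch vf = { .alloc_resc = { .num_rxqs = 3, .num_txqs = 3 } };
    int q;

    for_each_vfq(&vf, q)
        printf("configure rx queue %d of %d\n", q, vf_rxq_count(&vf));
    return 0;
}
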
bnx2x_vfpf.c
542 static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_leading_vfq_init() argument
545 u8 cl_id = vfq_cl_id(vf, q); in bnx2x_leading_vfq_init()
546 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); in bnx2x_leading_vfq_init()
551 bnx2x_vf_sp(bp, vf, mac_rdata), in bnx2x_leading_vfq_init()
552 bnx2x_vf_sp_map(bp, vf, mac_rdata), in bnx2x_leading_vfq_init()
554 &vf->filter_state, in bnx2x_leading_vfq_init()
556 &vf->vf_macs_pool); in bnx2x_leading_vfq_init()
560 bnx2x_vf_sp(bp, vf, vlan_rdata), in bnx2x_leading_vfq_init()
561 bnx2x_vf_sp_map(bp, vf, vlan_rdata), in bnx2x_leading_vfq_init()
563 &vf->filter_state, in bnx2x_leading_vfq_init()
[all …]
/drivers/crypto/cavium/cpt/
cptpf_mbox.c
11 static void cpt_send_msg_to_vf(struct cpt_device *cpt, int vf, in cpt_send_msg_to_vf() argument
15 cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1), in cpt_send_msg_to_vf()
17 cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0), mbx->msg); in cpt_send_msg_to_vf()
23 static void cpt_mbox_send_ack(struct cpt_device *cpt, int vf, in cpt_mbox_send_ack() argument
28 cpt_send_msg_to_vf(cpt, vf, mbx); in cpt_mbox_send_ack()
31 static void cpt_clear_mbox_intr(struct cpt_device *cpt, u32 vf) in cpt_clear_mbox_intr() argument
34 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0), (1 << vf)); in cpt_clear_mbox_intr()
40 static void cpt_cfg_qlen_for_vf(struct cpt_device *cpt, int vf, u32 size) in cpt_cfg_qlen_for_vf() argument
44 pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf)); in cpt_cfg_qlen_for_vf()
47 cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u); in cpt_cfg_qlen_for_vf()
[all …]
/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_sriov_pf.c
742 struct qlcnic_vf_info *vf = trans->vf; in qlcnic_sriov_pf_channel_cfg_cmd() local
743 struct qlcnic_vport *vp = vf->vp; in qlcnic_sriov_pf_channel_cfg_cmd()
746 u16 func = vf->pci_func; in qlcnic_sriov_pf_channel_cfg_cmd()
750 adapter = vf->adapter; in qlcnic_sriov_pf_channel_cfg_cmd()
762 size = sizeof(*vf->sriov_vlans); in qlcnic_sriov_pf_channel_cfg_cmd()
764 memset(vf->sriov_vlans, 0, size); in qlcnic_sriov_pf_channel_cfg_cmd()
776 set_bit(QLC_BC_VF_STATE, &vf->state); in qlcnic_sriov_pf_channel_cfg_cmd()
778 clear_bit(QLC_BC_VF_STATE, &vf->state); in qlcnic_sriov_pf_channel_cfg_cmd()
788 struct qlcnic_vf_info *vf, in qlcnic_sriov_cfg_vf_def_mac() argument
799 vp = vf->vp; in qlcnic_sriov_cfg_vf_def_mac()
[all …]
qlcnic_sriov_common.c
147 struct qlcnic_vf_info *vf; in qlcnic_sriov_init() local
191 vf = &sriov->vf_info[i]; in qlcnic_sriov_init()
192 vf->adapter = adapter; in qlcnic_sriov_init()
193 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i); in qlcnic_sriov_init()
194 mutex_init(&vf->send_cmd_lock); in qlcnic_sriov_init()
195 spin_lock_init(&vf->vlan_list_lock); in qlcnic_sriov_init()
196 INIT_LIST_HEAD(&vf->rcv_act.wait_list); in qlcnic_sriov_init()
197 INIT_LIST_HEAD(&vf->rcv_pend.wait_list); in qlcnic_sriov_init()
198 spin_lock_init(&vf->rcv_act.lock); in qlcnic_sriov_init()
199 spin_lock_init(&vf->rcv_pend.lock); in qlcnic_sriov_init()
[all …]
/drivers/net/ethernet/cisco/enic/
enic_pp.c
38 int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err) in enic_is_valid_pp_vf() argument
40 if (vf != PORT_SELF_VF) { in enic_is_valid_pp_vf()
43 if (vf < 0 || vf >= enic->num_vfs) { in enic_is_valid_pp_vf()
57 if (vf == PORT_SELF_VF && !enic_is_dynamic(enic)) { in enic_is_valid_pp_vf()
69 static int enic_set_port_profile(struct enic *enic, int vf) in enic_set_port_profile() argument
81 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_port_profile()
99 } else if (vf == PORT_SELF_VF) { in enic_set_port_profile()
103 "for VF %d\n", vf); in enic_set_port_profile()
135 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_init_prov2, (u8 *)vp, in enic_set_port_profile()
145 static int enic_unset_port_profile(struct enic *enic, int vf) in enic_unset_port_profile() argument
[all …]
enic_pp.h
22 #define ENIC_PP_BY_INDEX(enic, vf, pp, err) \ argument
24 if (enic_is_valid_pp_vf(enic, vf, err)) \
25 pp = (vf == PORT_SELF_VF) ? enic->pp : enic->pp + vf; \
30 int enic_process_set_pp_request(struct enic *enic, int vf,
32 int enic_process_get_pp_request(struct enic *enic, int vf,
34 int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err);
/drivers/net/ethernet/broadcom/bnxt/
bnxt_sriov.c
26 struct bnxt_vf_info *vf, u16 event_id) in bnxt_hwrm_fwd_async_event_cmpl() argument
34 if (vf) in bnxt_hwrm_fwd_async_event_cmpl()
35 req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); in bnxt_hwrm_fwd_async_event_cmpl()
84 struct bnxt_vf_info *vf; in bnxt_set_vf_spoofchk() local
96 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_spoofchk()
97 if (vf->flags & BNXT_VF_SPOOFCHK) in bnxt_set_vf_spoofchk()
102 func_flags = vf->func_flags; in bnxt_set_vf_spoofchk()
111 req.fid = cpu_to_le16(vf->fw_fid); in bnxt_set_vf_spoofchk()
115 vf->func_flags = func_flags; in bnxt_set_vf_spoofchk()
117 vf->flags |= BNXT_VF_SPOOFCHK; in bnxt_set_vf_spoofchk()
[all …]
/drivers/net/ethernet/netronome/nfp/
nfp_net_sriov.c
48 nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) in nfp_net_sriov_check() argument
61 if (vf < 0 || vf >= app->pf->num_vfs) { in nfp_net_sriov_check()
62 nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); in nfp_net_sriov_check()
70 nfp_net_sriov_update(struct nfp_app *app, int vf, u16 update, const char *msg) in nfp_net_sriov_update() argument
76 writeb(vf, app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_NUM); in nfp_net_sriov_update()
92 int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) in nfp_app_set_vf_mac() argument
98 err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac"); in nfp_app_set_vf_mac()
105 mac, vf); in nfp_app_set_vf_mac()
110 vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; in nfp_app_set_vf_mac()
115 return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); in nfp_app_set_vf_mac()
[all …]
nfp_netvf_main.c
103 struct nfp_net_vf *vf; in nfp_netvf_pci_probe() local
111 vf = kzalloc(sizeof(*vf), GFP_KERNEL); in nfp_netvf_pci_probe()
112 if (!vf) in nfp_netvf_pci_probe()
114 pci_set_drvdata(pdev, vf); in nfp_netvf_pci_probe()
210 vf->nn = nn; in nfp_netvf_pci_probe()
234 vf->q_bar = ioremap_nocache(map_addr, bar_sz); in nfp_netvf_pci_probe()
235 if (!vf->q_bar) { in nfp_netvf_pci_probe()
242 nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off); in nfp_netvf_pci_probe()
244 nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off); in nfp_netvf_pci_probe()
269 num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries, in nfp_netvf_pci_probe()
[all …]
/drivers/net/
dummy.c
172 static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac) in dummy_set_vf_mac() argument
176 if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) in dummy_set_vf_mac()
179 memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN); in dummy_set_vf_mac()
184 static int dummy_set_vf_vlan(struct net_device *dev, int vf, in dummy_set_vf_vlan() argument
189 if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7)) in dummy_set_vf_vlan()
192 priv->vfinfo[vf].pf_vlan = vlan; in dummy_set_vf_vlan()
193 priv->vfinfo[vf].pf_qos = qos; in dummy_set_vf_vlan()
194 priv->vfinfo[vf].vlan_proto = vlan_proto; in dummy_set_vf_vlan()
199 static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max) in dummy_set_vf_rate() argument
203 if (vf >= num_vfs) in dummy_set_vf_rate()
[all …]
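
The dummy.c hits above show the bookkeeping side of these hooks: validate the VF index and the value, then store it in a per-VF info array. Below is a self-contained userspace sketch of those checks; the array size, struct layout, and the simplified address check are stand-ins for the driver's num_vfs parameter, its priv->vfinfo layout, and the kernel's is_valid_ether_addr().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define NUM_VFS  4          /* stand-in for the module's num_vfs parameter */

struct vf_info_sketch {
    uint8_t  vf_mac[ETH_ALEN];
    uint16_t pf_vlan;
    uint8_t  pf_qos;
};

static struct vf_info_sketch vfinfo[NUM_VFS];

/* crude stand-in for the kernel's is_valid_ether_addr(): reject the
 * all-zero address and multicast addresses (lowest bit of first octet) */
static bool valid_ether_addr(const uint8_t *mac)
{
    static const uint8_t zero[ETH_ALEN];
    return memcmp(mac, zero, ETH_ALEN) != 0 && !(mac[0] & 1);
}

/* mirrors the checks in the dummy_set_vf_mac() hit above */
static int set_vf_mac(int vf, const uint8_t *mac)
{
    if (!valid_ether_addr(mac) || vf < 0 || vf >= NUM_VFS)
        return -1;
    memcpy(vfinfo[vf].vf_mac, mac, ETH_ALEN);
    return 0;
}

/* mirrors the range checks in the dummy_set_vf_vlan() hit above:
 * VLAN IDs are 12 bits, 802.1p priorities are 3 bits */
static int set_vf_vlan(int vf, uint16_t vlan, uint8_t qos)
{
    if (vf < 0 || vf >= NUM_VFS || vlan > 4095 || qos > 7)
        return -1;
    vfinfo[vf].pf_vlan = vlan;
    vfinfo[vf].pf_qos  = qos;
    return 0;
}

int main(void)
{
    const uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

    printf("set mac:  %d\n", set_vf_mac(1, mac));      /* 0: accepted  */
    printf("set vlan: %d\n", set_vf_vlan(1, 100, 3));  /* 0: accepted  */
    printf("bad vlan: %d\n", set_vf_vlan(1, 5000, 3)); /* -1: rejected */
    return 0;
}
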
/drivers/net/ethernet/mellanox/mlx5/core/
sriov.c
46 static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf) in sriov_restore_guids() argument
53 if (sriov->vfs_ctx[vf].node_guid || in sriov_restore_guids()
54 sriov->vfs_ctx[vf].port_guid || in sriov_restore_guids()
55 sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) { in sriov_restore_guids()
60 in->node_guid = sriov->vfs_ctx[vf].node_guid; in sriov_restore_guids()
61 in->port_guid = sriov->vfs_ctx[vf].port_guid; in sriov_restore_guids()
62 in->policy = sriov->vfs_ctx[vf].policy; in sriov_restore_guids()
68 err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in); in sriov_restore_guids()
70 mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf); in sriov_restore_guids()
82 int vf; in mlx5_device_enable_sriov() local
[all …]
/drivers/net/ethernet/qlogic/qed/
qed_sriov.c
184 struct qed_vf_info *vf = NULL; in qed_iov_get_vf_info() local
193 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; in qed_iov_get_vf_info()
198 return vf; in qed_iov_get_vf_info()
451 struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; in qed_iov_setup_vfdb() local
454 vf->vf_mbx.req_virt = p_req_virt_addr + idx; in qed_iov_setup_vfdb()
455 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); in qed_iov_setup_vfdb()
456 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; in qed_iov_setup_vfdb()
457 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); in qed_iov_setup_vfdb()
459 vf->state = VF_STOPPED; in qed_iov_setup_vfdb()
460 vf->b_init = false; in qed_iov_setup_vfdb()
[all …]
/drivers/net/ethernet/cavium/thunder/
nic_main.c
126 static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg) in nic_clear_mbx_intr() argument
128 nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf)); in nic_clear_mbx_intr()
131 static u64 nic_get_mbx_addr(int vf) in nic_get_mbx_addr() argument
133 return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT); in nic_get_mbx_addr()
140 static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx) in nic_send_msg_to_vf() argument
142 void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf); in nic_send_msg_to_vf()
165 static void nic_mbx_send_ready(struct nicpf *nic, int vf) in nic_mbx_send_ready() argument
172 mbx.nic_cfg.vf_id = vf; in nic_mbx_send_ready()
176 if (vf < nic->num_vf_en) { in nic_mbx_send_ready()
177 bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); in nic_mbx_send_ready()
[all …]
/drivers/infiniband/hw/mlx5/
ib_virt.c
51 int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port, in mlx5_ib_get_vf_config() argument
63 err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep); in mlx5_ib_get_vf_config()
66 vf, err); in mlx5_ib_get_vf_config()
93 int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf, in mlx5_ib_set_vf_link_state() argument
112 err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in); in mlx5_ib_set_vf_link_state()
114 vfs_ctx[vf].policy = in->policy; in mlx5_ib_set_vf_link_state()
121 int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, in mlx5_ib_get_vf_stats() argument
137 err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz); in mlx5_ib_get_vf_stats()
152 static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid) in set_vf_node_guid() argument
166 err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in); in set_vf_node_guid()
[all …]
/drivers/infiniband/hw/usnic/
usnic_ib_verbs.c
71 us_ibdev = qp_grp->vf->pf; in usnic_ib_fill_create_qp_resp()
72 pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic); in usnic_ib_fill_create_qp_resp()
79 bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0); in usnic_ib_fill_create_qp_resp()
86 resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic); in usnic_ib_fill_create_qp_resp()
151 struct usnic_ib_vf *vf; in find_free_vf_and_create_qp_grp() local
171 vf = pci_get_drvdata(to_pci_dev(dev)); in find_free_vf_and_create_qp_grp()
172 spin_lock(&vf->lock); in find_free_vf_and_create_qp_grp()
173 vnic = vf->vnic; in find_free_vf_and_create_qp_grp()
180 vf, pd, in find_free_vf_and_create_qp_grp()
184 spin_unlock(&vf->lock); in find_free_vf_and_create_qp_grp()
[all …]
usnic_ib_main.c
78 struct usnic_ib_vf *vf = obj; in usnic_ib_dump_vf_hdr() local
79 return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name); in usnic_ib_dump_vf_hdr()
83 static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz) in usnic_ib_dump_vf() argument
85 usnic_vnic_dump(vf->vnic, buf, buf_sz, vf, in usnic_ib_dump_vf()
90 void usnic_ib_log_vf(struct usnic_ib_vf *vf) in usnic_ib_log_vf() argument
93 usnic_ib_dump_vf(vf, buf, sizeof(buf)); in usnic_ib_log_vf()
553 struct usnic_ib_vf *vf; in usnic_ib_pci_probe() local
556 vf = kzalloc(sizeof(*vf), GFP_KERNEL); in usnic_ib_pci_probe()
557 if (!vf) in usnic_ib_pci_probe()
575 pci_set_drvdata(pdev, vf); in usnic_ib_pci_probe()
[all …]
usnic_ib_qp_grp.c
89 usnic_vnic_get_index(qp_grp->vf->vnic), in usnic_ib_qp_grp_dump_rows()
117 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); in enable_qp_grp()
158 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); in disable_qp_grp()
196 uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); in init_filter_action()
402 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); in usnic_ib_qp_grp_modify()
488 ib_event.device = &qp_grp->vf->pf->ib_dev; in usnic_ib_qp_grp_modify()
588 static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf, in qp_grp_and_vf_bind() argument
595 lockdep_assert_held(&vf->lock); in qp_grp_and_vf_bind()
597 pdev = usnic_vnic_get_pdev(vf->vnic); in qp_grp_and_vf_bind()
598 if (vf->qp_grp_ref_cnt == 0) { in qp_grp_and_vf_bind()
[all …]
/drivers/crypto/qat/qat_common/
adf_vf_isr.c
86 accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); in adf_enable_msi()
87 if (!accel_dev->vf.irq_name) in adf_enable_msi()
97 kfree(accel_dev->vf.irq_name); in adf_disable_msi()
158 accel_dev->vf.pf_version = in adf_pf2vf_bh_handler()
161 accel_dev->vf.compatible = in adf_pf2vf_bh_handler()
164 complete(&accel_dev->vf.iov_msg_completion); in adf_pf2vf_bh_handler()
185 tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet, in adf_setup_pf2vf_bh()
188 mutex_init(&accel_dev->vf.vf2pf_lock); in adf_setup_pf2vf_bh()
194 tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet); in adf_cleanup_pf2vf_bh()
195 tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet); in adf_cleanup_pf2vf_bh()
[all …]
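
The adf_vf_isr.c hits above show the usual bottom-half plumbing for PF-to-VF messages: a tasklet initialised at setup time, then disabled and killed at cleanup so it can neither run nor be rescheduled. Below is a minimal kernel-side sketch of that lifecycle; the my_* names are placeholders rather than QAT's code, and it only builds inside a kernel tree.

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct my_vf_accel_dev {
    struct tasklet_struct pf2vf_bh_tasklet;
    struct mutex vf2pf_lock;
};

/* bottom half: runs in softirq context after the hard IRQ handler
 * has acknowledged the PF-to-VF interrupt */
static void my_pf2vf_bh_handler(unsigned long data)
{
    struct my_vf_accel_dev *dev = (struct my_vf_accel_dev *)data;

    /* read and dispatch the PF-to-VF message here */
    (void)dev;
}

static void my_setup_pf2vf_bh(struct my_vf_accel_dev *dev)
{
    tasklet_init(&dev->pf2vf_bh_tasklet, my_pf2vf_bh_handler,
                 (unsigned long)dev);
    mutex_init(&dev->vf2pf_lock);
}

static void my_cleanup_pf2vf_bh(struct my_vf_accel_dev *dev)
{
    /* make sure the bottom half neither runs nor gets rescheduled again */
    tasklet_disable(&dev->pf2vf_bh_tasklet);
    tasklet_kill(&dev->pf2vf_bh_tasklet);
    mutex_destroy(&dev->vf2pf_lock);
}
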
