Searched refs:pf (Results 1 – 25 of 189) sorted by relevance

/drivers/net/ethernet/netronome/nfp/
nfp_net_main.c
75 void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port) in nfp_net_get_mac_addr() argument
102 nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, in nfp_net_pf_rtsym_read_optional() argument
109 snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); in nfp_net_pf_rtsym_read_optional()
111 val = nfp_rtsym_read_le(pf->rtbl, name, &err); in nfp_net_pf_rtsym_read_optional()
115 nfp_err(pf->cpp, "Unable to read symbol %s\n", name); in nfp_net_pf_rtsym_read_optional()
122 static int nfp_net_pf_get_num_ports(struct nfp_pf *pf) in nfp_net_pf_get_num_ports() argument
124 return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1); in nfp_net_pf_get_num_ports()
127 static int nfp_net_pf_get_app_id(struct nfp_pf *pf) in nfp_net_pf_get_app_id() argument
129 return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id", in nfp_net_pf_get_app_id()
134 nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, in nfp_net_pf_map_rtsym() argument
[all …]
nfp_main.c
77 static bool nfp_board_ready(struct nfp_pf *pf) in nfp_board_ready() argument
83 cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state"); in nfp_board_ready()
94 static int nfp_pf_board_state_wait(struct nfp_pf *pf) in nfp_pf_board_state_wait() argument
98 while (!nfp_board_ready(pf)) { in nfp_pf_board_state_wait()
100 nfp_err(pf->cpp, "NFP board initialization timeout\n"); in nfp_pf_board_state_wait()
104 nfp_info(pf->cpp, "waiting for board initialization\n"); in nfp_pf_board_state_wait()
109 kfree(pf->hwinfo); in nfp_pf_board_state_wait()
110 pf->hwinfo = nfp_hwinfo_read(pf->cpp); in nfp_pf_board_state_wait()
116 static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) in nfp_pcie_sriov_read_nfd_limit() argument
120 pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); in nfp_pcie_sriov_read_nfd_limit()
[all …]
nfp_devlink.c
58 nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf, unsigned int port_index, in nfp_devlink_fill_eth_port_from_id() argument
63 port = nfp_port_from_id(pf, NFP_PORT_PHYS_PORT, port_index); in nfp_devlink_fill_eth_port_from_id()
69 nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes) in nfp_devlink_set_lanes() argument
74 nsp = nfp_eth_config_start(pf->cpp, idx); in nfp_devlink_set_lanes()
90 return nfp_net_refresh_port_table_sync(pf); in nfp_devlink_set_lanes()
97 struct nfp_pf *pf = devlink_priv(devlink); in nfp_devlink_port_split() local
105 mutex_lock(&pf->lock); in nfp_devlink_port_split()
108 ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port); in nfp_devlink_port_split()
123 ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes); in nfp_devlink_port_split()
125 mutex_unlock(&pf->lock); in nfp_devlink_port_split()
[all …]
nfp_net_sriov.c
52 if (!app || !app->pf->vfcfg_tbl2) in nfp_net_sriov_check()
55 cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP); in nfp_net_sriov_check()
57 nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); in nfp_net_sriov_check()
61 if (vf < 0 || vf >= app->pf->num_vfs) { in nfp_net_sriov_check()
62 nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); in nfp_net_sriov_check()
76 writeb(vf, app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_NUM); in nfp_net_sriov_update()
77 writew(update, app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_UPD); in nfp_net_sriov_update()
79 nn = list_first_entry(&app->pf->vnics, struct nfp_net, vnic_list); in nfp_net_sriov_update()
85 ret = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_RET); in nfp_net_sriov_update()
87 nfp_warn(app->pf->cpp, in nfp_net_sriov_update()
[all …]
nfp_hwmon.c
69 struct nfp_pf *pf = dev_get_drvdata(dev); in nfp_hwmon_read() local
84 if (!(pf->nspi->sensor_mask & BIT(id))) in nfp_hwmon_read()
88 return nfp_hwmon_read_sensor(pf->cpp, id, val); in nfp_hwmon_read()
90 return nfp_hwmon_read_sensor(pf->cpp, id, val); in nfp_hwmon_read()
165 int nfp_hwmon_register(struct nfp_pf *pf) in nfp_hwmon_register() argument
170 if (!pf->nspi) { in nfp_hwmon_register()
171 nfp_warn(pf->cpp, "not registering HWMON (no NSP info)\n"); in nfp_hwmon_register()
174 if (!pf->nspi->sensor_mask) { in nfp_hwmon_register()
175 nfp_info(pf->cpp, in nfp_hwmon_register()
180 pf->hwmon_dev = hwmon_device_register_with_info(&pf->pdev->dev, "nfp", in nfp_hwmon_register()
[all …]
nfp_port.c
104 nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id) in nfp_port_from_id() argument
108 lockdep_assert_held(&pf->lock); in nfp_port_from_id()
113 list_for_each_entry(port, &pf->ports, port_list) in nfp_port_from_id()
208 int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, in nfp_port_init_phy_port() argument
212 if (!pf->eth_tbl || id >= pf->eth_tbl->count) { in nfp_port_init_phy_port()
218 if (pf->eth_tbl->ports[id].override_changed) { in nfp_port_init_phy_port()
221 pf->eth_tbl->ports[id].index); in nfp_port_init_phy_port()
226 port->eth_port = &pf->eth_tbl->ports[id]; in nfp_port_init_phy_port()
227 port->eth_id = pf->eth_tbl->ports[id].index; in nfp_port_init_phy_port()
228 if (pf->mac_stats_mem) in nfp_port_init_phy_port()
[all …]
nfp_app.c
73 if (!app || !app->pf->mip) in nfp_app_mip_name()
75 return nfp_mip_name(app->pf->mip); in nfp_app_mip_name()
103 lockdep_is_held(&app->pf->lock)); in nfp_app_reprs_set()
115 struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id) in nfp_app_alloc() argument
124 nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id); in nfp_app_alloc()
135 app->pf = pf; in nfp_app_alloc()
136 app->cpp = pf->cpp; in nfp_app_alloc()
137 app->pdev = pf->pdev; in nfp_app_alloc()
nfp_app_nic.c
42 nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, in nfp_app_nic_vnic_init_phy_port() argument
47 if (!pf->eth_tbl) in nfp_app_nic_vnic_init_phy_port()
54 err = nfp_port_init_phy_port(pf, app, nn->port, id); in nfp_app_nic_vnic_init_phy_port()
68 err = nfp_app_nic_vnic_init_phy_port(app->pf, app, nn, id); in nfp_app_nic_vnic_alloc()
72 nfp_net_get_mac_addr(app->pf, nn->port); in nfp_app_nic_vnic_alloc()
/drivers/net/ethernet/intel/i40e/
i40e_ptp.c
59 static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts) in i40e_ptp_read() argument
61 struct i40e_hw *hw = &pf->hw; in i40e_ptp_read()
83 static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts) in i40e_ptp_write() argument
85 struct i40e_hw *hw = &pf->hw; in i40e_ptp_write()
122 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); in i40e_ptp_adjfreq() local
123 struct i40e_hw *hw = &pf->hw; in i40e_ptp_adjfreq()
133 adj = ACCESS_ONCE(pf->ptp_base_adj); in i40e_ptp_adjfreq()
160 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); in i40e_ptp_adjtime() local
163 mutex_lock(&pf->tmreg_lock); in i40e_ptp_adjtime()
165 i40e_ptp_read(pf, &now); in i40e_ptp_adjtime()
[all …]
i40e_debugfs.c
41 static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) in i40e_dbg_find_vsi() argument
46 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); in i40e_dbg_find_vsi()
48 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_dbg_find_vsi()
49 if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) in i40e_dbg_find_vsi()
50 return pf->vsi[i]; in i40e_dbg_find_vsi()
60 static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) in i40e_dbg_find_veb() argument
65 if (pf->veb[i] && pf->veb[i]->seid == seid) in i40e_dbg_find_veb()
66 return pf->veb[i]; in i40e_dbg_find_veb()
89 struct i40e_pf *pf = filp->private_data; in i40e_dbg_command_read() local
106 pf->vsi[pf->lan_vsi]->netdev->name, in i40e_dbg_command_read()
[all …]
i40e_main.c
60 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
63 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
64 static int i40e_setup_misc_vector(struct i40e_pf *pf);
65 static void i40e_determine_queue_usage(struct i40e_pf *pf);
66 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
67 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
68 static int i40e_reset(struct i40e_pf *pf);
69 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
70 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
127 struct i40e_pf *pf = (struct i40e_pf *)hw->back; in i40e_allocate_dma_mem_d() local
[all …]
i40e_client.c
109 struct i40e_pf *pf = vsi->back; in i40e_notify_client_of_vf_msg() local
110 struct i40e_client_instance *cdev = pf->cinst; in i40e_notify_client_of_vf_msg()
115 dev_dbg(&pf->pdev->dev, in i40e_notify_client_of_vf_msg()
120 dev_dbg(&pf->pdev->dev, "Client is not open, abort virtchnl_receive\n"); in i40e_notify_client_of_vf_msg()
135 struct i40e_pf *pf = vsi->back; in i40e_notify_client_of_l2_param_changes() local
136 struct i40e_client_instance *cdev = pf->cinst; in i40e_notify_client_of_l2_param_changes()
171 struct i40e_pf *pf = ldev->pf; in i40e_client_release_qvlist() local
179 wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK); in i40e_client_release_qvlist()
194 struct i40e_pf *pf = vsi->back; in i40e_notify_client_of_netdev_close() local
195 struct i40e_client_instance *cdev = pf->cinst; in i40e_notify_client_of_netdev_close()
[all …]
i40e_virtchnl_pf.c
41 static void i40e_vc_vf_broadcast(struct i40e_pf *pf, in i40e_vc_vf_broadcast() argument
46 struct i40e_hw *hw = &pf->hw; in i40e_vc_vf_broadcast()
47 struct i40e_vf *vf = pf->vf; in i40e_vc_vf_broadcast()
50 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { in i40e_vc_vf_broadcast()
74 struct i40e_pf *pf = vf->pf; in i40e_vc_notify_vf_link_state() local
75 struct i40e_hw *hw = &pf->hw; in i40e_vc_notify_vf_link_state()
76 struct i40e_link_status *ls = &pf->hw.phy.link_info; in i40e_vc_notify_vf_link_state()
101 void i40e_vc_notify_link_state(struct i40e_pf *pf) in i40e_vc_notify_link_state() argument
105 for (i = 0; i < pf->num_alloc_vfs; i++) in i40e_vc_notify_link_state()
106 i40e_vc_notify_vf_link_state(&pf->vf[i]); in i40e_vc_notify_link_state()
[all …]
i40e_ethtool.c
246 static void i40e_partition_setting_complaint(struct i40e_pf *pf) in i40e_partition_setting_complaint() argument
248 dev_info(&pf->pdev->dev, in i40e_partition_setting_complaint()
259 static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, in i40e_phy_type_to_ethtool() argument
262 struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info; in i40e_phy_type_to_ethtool()
263 u64 phy_types = pf->hw.phy.phy_types; in i40e_phy_type_to_ethtool()
274 if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { in i40e_phy_type_to_ethtool()
343 if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) in i40e_phy_type_to_ethtool()
348 if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) in i40e_phy_type_to_ethtool()
359 if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) in i40e_phy_type_to_ethtool()
364 if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) in i40e_phy_type_to_ethtool()
[all …]
i40e_dcb_nl.c
57 struct i40e_pf *pf = i40e_netdev_to_pf(dev); in i40e_dcbnl_ieee_getets() local
59 struct i40e_hw *hw = &pf->hw; in i40e_dcbnl_ieee_getets()
61 if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) in i40e_dcbnl_ieee_getets()
96 struct i40e_pf *pf = i40e_netdev_to_pf(dev); in i40e_dcbnl_ieee_getpfc() local
98 struct i40e_hw *hw = &pf->hw; in i40e_dcbnl_ieee_getpfc()
101 if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) in i40e_dcbnl_ieee_getpfc()
112 pfc->requests[i] = pf->stats.priority_xoff_tx[i]; in i40e_dcbnl_ieee_getpfc()
113 pfc->indications[i] = pf->stats.priority_xoff_rx[i]; in i40e_dcbnl_ieee_getpfc()
127 struct i40e_pf *pf = i40e_netdev_to_pf(dev); in i40e_dcbnl_getdcbx() local
129 return pf->dcbx_cap; in i40e_dcbnl_getdcbx()
[all …]
i40e_fcoe.c
153 static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf, in i40e_fcoe_ddp_unmap() argument
160 dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc, in i40e_fcoe_ddp_unmap()
247 struct i40e_pf *pf = np->vsi->back; in i40e_fcoe_ddp_put() local
248 struct i40e_fcoe *fcoe = &pf->fcoe; in i40e_fcoe_ddp_put()
257 i40e_fcoe_ddp_unmap(pf, ddp); in i40e_fcoe_ddp_put()
266 void i40e_init_pf_fcoe(struct i40e_pf *pf) in i40e_init_pf_fcoe() argument
268 struct i40e_hw *hw = &pf->hw; in i40e_init_pf_fcoe()
271 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; in i40e_init_pf_fcoe()
272 pf->num_fcoe_qps = 0; in i40e_init_pf_fcoe()
273 pf->fcoe_hmc_cntx_num = 0; in i40e_init_pf_fcoe()
[all …]
i40e.h
77 #define i40e_default_queues_per_vmdq(pf) \ argument
78 (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
81 #define i40e_pf_get_max_q_per_tc(pf) \ argument
82 (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
582 struct i40e_pf *pf; member
734 struct i40e_pf *pf; member
803 static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) in i40e_get_fd_cnt_all() argument
805 return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count; in i40e_get_fd_cnt_all()
816 static inline u64 i40e_read_fd_input_set(struct i40e_pf *pf, u16 addr) in i40e_read_fd_input_set() argument
820 val = i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1)); in i40e_read_fd_input_set()
[all …]
i40e_txrx.c
56 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir() local
81 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) << in i40e_fdir()
120 u8 *raw_packet, struct i40e_pf *pf, in i40e_program_fdir_filter() argument
133 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_program_fdir_filter()
208 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_udpv4() local
240 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); in i40e_add_del_fdir_udpv4()
242 dev_info(&pf->pdev->dev, in i40e_add_del_fdir_udpv4()
248 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { in i40e_add_del_fdir_udpv4()
250 dev_info(&pf->pdev->dev, in i40e_add_del_fdir_udpv4()
254 dev_info(&pf->pdev->dev, in i40e_add_del_fdir_udpv4()
[all …]
i40e_virtchnl_pf.h
75 struct i40e_pf *pf; member
118 void i40e_free_vfs(struct i40e_pf *pf);
120 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
121 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
123 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
125 void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
140 void i40e_vc_notify_link_state(struct i40e_pf *pf);
141 void i40e_vc_notify_reset(struct i40e_pf *pf);
/drivers/block/paride/
pf.c
245 static int pf_identify(struct pf_unit *pf);
246 static void pf_lock(struct pf_unit *pf, int func);
247 static void pf_eject(struct pf_unit *pf);
282 struct pf_unit *pf; in pf_init_units() local
286 for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) { in pf_init_units()
297 pf->disk = disk; in pf_init_units()
298 pf->pi = &pf->pia; in pf_init_units()
299 pf->media_status = PF_NM; in pf_init_units()
300 pf->drive = (*drives[unit])[D_SLV]; in pf_init_units()
301 pf->lun = (*drives[unit])[D_LUN]; in pf_init_units()
[all …]
/drivers/net/ethernet/broadcom/bnxt/
bnxt_sriov.c
69 if (!bp->pf.active_vfs) { in bnxt_vf_ndo_prep()
73 if (vf_id >= bp->pf.active_vfs) { in bnxt_vf_ndo_prep()
96 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_spoofchk()
136 vf = &bp->pf.vf[vf_id]; in bnxt_get_vf_config()
174 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_mac()
210 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_vlan()
239 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_bw()
279 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_link_state()
309 vf = &bp->pf.vf[i]; in bnxt_set_vf_attr()
318 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_func_vf_resource_free() local
[all …]
/drivers/iio/
industrialio-trigger.c
246 struct iio_poll_func *pf) in iio_trigger_attach_poll_func() argument
253 __module_get(pf->indio_dev->info->driver_module); in iio_trigger_attach_poll_func()
256 pf->irq = iio_trigger_get_irq(trig); in iio_trigger_attach_poll_func()
257 if (pf->irq < 0) in iio_trigger_attach_poll_func()
261 ret = request_threaded_irq(pf->irq, pf->h, pf->thread, in iio_trigger_attach_poll_func()
262 pf->type, pf->name, in iio_trigger_attach_poll_func()
263 pf); in iio_trigger_attach_poll_func()
279 if (pf->indio_dev->dev.parent == trig->dev.parent) in iio_trigger_attach_poll_func()
285 free_irq(pf->irq, pf); in iio_trigger_attach_poll_func()
287 iio_trigger_put_irq(trig, pf->irq); in iio_trigger_attach_poll_func()
[all …]
/drivers/net/ethernet/netronome/nfp/nic/
main.c
41 struct nfp_pf *pf = app->pf; in nfp_nic_init() local
43 if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count) { in nfp_nic_init()
44 nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", in nfp_nic_init()
45 pf->max_data_vnics, pf->eth_tbl->count); in nfp_nic_init()
/drivers/net/ethernet/mellanox/mlx5/core/
lag.c
60 struct lag_func pf[MLX5_MAX_PORTS]; member
151 if (ldev->pf[i].netdev == ndev) in mlx5_lag_dev_get_netdev_idx()
181 struct mlx5_core_dev *dev0 = ldev->pf[0].dev; in mlx5_activate_lag()
198 struct mlx5_core_dev *dev0 = ldev->pf[0].dev; in mlx5_deactivate_lag()
212 struct mlx5_core_dev *dev0 = ldev->pf[0].dev; in mlx5_do_bond()
213 struct mlx5_core_dev *dev1 = ldev->pf[1].dev; in mlx5_do_bond()
230 mlx5_remove_dev_by_protocol(ldev->pf[i].dev, in mlx5_do_bond()
259 if (ldev->pf[i].dev) in mlx5_do_bond()
260 mlx5_add_dev_by_protocol(ldev->pf[i].dev, in mlx5_do_bond()
413 if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) || in mlx5_lag_check_prereq()
[all …]
/drivers/net/ethernet/netronome/nfp/flower/
main.c
145 u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); in nfp_flower_spawn_vnic_reprs()
180 app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; in nfp_flower_spawn_vnic_reprs()
226 struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; in nfp_flower_spawn_phy_reprs()
259 err = nfp_port_init_phy_port(app->pf, app, port, i); in nfp_flower_spawn_phy_reprs()
266 nfp_net_get_mac_addr(app->pf, port); in nfp_flower_spawn_phy_reprs()
330 if (app->pf->num_vfs) in nfp_flower_vnic_clean()
355 if (app->pf->num_vfs) { in nfp_flower_vnic_init()
359 app->pf->num_vfs); in nfp_flower_vnic_init()
377 const struct nfp_pf *pf = app->pf; in nfp_flower_init() local
382 if (!pf->eth_tbl) { in nfp_flower_init()
[all …]