
Searched refs:prof (Results 1 – 25 of 29) sorted by relevance


/drivers/net/ethernet/intel/ice/
ice_flow.c
591 struct ice_flow_prof *prof; member
669 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? in ice_flow_calc_seg_sz()
673 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) in ice_flow_calc_seg_sz()
675 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) in ice_flow_calc_seg_sz()
677 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) in ice_flow_calc_seg_sz()
679 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) in ice_flow_calc_seg_sz()
684 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP) in ice_flow_calc_seg_sz()
686 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) in ice_flow_calc_seg_sz()
688 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP) in ice_flow_calc_seg_sz()
690 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP) in ice_flow_calc_seg_sz()
[all …]
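
The ice_flow_calc_seg_sz() hits above walk a bitmask of protocol-header flags and accumulate a segment size from it. A minimal user-space sketch of that pattern follows; the flag values and header sizes are illustrative assumptions, not the driver's ICE_FLOW_SEG_HDR_* definitions.

#include <stdint.h>
#include <stdio.h>

#define HDR_VLAN  0x01	/* illustrative flag values, not the driver's */
#define HDR_IPV4  0x02
#define HDR_IPV6  0x04
#define HDR_TCP   0x08
#define HDR_UDP   0x10

static uint16_t calc_seg_sz(uint32_t hdrs)
{
	uint16_t sz = 14;		/* Ethernet header */

	if (hdrs & HDR_VLAN)
		sz += 4;		/* single VLAN tag */

	if (hdrs & HDR_IPV4)		/* mutually exclusive L3 choices */
		sz += 20;
	else if (hdrs & HDR_IPV6)
		sz += 40;

	if (hdrs & HDR_TCP)		/* mutually exclusive L4 choices */
		sz += 20;
	else if (hdrs & HDR_UDP)
		sz += 8;

	return sz;
}

int main(void)
{
	printf("IPv4+TCP segment size: %u bytes\n",
	       (unsigned)calc_seg_sz(HDR_IPV4 | HDR_TCP));
	return 0;
}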
ice_ethtool_fdir.c
283 struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow); in ice_fdir_erase_flow_from_hw() local
286 if (!prof) in ice_fdir_erase_flow_from_hw()
294 for (j = 0; j < prof->cnt; j++) { in ice_fdir_erase_flow_from_hw()
297 if (!prof->entry_h[j][tun] || !prof->vsi_h[j]) in ice_fdir_erase_flow_from_hw()
299 vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]); in ice_fdir_erase_flow_from_hw()
301 ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]); in ice_fdir_erase_flow_from_hw()
302 prof->entry_h[j][tun] = 0; in ice_fdir_erase_flow_from_hw()
319 struct ice_fd_hw_prof *prof; in ice_fdir_rem_flow() local
322 prof = ice_fdir_get_hw_prof(hw, blk, flow); in ice_fdir_rem_flow()
323 if (!prof) in ice_fdir_rem_flow()
[all …]
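
The ice_fdir_erase_flow_from_hw() hits iterate a profile's table of flow-entry handles, skip slots that were never programmed, release each remaining handle, and zero the slot. A compilable sketch of that loop shape; the struct layout and the release callback (standing in for ice_flow_rem_entry()) are assumptions for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES 4
#define MAX_TUNNELS 2

struct hw_prof {
	uint64_t entry_h[MAX_ENTRIES][MAX_TUNNELS];	/* 0 means "slot unused" */
	int	 vsi_h[MAX_ENTRIES];
	size_t	 cnt;
};

/* Release every programmed entry handle, then mark its slot free. */
static void erase_flow(struct hw_prof *prof, void (*rem_entry)(uint64_t))
{
	if (!prof)
		return;

	for (size_t j = 0; j < prof->cnt; j++)
		for (int tun = 0; tun < MAX_TUNNELS; tun++) {
			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
				continue;		/* nothing programmed here */
			rem_entry(prof->entry_h[j][tun]);
			prof->entry_h[j][tun] = 0;
		}
}

static void dummy_rem(uint64_t h)
{
	printf("removed entry %llu\n", (unsigned long long)h);
}

int main(void)
{
	struct hw_prof p = { .cnt = 2, .vsi_h = { 1, 1 } };

	p.entry_h[0][0] = 42;
	p.entry_h[1][1] = 43;
	erase_flow(&p, dummy_rem);
	return 0;
}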
ice_flex_pipe.c
2379 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx, in ice_prof_has_mask_idx() argument
2394 if (hw->blk[blk].es.mask_ena[prof] & BIT(i)) in ice_prof_has_mask_idx()
2422 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) in ice_prof_has_mask() argument
2428 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i])) in ice_prof_has_mask()
3161 sizeof(*hw->blk[block_id].prof.t); in ice_fill_tbl()
3162 dst = (u8 *)hw->blk[block_id].prof.t; in ice_fill_tbl()
3163 dst_len = hw->blk[block_id].prof.count * in ice_fill_tbl()
3164 sizeof(*hw->blk[block_id].prof.t); in ice_fill_tbl()
3235 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); in ice_fill_blk_tbls()
3337 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t); in ice_free_hw_tbls()
[all …]
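
The ice_prof_has_mask_idx()/ice_prof_has_mask() hits test whether a profile already carries a requested set of per-field masks by consulting a per-profile mask-enable bitmap. A loose sketch of that check under an assumed, simplified layout (the real driver keeps masks in shared mask registers):

#include <stdbool.h>
#include <stdint.h>

#define FIELDS_PER_PROF 8

struct es_block {
	uint32_t mask_ena[16];			/* per-profile bitmap of enabled mask slots */
	uint16_t masks[16][FIELDS_PER_PROF];	/* mask value programmed per slot */
};

/* Does profile `prof` already have `mask` enabled for field index `idx`? */
static bool prof_has_mask_idx(const struct es_block *es, uint8_t prof,
			      uint16_t idx, uint16_t mask)
{
	if (!(es->mask_ena[prof] & (1u << idx)))
		return false;
	return es->masks[prof][idx] == mask;
}

/* Does the profile carry exactly the requested mask for every field? */
static bool prof_has_mask(const struct es_block *es, uint8_t prof,
			  const uint16_t *masks)
{
	for (uint16_t i = 0; i < FIELDS_PER_PROF; i++)
		if (!prof_has_mask_idx(es, prof, i, masks[i]))
			return false;
	return true;
}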
ice_flow.h
350 struct ice_flow_prof *prof; member
389 struct ice_flow_prof **prof);
ice_flex_type.h
570 struct ice_prof_tcam prof; member
/drivers/s390/block/
dasd_proc.c
200 struct dasd_profile_info *prof; in dasd_stats_proc_show() local
204 prof = dasd_global_profile.data; in dasd_stats_proc_show()
205 if (!prof) { in dasd_stats_proc_show()
214 for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; in dasd_stats_proc_show()
217 seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs); in dasd_stats_proc_show()
219 prof->dasd_io_sects); in dasd_stats_proc_show()
231 dasd_statistics_array(m, prof->dasd_io_secs, factor); in dasd_stats_proc_show()
233 dasd_statistics_array(m, prof->dasd_io_times, factor); in dasd_stats_proc_show()
235 dasd_statistics_array(m, prof->dasd_io_timps, factor); in dasd_stats_proc_show()
237 dasd_statistics_array(m, prof->dasd_io_time1, factor); in dasd_stats_proc_show()
[all …]
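
The dasd_stats_proc_show() hits pick a decimal scaling factor so the request counter fits the printed column width, then dump the histogram arrays scaled by that factor. A small sketch of that scaling idea; the 7-digit limit mirrors the 9999999 bound visible in the hit, everything else is illustrative.

#include <stdio.h>

/* Scale histogram buckets so the largest printed value stays within 7 digits. */
static void print_scaled(const unsigned int *buckets, int n, unsigned int total)
{
	unsigned int factor = 1;

	while (total / factor > 9999999)
		factor *= 10;

	printf("(values scaled by factor %u)\n", factor);
	for (int i = 0; i < n; i++)
		printf("%7u ", buckets[i] / factor);
	printf("\n");
}

int main(void)
{
	unsigned int hist[4] = { 120000000, 30000000, 700000, 12 };

	print_scaled(hist, 4, 150700012);
	return 0;
}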
/drivers/net/ethernet/mellanox/mlx4/
en_main.c
166 params->prof[i].rx_pause = !(pfcrx || pfctx); in mlx4_en_get_profile()
167 params->prof[i].rx_ppp = pfcrx; in mlx4_en_get_profile()
168 params->prof[i].tx_pause = !(pfcrx || pfctx); in mlx4_en_get_profile()
169 params->prof[i].tx_ppp = pfctx; in mlx4_en_get_profile()
171 params->prof[i].tx_ring_size = MLX4_EN_MIN_TX_SIZE; in mlx4_en_get_profile()
172 params->prof[i].rx_ring_size = MLX4_EN_MIN_RX_SIZE; in mlx4_en_get_profile()
174 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; in mlx4_en_get_profile()
175 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; in mlx4_en_get_profile()
177 params->prof[i].num_up = MLX4_EN_NUM_UP_LOW; in mlx4_en_get_profile()
178 params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up; in mlx4_en_get_profile()
[all …]
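
The mlx4_en_get_profile() hits fill a per-port profile with defaults: enabling PFC in either direction clears link-level pause, and ring sizes fall back to minimal values under memory pressure. A sketch of that default-filling logic; the low_memory flag and the size constants are assumptions standing in for the driver's memory-profile check and MLX4_EN_* defaults.

#include <stdbool.h>
#include <stdint.h>

#define DEF_TX_RING_SIZE 1024	/* illustrative defaults */
#define DEF_RX_RING_SIZE 1024
#define MIN_TX_SIZE	 64
#define MIN_RX_SIZE	 64

struct port_profile {
	bool	 rx_pause, tx_pause;	/* link-level pause */
	uint8_t	 rx_ppp, tx_ppp;	/* per-priority pause (PFC) bitmaps */
	uint32_t tx_ring_size, rx_ring_size;
};

/* Enabling PFC in either direction disables global pause; low_memory picks
 * minimal ring sizes instead of the defaults. */
void fill_profiles(struct port_profile *prof, int nports,
		   uint8_t pfctx, uint8_t pfcrx, bool low_memory)
{
	for (int i = 0; i < nports; i++) {
		prof[i].rx_pause = !(pfcrx || pfctx);
		prof[i].tx_pause = !(pfcrx || pfctx);
		prof[i].rx_ppp = pfcrx;
		prof[i].tx_ppp = pfctx;
		prof[i].tx_ring_size = low_memory ? MIN_TX_SIZE : DEF_TX_RING_SIZE;
		prof[i].rx_ring_size = low_memory ? MIN_RX_SIZE : DEF_RX_RING_SIZE;
	}
}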
en_dcb_nl.c
159 struct mlx4_en_port_profile *prof = priv->prof; in mlx4_en_dcbnl_set_all() local
168 rx_ppp = prof->rx_ppp; in mlx4_en_dcbnl_set_all()
169 tx_ppp = prof->tx_ppp; in mlx4_en_dcbnl_set_all()
195 rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause; in mlx4_en_dcbnl_set_all()
196 tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause; in mlx4_en_dcbnl_set_all()
200 rx_pause = prof->rx_pause; in mlx4_en_dcbnl_set_all()
201 tx_pause = prof->tx_pause; in mlx4_en_dcbnl_set_all()
211 prof->tx_ppp = tx_ppp; in mlx4_en_dcbnl_set_all()
212 prof->rx_ppp = rx_ppp; in mlx4_en_dcbnl_set_all()
213 prof->tx_pause = tx_pause; in mlx4_en_dcbnl_set_all()
[all …]
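
The mlx4_en_dcbnl_set_all() hits encode the rule that per-priority flow control (PFC) and global pause are mutually exclusive: any non-zero PFC bitmap forces pause off, otherwise the profile's stored pause settings are kept. A short sketch of that resolution step (types are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct pause_cfg {
	bool	rx_pause, tx_pause;	/* link-level pause */
	uint8_t	rx_ppp, tx_ppp;		/* per-priority (PFC) bitmaps */
};

/* If any priority has PFC enabled, global pause is forced off; otherwise the
 * previously stored pause settings are preserved. */
static void resolve_pause(struct pause_cfg *cfg,
			  uint8_t new_rx_ppp, uint8_t new_tx_ppp)
{
	bool pfc_on = new_rx_ppp || new_tx_ppp;

	cfg->rx_ppp = new_rx_ppp;
	cfg->tx_ppp = new_tx_ppp;
	cfg->rx_pause = pfc_on ? false : cfg->rx_pause;
	cfg->tx_pause = pfc_on ? false : cfg->tx_pause;
}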
en_netdev.c
104 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); in mlx4_en_alloc_tx_queue_per_tc()
1773 priv->prof->tx_pause, in mlx4_en_start_port()
1774 priv->prof->tx_ppp, in mlx4_en_start_port()
1775 priv->prof->rx_pause, in mlx4_en_start_port()
1776 priv->prof->rx_ppp); in mlx4_en_start_port()
2144 priv->prof->rx_ring_size, priv->stride); in mlx4_en_free_resources()
2153 struct mlx4_en_port_profile *prof = priv->prof; in mlx4_en_alloc_resources() local
2162 prof->tx_ring_size, i, t, node)) in mlx4_en_alloc_resources()
2166 prof->tx_ring_size, in mlx4_en_alloc_resources()
2176 prof->rx_ring_size, i, RX, node)) in mlx4_en_alloc_resources()
[all …]
en_ethtool.c
792 if (priv->prof->tx_pause) in ethtool_get_ptys_link_ksettings()
795 if (priv->prof->tx_pause ^ priv->prof->rx_pause) in ethtool_get_ptys_link_ksettings()
1091 rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp; in mlx4_en_set_pauseparam()
1092 tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp; in mlx4_en_set_pauseparam()
1105 priv->prof->tx_pause = tx_pause; in mlx4_en_set_pauseparam()
1106 priv->prof->rx_pause = rx_pause; in mlx4_en_set_pauseparam()
1107 priv->prof->tx_ppp = tx_ppp; in mlx4_en_set_pauseparam()
1108 priv->prof->rx_ppp = rx_ppp; in mlx4_en_set_pauseparam()
1136 pause->tx_pause = priv->prof->tx_pause; in mlx4_en_get_pauseparam()
1137 pause->rx_pause = priv->prof->rx_pause; in mlx4_en_get_pauseparam()
[all …]
mlx4_en.h
396 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; member
525 struct mlx4_en_port_profile *prof; member
660 struct mlx4_en_port_profile *prof);
672 struct mlx4_en_port_profile *prof,
en_resources.c
67 if (user_prio >= 0 && priv->prof->num_up == MLX4_EN_NUM_UP_HIGH) { in mlx4_en_fill_qp_context()
en_rx.c
190 for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { in mlx4_en_fill_rx_buffers()
258 mdev->profile.prof[i].rx_ring_num = in mlx4_en_set_num_rx_rings()
1187 if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) in mlx4_en_config_rss_steer()
1190 rss_rings = priv->prof->rss_rings; in mlx4_en_config_rss_steer()
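
The mlx4_en_config_rss_steer() hit only honours the profile's rss_rings value when it is non-zero and does not exceed the RX rings actually allocated. A one-function sketch of that guard (names are illustrative):

#include <stdint.h>

/* Fall back to all RX rings when the requested RSS ring count is zero or
 * larger than what was allocated. */
static uint32_t pick_rss_rings(uint32_t requested, uint32_t rx_ring_num)
{
	if (!requested || requested > rx_ring_num)
		return rx_ring_num;
	return requested;
}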
/drivers/net/ethernet/marvell/octeontx2/nic/
cn10k.c
377 aq->prof.icolor = 0x03; in cn10k_set_ipolicer_rate()
381 aq->prof.cir_exponent = rate_exp; in cn10k_set_ipolicer_rate()
384 aq->prof.cir_mantissa = rate_mantissa; in cn10k_set_ipolicer_rate()
387 aq->prof.cbs_exponent = burst_exp; in cn10k_set_ipolicer_rate()
390 aq->prof.cbs_mantissa = burst_mantissa; in cn10k_set_ipolicer_rate()
393 aq->prof.rdiv = rdiv; in cn10k_set_ipolicer_rate()
410 aq->prof.adjust_exponent = 1; in cn10k_set_ipolicer_rate()
413 aq->prof.adjust_mantissa = 384; in cn10k_set_ipolicer_rate()
416 aq->prof.lmode = 0x1; in cn10k_set_ipolicer_rate()
423 aq->prof.meter_algo = 2; in cn10k_set_ipolicer_rate()
[all …]
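
The cn10k_set_ipolicer_rate() hits program the ingress policer with exponent/mantissa pairs for rate and burst. The sketch below only illustrates the general idea of encoding a value as mantissa << exponent under a field-width limit; the real CN10K formula (rdiv, adjust_mantissa, biases) is different and not reproduced here.

#include <stdint.h>
#include <stdio.h>

/* Split a value into (mantissa, exponent) with value ~= mantissa << exponent
 * and mantissa kept below a hardware field limit. */
static void encode_rate(uint64_t value, uint32_t max_mantissa,
			uint32_t *mantissa, uint32_t *exponent)
{
	uint32_t exp = 0;

	while (value > max_mantissa) {
		value >>= 1;
		exp++;
	}
	*mantissa = (uint32_t)value;
	*exponent = exp;
}

int main(void)
{
	uint32_t m, e;

	encode_rate(1000000, 255, &m, &e);	/* example value only */
	printf("mantissa=%u exponent=%u approx=%llu\n",
	       m, e, (unsigned long long)m << e);
	return 0;
}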
/drivers/s390/net/
ctcm_sysfs.c
96 priv->channel[WRITE]->prof.maxmulti); in ctcm_print_statistics()
98 priv->channel[WRITE]->prof.maxcqueue); in ctcm_print_statistics()
100 priv->channel[WRITE]->prof.doios_single); in ctcm_print_statistics()
102 priv->channel[WRITE]->prof.doios_multi); in ctcm_print_statistics()
104 priv->channel[WRITE]->prof.txlen); in ctcm_print_statistics()
106 jiffies_to_usecs(priv->channel[WRITE]->prof.tx_time)); in ctcm_print_statistics()
133 memset(&priv->channel[WRITE]->prof, 0, in stats_write()
134 sizeof(priv->channel[CTCM_WRITE]->prof)); in stats_write()
netiucv.c
152 struct connection_profile prof; member
693 conn->prof.tx_pending--; in conn_action_txdone()
722 if (conn->collect_len > conn->prof.maxmulti) in conn_action_txdone()
723 conn->prof.maxmulti = conn->collect_len; in conn_action_txdone()
733 conn->prof.send_stamp = jiffies; in conn_action_txdone()
738 conn->prof.doios_multi++; in conn_action_txdone()
739 conn->prof.txlen += conn->tx_buff->len; in conn_action_txdone()
740 conn->prof.tx_pending++; in conn_action_txdone()
741 if (conn->prof.tx_pending > conn->prof.tx_max_pending) in conn_action_txdone()
742 conn->prof.tx_max_pending = conn->prof.tx_pending; in conn_action_txdone()
[all …]
ctcm_fsms.c
259 duration = done_stamp - ch->prof.send_stamp; in chx_txdone()
260 if (duration > ch->prof.tx_time) in chx_txdone()
261 ch->prof.tx_time = duration; in chx_txdone()
290 if (ch->prof.maxmulti < (ch->collect_len + 2)) in chx_txdone()
291 ch->prof.maxmulti = ch->collect_len + 2; in chx_txdone()
292 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) in chx_txdone()
293 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); in chx_txdone()
309 ch->prof.send_stamp = jiffies; in chx_txdone()
311 ch->prof.doios_multi++; in chx_txdone()
1230 duration = done_stamp - ch->prof.send_stamp; in ctcmpc_chx_txdone()
[all …]
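
The ctcm/netiucv hits keep running maxima in a per-channel profile: worst-case send duration, largest coalesced transmit, deepest collect queue. A compact sketch of that bookkeeping on transmit completion (field names follow the hits, the rest is illustrative):

#include <stdint.h>

struct tx_prof {
	uint64_t tx_time;	/* longest observed send duration */
	uint32_t maxmulti;	/* largest coalesced transmit length */
	uint32_t maxcqueue;	/* deepest collect queue */
	uint64_t send_stamp;	/* timestamp taken when the I/O was started */
};

/* Update the running maxima when a transmit completes. */
static void tx_done(struct tx_prof *p, uint64_t done_stamp,
		    uint32_t collect_len, uint32_t queue_len)
{
	uint64_t duration = done_stamp - p->send_stamp;

	if (duration > p->tx_time)
		p->tx_time = duration;
	if (collect_len > p->maxmulti)
		p->maxmulti = collect_len;
	if (queue_len > p->maxcqueue)
		p->maxcqueue = queue_len;
}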
ctcm_main.c
505 ch->prof.txlen += skb->len; in ctcm_transmit_skb()
571 ch->prof.send_stamp = jiffies; in ctcm_transmit_skb()
575 ch->prof.doios_single++; in ctcm_transmit_skb()
737 ch->prof.txlen += skb->len - PDU_HEADER_LENGTH; in ctcmpc_transmit_skb()
794 ch->prof.send_stamp = jiffies; in ctcmpc_transmit_skb()
798 ch->prof.doios_single++; in ctcmpc_transmit_skb()
ctcm_main.h
196 struct ctcm_profile prof; member
/drivers/net/ethernet/marvell/octeontx2/af/
rvu_debugfs.c
1710 struct nix_bandprof_s *prof) in print_band_prof_ctx() argument
1714 switch (prof->pc_mode) { in print_band_prof_ctx()
1729 str = (prof->icolor == 3) ? "Color blind" : in print_band_prof_ctx()
1730 (prof->icolor == 0) ? "Green" : in print_band_prof_ctx()
1731 (prof->icolor == 1) ? "Yellow" : "Red"; in print_band_prof_ctx()
1733 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena); in print_band_prof_ctx()
1734 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent); in print_band_prof_ctx()
1735 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent); in print_band_prof_ctx()
1736 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent); in print_band_prof_ctx()
1737 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent); in print_band_prof_ctx()
[all …]
rvu_nix.c
1000 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); in rvu_nix_blk_aq_enq_inst()
1079 memcpy(&rsp->prof, ctx, in rvu_nix_blk_aq_enq_inst()
4830 if (!req->prof.hl_en) in nix_verify_bandprof()
4844 prof_idx = req->prof.band_prof_id; in nix_verify_bandprof()
4856 int blkaddr, layer, prof, idx, err; in rvu_mbox_handler_nix_bandprof_alloc() local
4881 prof = rvu_alloc_rsrc(&ipolicer->band_prof); in rvu_mbox_handler_nix_bandprof_alloc()
4882 if (prof < 0) in rvu_mbox_handler_nix_bandprof_alloc()
4885 rsp->prof_idx[layer][idx] = prof; in rvu_mbox_handler_nix_bandprof_alloc()
4886 ipolicer->pfvf_map[prof] = pcifunc; in rvu_mbox_handler_nix_bandprof_alloc()
5009 aq_req->prof.band_prof_id = mid_prof; in nix_ipolicer_map_leaf_midprofs()
[all …]
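
The rvu_mbox_handler_nix_bandprof_alloc() hits pull bandwidth-profile indices from a per-layer resource pool and record the requesting function (pcifunc) as the owner of each index. A toy allocator showing that allocate-and-record pattern; the bitmap pool and its size are assumptions.

#include <stdint.h>

#define NPROFS 32

struct band_prof_pool {
	uint32_t used;			/* bitmap of allocated indices */
	uint16_t pfvf_map[NPROFS];	/* owner of each index */
};

/* Allocate the first free profile index and remember which function owns it;
 * returns -1 when the pool is exhausted. */
static int alloc_band_prof(struct band_prof_pool *p, uint16_t pcifunc)
{
	for (int i = 0; i < NPROFS; i++) {
		if (p->used & (1u << i))
			continue;
		p->used |= 1u << i;
		p->pfvf_map[i] = pcifunc;
		return i;
	}
	return -1;
}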
mbox.h
798 struct nix_bandprof_s prof; member
818 struct nix_bandprof_s prof; member
834 u64 prof; member
854 struct nix_bandprof_s prof; member
/drivers/net/wireless/intel/iwlwifi/fw/
acpi.c
455 struct iwl_sar_profile *prof; in iwl_sar_fill_table() local
466 prof = &fwrt->sar_profiles[profs[i] - 1]; in iwl_sar_fill_table()
469 if (!prof->enabled) { in iwl_sar_fill_table()
486 cpu_to_le16(prof->chains[i].subbands[j]); in iwl_sar_fill_table()
488 j, prof->chains[i].subbands[j]); in iwl_sar_fill_table()
/drivers/net/ethernet/mellanox/mlx5/core/ipoib/
ipoib.c
694 const struct mlx5e_profile *prof = mlx5_get_profile(mdev); in mlx5_rdma_setup_rn() local
717 err = mlx5e_priv_init(epriv, prof, netdev, mdev); in mlx5_rdma_setup_rn()
721 epriv->profile = prof; in mlx5_rdma_setup_rn()
724 prof->init(mdev, netdev); in mlx5_rdma_setup_rn()
745 prof->cleanup(epriv); in mlx5_rdma_setup_rn()
/drivers/net/ethernet/mellanox/mlx5/core/
main.c
497 struct mlx5_profile *prof = &dev->profile; in handle_hca_cap() local
518 if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) { in handle_hca_cap()
519 prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp)); in handle_hca_cap()
520 } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { in handle_hca_cap()
522 prof->log_max_qp, in handle_hca_cap()
524 prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp); in handle_hca_cap()
526 if (prof->mask & MLX5_PROF_MASK_QP_SIZE) in handle_hca_cap()
528 prof->log_max_qp); in handle_hca_cap()
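
The handle_hca_cap() hits resolve the profile's log_max_qp against the device capability: a sentinel value means "use the device maximum" (capped at 2^18 QPs), and an explicit request above the capability is clamped with a warning. A user-space sketch of that resolution; the sentinel's numeric value is an assumption.

#include <stdint.h>
#include <stdio.h>

#define LOG_MAX_SUPPORTED_QPS 0xff	/* assumed sentinel: "use whatever the device supports" */

/* Resolve the requested log_max_qp against the device's reported capability. */
static uint8_t resolve_log_max_qp(uint8_t requested, uint8_t dev_cap)
{
	if (requested == LOG_MAX_SUPPORTED_QPS)
		return dev_cap < 18 ? dev_cap : 18;

	if (dev_cap < requested) {
		fprintf(stderr, "log_max_qp %u > capability %u, clamping\n",
			requested, dev_cap);
		return dev_cap;
	}
	return requested;
}

int main(void)
{
	printf("resolved log_max_qp = %u\n", resolve_log_max_qp(20, 17));
	return 0;
}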
