
Searched refs:qos (Results 1 – 25 of 174) sorted by relevance

/drivers/base/power/
qos.c
54 struct dev_pm_qos *qos = dev->power.qos; in __dev_pm_qos_flags() local
60 if (IS_ERR_OR_NULL(qos)) in __dev_pm_qos_flags()
63 pqf = &qos->flags; in __dev_pm_qos_flags()
112 struct dev_pm_qos *qos = dev->power.qos; in dev_pm_qos_read_value() local
120 ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT in dev_pm_qos_read_value()
121 : pm_qos_read_value(&qos->resume_latency); in dev_pm_qos_read_value()
124 ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE in dev_pm_qos_read_value()
125 : freq_qos_read_value(&qos->freq, FREQ_QOS_MIN); in dev_pm_qos_read_value()
128 ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE in dev_pm_qos_read_value()
129 : freq_qos_read_value(&qos->freq, FREQ_QOS_MAX); in dev_pm_qos_read_value()
[all …]
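
Note: the read path above has a matching request API in include/linux/pm_qos.h. A minimal sketch, assuming a driver wants to cap its device's resume latency at 100 us (error handling elided; cap_resume_latency is an illustrative name):

    #include <linux/pm_qos.h>

    static struct dev_pm_qos_request latency_req;

    /* Ask the PM core to keep dev's resume latency at or below 100 us;
     * dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY) then reports
     * the aggregated bound. Drop it with dev_pm_qos_remove_request(). */
    static int cap_resume_latency(struct device *dev)
    {
            return dev_pm_qos_add_request(dev, &latency_req,
                                          DEV_PM_QOS_RESUME_LATENCY, 100);
    }
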
qos-test.c
11 struct freq_constraints qos; in freq_qos_test_min() local
15 freq_constraints_init(&qos); in freq_qos_test_min()
19 ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000); in freq_qos_test_min()
21 ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000); in freq_qos_test_min()
24 KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000); in freq_qos_test_min()
28 KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000); in freq_qos_test_min()
32 KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), in freq_qos_test_min()
39 struct freq_constraints qos; in freq_qos_test_maxdef() local
43 freq_constraints_init(&qos); in freq_qos_test_maxdef()
46 KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), in freq_qos_test_maxdef()
[all …]
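
Note: compressed into a sketch, the test above demonstrates the aggregation rule: FREQ_QOS_MIN requests combine to the largest requested value, so removing the strictest request exposes the next one (names from include/linux/pm_qos.h):

    struct freq_constraints qos;
    struct freq_qos_request req1, req2;

    freq_constraints_init(&qos);
    freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000);
    freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000);
    freq_qos_read_value(&qos, FREQ_QOS_MIN);    /* -> 2000, the max */
    freq_qos_remove_request(&req2);
    freq_qos_read_value(&qos, FREQ_QOS_MIN);    /* -> 1000 again */
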
Makefile
2 obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
7 obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
/drivers/net/ethernet/mellanox/mlx5/core/esw/
qos.c
29 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) in esw_qos_tsar_config()
71 if (!vport->qos.enabled) in esw_qos_vport_config()
74 err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix, in esw_qos_vport_config()
101 list_for_each_entry(group, &esw->qos.groups, list) { in esw_qos_calculate_min_rate_divider()
108 if (!evport->enabled || !evport->qos.enabled || in esw_qos_calculate_min_rate_divider()
109 evport->qos.group != group || evport->qos.min_rate < max_guarantee) in esw_qos_calculate_min_rate_divider()
111 max_guarantee = evport->qos.min_rate; in esw_qos_calculate_min_rate_divider()
146 if (!evport->enabled || !evport->qos.enabled || evport->qos.group != group) in esw_qos_normalize_vports_min_rate()
148 bw_share = esw_qos_calc_bw_share(evport->qos.min_rate, divider, fw_max_bw_share); in esw_qos_normalize_vports_min_rate()
150 if (bw_share == evport->qos.bw_share) in esw_qos_normalize_vports_min_rate()
[all …]
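
Note: the min-rate pass above scales each vport's guarantee against the largest one in the group. A hedged sketch of that idea; calc_bw_share and its exact formula are illustrative, not the tree's helper:

    /* Map min_rate guarantees onto the firmware's [1, fw_max_bw_share]
     * scale: the vport holding max_guarantee gets the full share, the
     * rest a proportional but non-zero slice. */
    static u32 calc_bw_share(u32 min_rate, u32 max_guarantee,
                             u32 fw_max_bw_share)
    {
            if (!max_guarantee)
                    return 0;
            return max_t(u32, (u32)div_u64((u64)min_rate * fw_max_bw_share,
                                           max_guarantee), 1);
    }
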
/drivers/staging/octeon/
ethernet-tx.c
75 int qos, queues_per_port; in cvm_oct_free_tx_skbs() local
83 for (qos = 0; qos < queues_per_port; qos++) { in cvm_oct_free_tx_skbs()
84 if (skb_queue_len(&priv->tx_free_list[qos]) == 0) in cvm_oct_free_tx_skbs()
86 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, in cvm_oct_free_tx_skbs()
89 priv->fau + qos * 4); in cvm_oct_free_tx_skbs()
94 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); in cvm_oct_free_tx_skbs()
98 t = __skb_dequeue(&priv->tx_free_list[qos]); in cvm_oct_free_tx_skbs()
103 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, in cvm_oct_free_tx_skbs()
113 total_remaining += skb_queue_len(&priv->tx_free_list[qos]); in cvm_oct_free_tx_skbs()
134 int qos; in cvm_oct_xmit() local
[all …]
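
Note: the loop above follows a common completion pattern: a fetch-and-add unit (FAU) counter at priv->fau + qos * 4 counts how many transmits the hardware finished, and that many skbs are freed from the per-qos list under its lock. A simplified sketch of one iteration (context as in the driver; the real code also writes an adjustment back to the counter so each completion is counted once):

    /* read how many tx skbs completed on this qos queue */
    int to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 0);

    spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
    while (to_free-- > 0 && skb_queue_len(&priv->tx_free_list[qos]))
            dev_kfree_skb_any(__skb_dequeue(&priv->tx_free_list[qos]));
    spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
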
ethernet.c
678 int qos; in cvm_oct_probe() local
780 for (qos = 0; qos < 16; qos++) in cvm_oct_probe()
781 skb_queue_head_init(&priv->tx_free_list[qos]); in cvm_oct_probe()
834 for (qos = 0; qos < 16; qos++) in cvm_oct_probe()
835 skb_queue_head_init(&priv->tx_free_list[qos]); in cvm_oct_probe()
836 for (qos = 0; qos < cvmx_pko_get_num_queues(port); in cvm_oct_probe()
837 qos++) in cvm_oct_probe()
838 cvmx_fau_atomic_write32(priv->fau + qos * 4, 0); in cvm_oct_probe()
/drivers/net/ethernet/ti/
am65-cpsw-qos.c
58 return port->qos.est_oper || port->qos.est_admin; in am65_cpsw_port_est_enabled()
167 if (port->qos.est_oper) in am65_cpsw_admin_to_oper()
168 devm_kfree(&ndev->dev, port->qos.est_oper); in am65_cpsw_admin_to_oper()
170 port->qos.est_oper = port->qos.est_admin; in am65_cpsw_admin_to_oper()
171 port->qos.est_admin = NULL; in am65_cpsw_admin_to_oper()
187 if (port->qos.est_oper && port->qos.est_admin && in am65_cpsw_port_est_get_buf_num()
188 est_new->buf == port->qos.est_oper->buf) in am65_cpsw_port_est_get_buf_num()
217 if (!port->qos.est_admin) in am65_cpsw_est_update_state()
306 port->qos.link_speed); in am65_cpsw_est_check_scheds()
336 port->qos.link_speed); in am65_cpsw_est_set_sched_list()
[all …]
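
Note: lines 167-171 above are the standard two-schedule swap used by EST/taprio offload: a new gate schedule is staged as "admin", and on activation the old "oper" copy is freed and admin becomes oper, annotated:

    /* promote the staged (admin) EST schedule to active (oper);
     * the hardware double-buffers, so the old oper copy can go */
    if (port->qos.est_oper)
            devm_kfree(&ndev->dev, port->qos.est_oper);
    port->qos.est_oper = port->qos.est_admin;
    port->qos.est_admin = NULL;
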
/drivers/infiniband/hw/irdma/
ws.c
47 node->traffic_class = vsi->qos[user_pri].traffic_class; in irdma_alloc_node()
49 node->rel_bw = vsi->qos[user_pri].rel_bw; in irdma_alloc_node()
53 node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle; in irdma_alloc_node()
114 vsi->qos[node->user_pri].qs_handle = node_info.qs_handle; in irdma_ws_cqp_cmd()
161 mutex_lock(&vsi->qos[user_pri].qos_mutex); in irdma_tc_in_use()
162 if (!list_empty(&vsi->qos[user_pri].qplist)) { in irdma_tc_in_use()
163 mutex_unlock(&vsi->qos[user_pri].qos_mutex); in irdma_tc_in_use()
171 if (vsi->qos[i].traffic_class == vsi->qos[user_pri].traffic_class && in irdma_tc_in_use()
172 !list_empty(&vsi->qos[i].qplist)) { in irdma_tc_in_use()
173 mutex_unlock(&vsi->qos[user_pri].qos_mutex); in irdma_tc_in_use()
[all …]
/drivers/interconnect/qcom/
sdm660.c
244 struct qcom_icc_qos qos; member
262 .qos.ap_owned = _ap_owned, \
263 .qos.qos_mode = _qos_mode, \
264 .qos.areq_prio = _qos_prio, \
265 .qos.prio_level = _qos_prio, \
266 .qos.qos_port = _qos_port, \
569 struct qcom_icc_qos *qos, in qcom_icc_bimc_set_qos_health() argument
575 val = qos->prio_level; in qcom_icc_bimc_set_qos_health()
578 val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT; in qcom_icc_bimc_set_qos_health()
583 val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT; in qcom_icc_bimc_set_qos_health()
[all …]
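
Note: qcom_icc_bimc_set_qos_health() composes one BKE "health" register value from the per-node QoS fields. A sketch of that composition; the *_SHIFT values here are placeholders, the real ones are defined in sdm660.c:

    #define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT   8    /* placeholder */
    #define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT  31   /* placeholder */

    u32 val = qos->prio_level;
    val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
    val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
    /* val is then written to the node's M_BKE_HEALTH_n register */
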
/drivers/atm/
iphase.c
246 if (vcc->qos.txtp.traffic_class == ATM_ABR) { in clear_lockup()
476 if (vcc->qos.txtp.max_pcr <= 0) { in ia_cbr_setup()
480 rate = vcc->qos.txtp.max_pcr; in ia_cbr_setup()
1384 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1386 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1401 if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1402 (vcc->qos.txtp.traffic_class == ATM_ABR))
1728 if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1757 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1761 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
[all …]
fore200e.c
976 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) { in fore200e_push_rpd()
1231 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal); in fore200e_activate_vcin()
1288 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate) in fore200e_rate_ctrl() argument
1290 if (qos->txtp.max_pcr < ATM_OC3_PCR) { in fore200e_rate_ctrl()
1293 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR; in fore200e_rate_ctrl()
1341 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), in fore200e_open()
1342 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ], in fore200e_open()
1343 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu, in fore200e_open()
1344 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ], in fore200e_open()
1345 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu); in fore200e_open()
[all …]
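
Note: fore200e_rate_ctrl() scales the permitted back-to-back cell burst by the fraction of OC-3 line rate the VCC reserves: data_cells = max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR. Worked example, assuming for illustration FORE200E_MAX_BACK2BACK_CELLS = 255: with max_pcr = 100000 cells/s and ATM_OC3_PCR = 353207 cells/s (from include/uapi/linux/atm.h), data_cells = 100000 * 255 / 353207 ≈ 72 cells per burst.
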
lanai.c
696 if (lvcc->rx.atmvcc->qos.aal == ATM_AAL5) { in host_vcc_start_rx()
727 (lvcc->tx.atmvcc->qos.txtp.traffic_class == ATM_CBR) ? in host_vcc_start_tx()
788 (lvcc->tx.atmvcc->qos.txtp.traffic_class != ATM_CBR || in lanai_shutdown_tx_vci()
1499 struct lanai_vcc *lvcc, const struct atm_qos *qos) in lanai_setup_rx_vci_aal5() argument
1502 qos->rxtp.max_sdu, AAL5_RX_MULTIPLIER, "RX"); in lanai_setup_rx_vci_aal5()
1507 const struct atm_qos *qos) in lanai_setup_tx_vci() argument
1510 if (qos->aal == ATM_AAL0) { in lanai_setup_tx_vci()
1516 max_sdu = qos->txtp.max_sdu; in lanai_setup_tx_vci()
1652 if (unlikely(lvcc->rx.atmvcc->qos.aal != ATM_AAL5)) { in handle_service()
1970 const struct atm_qos *qos) in vci0_is_ok() argument
[all …]
idt77252.c
130 static int idt77252_change_qos(struct atm_vcc *vcc, struct atm_qos *qos,
878 aal = vcc->qos.aal; in queue_skb()
1066 if ((vcc->qos.aal == ATM_AAL0) || in dequeue_rx()
1067 (vcc->qos.aal == ATM_AAL34)) { in dequeue_rx()
1107 if (vcc->qos.aal != ATM_AAL5) { in dequeue_rx()
1109 card->name, vcc->qos.aal); in dequeue_rx()
1301 if (vcc->qos.aal != ATM_AAL0) { in idt77252_rx_raw()
1958 switch (vcc->qos.aal) { in idt77252_send_skb()
1964 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal); in idt77252_send_skb()
2133 struct atm_vcc *vcc, struct atm_qos *qos) in idt77252_init_cbr() argument
[all …]
zatm.c
496 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0; in open_rx_first()
497 if (vcc->qos.aal == ATM_AAL5) { in open_rx_first()
498 if (vcc->qos.rxtp.max_sdu > 65464) in open_rx_first()
499 vcc->qos.rxtp.max_sdu = 65464; in open_rx_first()
502 cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER, in open_rx_first()
526 zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ? in open_rx_first()
661 | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | in do_tx()
908 zatm_dev->tx_bw += vcc->qos.txtp.min_pcr; in close_tx()
928 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; in open_tx_first()
938 unlimited = vcc->qos.txtp.traffic_class == ATM_UBR && in open_tx_first()
[all …]
atmtcp.c
95 vcc->qos = msg->qos; in atmtcp_recv_control()
135 msg.qos = vcc->qos; in atmtcp_v_open()
199 if (vcc->qos.txtp.traffic_class == ATM_NONE) { in atmtcp_v_send()
275 vcc->qos.rxtp.traffic_class != ATM_NONE) { in find_vcc()
/drivers/net/ethernet/intel/ixgbe/
ixgbe_sriov.h
24 u8 qos, __be16 vlan_proto);
42 u16 vid, u16 qos, u32 vf) in ixgbe_set_vmvir() argument
45 u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; in ixgbe_set_vmvir()
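
Note: ixgbe_set_vmvir() assembles the default VLAN tag the hardware inserts for a VF: the 12-bit VID in the low bits, the 3-bit priority (qos) shifted by VLAN_PRIO_SHIFT (13, from include/linux/if_vlan.h), plus the enable flag. For example:

    #include <linux/if_vlan.h>

    u16 vid = 100, qos = 5;
    /* 100 | (5 << 13) = 0x0064 | 0xA000 = 0xA064, plus the enable bit */
    u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
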
/drivers/net/ethernet/sfc/
sriov.c
22 u8 qos, __be16 vlan_proto) in efx_sriov_set_vf_vlan() argument
28 (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))) in efx_sriov_set_vf_vlan()
34 return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos); in efx_sriov_set_vf_vlan()
/drivers/media/rc/
gpio-ir-recv.c
26 struct pm_qos_request qos; member
142 cpu_latency_qos_remove_request(&gpio_dev->qos); in gpio_ir_recv_remove()
181 cpu_latency_qos_remove_request(&gpio_dev->qos); in gpio_ir_recv_runtime_suspend()
190 cpu_latency_qos_add_request(&gpio_dev->qos, 0); in gpio_ir_recv_runtime_resume()
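
Note: gpio-ir-recv holds a CPU latency QoS request across the receive window so deep idle states do not distort IR pulse timing. The bracketing API is small (include/linux/pm_qos.h):

    static struct pm_qos_request qos;

    cpu_latency_qos_add_request(&qos, 0);   /* 0 us: veto deep C-states */
    /* ... latency-critical RX ... */
    cpu_latency_qos_remove_request(&qos);
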
/drivers/net/netdevsim/
netdev.c
96 u16 vlan, u8 qos, __be16 vlan_proto) in nsim_set_vf_vlan() argument
101 if (vf >= nsim_bus_dev->num_vfs || vlan > 4095 || qos > 7) in nsim_set_vf_vlan()
105 nsim_bus_dev->vfconfigs[vf].qos = qos; in nsim_set_vf_vlan()
181 ivi->qos = nsim_bus_dev->vfconfigs[vf].qos; in nsim_get_vf_config()
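
Note: nsim_set_vf_vlan() shows the canonical .ndo_set_vf_vlan validation repeated across this list: the VID must fit in 12 bits and the priority (qos) in 3. A minimal handler sketch; my_set_vf_vlan is an illustrative name:

    static int my_set_vf_vlan(struct net_device *dev, int vf,
                              u16 vlan, u8 qos, __be16 vlan_proto)
    {
            if (vlan > 4095 || qos > 7)     /* 12-bit VID, 3-bit PCP */
                    return -EINVAL;
            if (vlan_proto != htons(ETH_P_8021Q))
                    return -EPROTONOSUPPORT;
            /* program VID/priority into the VF's port table here */
            return 0;
    }
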
/drivers/net/ethernet/huawei/hinic/
hinic_sriov.c
101 u8 qos, int vf_id) in hinic_set_vf_vlan() argument
114 vf_vlan.qos = qos; in hinic_set_vf_vlan()
563 ivi->qos = vfinfo->pf_qos; in hinic_get_vf_config()
643 u16 vlan, u8 qos) in hinic_add_vf_vlan() argument
648 err = hinic_set_vf_vlan(hwdev, true, vlan, qos, vf_id); in hinic_add_vf_vlan()
653 nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos; in hinic_add_vf_vlan()
656 vlan, qos, HW_VF_ID_TO_OS(vf_id)); in hinic_add_vf_vlan()
730 u16 cur_vlanprio, int vf, u16 vlan, u8 qos) in set_hw_vf_vlan() argument
735 if (vlan || qos) { in set_hw_vf_vlan()
746 OS_VF_ID_TO_HW(vf), vlan, qos); in set_hw_vf_vlan()
[all …]
hinic_sriov.h
75 u8 qos; member
81 int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
/drivers/net/ethernet/netronome/nfp/
nfp_net_sriov.c
94 int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, in nfp_app_set_vf_vlan() argument
109 if (vlan > 4095 || qos > 7) { in nfp_app_set_vf_vlan()
117 FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos); in nfp_app_set_vf_vlan()
237 ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci); in nfp_app_get_vf_config()
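
Note: the nfp driver packs VID and QoS into one VLAN config word with FIELD_PREP() and unpacks it with FIELD_GET() (include/linux/bitfield.h). A self-contained sketch; the VF_VLAN_* masks here are illustrative stand-ins for the driver's NFP_NET_VF_CFG_* definitions:

    #include <linux/bitfield.h>

    #define VF_VLAN_VID  GENMASK(11, 0)    /* illustrative */
    #define VF_VLAN_QOS  GENMASK(15, 13)   /* illustrative */

    u16 vlan_tci = FIELD_PREP(VF_VLAN_VID, 100) | FIELD_PREP(VF_VLAN_QOS, 5);
    u8 qos = FIELD_GET(VF_VLAN_QOS, vlan_tci);   /* 5 */
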
/drivers/net/ethernet/intel/ice/
ice_idc.c
221 void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) in ice_get_qos_params() argument
230 qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg); in ice_get_qos_params()
232 qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7; in ice_get_qos_params()
235 qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; in ice_get_qos_params()
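
Note: ice_get_qos_params() reads a priority-to-TC map stored as eight 3-bit fields in one 32-bit word; line 232 above extracts field i as (up2tc >> (i * 3)) & 0x7. For example:

    u32 up2tc = 0x249000;   /* prios 0-3 -> TC0, prios 4-7 -> TC1 */
    u8 tc[8];
    int i;

    for (i = 0; i < 8; i++) /* priority i occupies bits 3i..3i+2 */
            tc[i] = (up2tc >> (i * 3)) & 0x7;
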
/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/
qos_tracepoint.h
22 __entry->tsar_ix = vport->qos.esw_tsar_ix;
41 __entry->tsar_ix = vport->qos.esw_tsar_ix;
44 __entry->group = vport->qos.group;
/drivers/net/ethernet/mellanox/mlx5/core/
eswitch.c
147 u16 vlan, u8 qos, u8 set_flags) in modify_esw_vport_cvlan() argument
156 vport, vlan, qos, set_flags); in modify_esw_vport_cvlan()
175 esw_vport_context.cvlan_pcp, qos); in modify_esw_vport_cvlan()
793 mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share); in esw_vport_setup()
811 flags = (vport->info.vlan || vport->info.qos) ? in esw_vport_setup()
815 vport->info.qos, flags); in esw_vport_setup()
1002 memset(&vport->qos, 0, sizeof(vport->qos)); in mlx5_eswitch_clear_vf_vports_info()
1845 ivi->qos = evport->info.qos; in mlx5_eswitch_get_vport_config()
1848 ivi->min_tx_rate = evport->qos.min_rate; in mlx5_eswitch_get_vport_config()
1849 ivi->max_tx_rate = evport->qos.max_rate; in mlx5_eswitch_get_vport_config()
[all …]
