/drivers/ntb/test/

ntb_perf.c
     144  struct perf_ctx *perf;    member
     169  struct perf_ctx *perf;    member
     205  int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
     247  static void perf_terminate_test(struct perf_ctx *perf);
     253  link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);    in perf_link_is_up()
     260  struct perf_ctx *perf = peer->perf;    in perf_spad_cmd_send() local
     264  dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);    in perf_spad_cmd_send()
     277  sts = ntb_peer_spad_read(perf->ntb, peer->pidx,    in perf_spad_cmd_send()
     278  PERF_SPAD_CMD(perf->gidx));    in perf_spad_cmd_send()
     284  ntb_peer_spad_write(perf->ntb, peer->pidx,    in perf_spad_cmd_send()
    [all …]
/drivers/gpu/drm/nouveau/nvkm/subdev/bios/

perf.c
      34  u32 perf = 0;    in nvbios_perf_table() local
      38  perf = nvbios_rd32(bios, bit_P.offset + 0);    in nvbios_perf_table()
      39  if (perf) {    in nvbios_perf_table()
      40  *ver = nvbios_rd08(bios, perf + 0);    in nvbios_perf_table()
      41  *hdr = nvbios_rd08(bios, perf + 1);    in nvbios_perf_table()
      43  *cnt = nvbios_rd08(bios, perf + 5);    in nvbios_perf_table()
      44  *len = nvbios_rd08(bios, perf + 2);    in nvbios_perf_table()
      45  *snr = nvbios_rd08(bios, perf + 4);    in nvbios_perf_table()
      46  *ssz = nvbios_rd08(bios, perf + 3);    in nvbios_perf_table()
      47  return perf;    in nvbios_perf_table()
    [all …]
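nvbios_perf_table() above locates the performance table through the BIT 'P' entry and then reads its header a byte at a time. The sketch below decodes the same six bytes in plain userspace C, assuming the usual nouveau meaning of the out-parameters (ver = table version, hdr = header length, cnt/len = entry count and size, snr/ssz = sub-entry count and size); the buffer contents and struct are invented for illustration, and the real driver selects these offsets per table version.

```c
/*
 * Illustrative only: decode the perf-table header bytes in the same
 * order as the excerpt above.  Buffer contents, struct and field
 * meanings are assumptions for the example, not the driver's code.
 */
#include <stdint.h>
#include <stdio.h>

struct perf_table_hdr {
	uint8_t ver;	/* table version          (perf + 0) */
	uint8_t hdr;	/* header length in bytes (perf + 1) */
	uint8_t len;	/* entry length           (perf + 2) */
	uint8_t ssz;	/* sub-entry length       (perf + 3) */
	uint8_t snr;	/* sub-entries per entry  (perf + 4) */
	uint8_t cnt;	/* number of entries      (perf + 5) */
};

static struct perf_table_hdr parse_perf_hdr(const uint8_t *vbios, uint32_t perf)
{
	struct perf_table_hdr h = {
		.ver = vbios[perf + 0],
		.hdr = vbios[perf + 1],
		.len = vbios[perf + 2],
		.ssz = vbios[perf + 3],
		.snr = vbios[perf + 4],
		.cnt = vbios[perf + 5],
	};
	return h;
}

int main(void)
{
	/* Fake image: a 6-byte header at offset 0. */
	const uint8_t vbios[] = { 0x40, 0x06, 0x10, 0x04, 0x02, 0x05 };
	struct perf_table_hdr h = parse_perf_hdr(vbios, 0);

	printf("ver=0x%02x hdr=%d, %d entries x %d bytes, %d sub-entries x %d bytes\n",
	       h.ver, h.hdr, h.cnt, h.len, h.snr, h.ssz);
	return 0;
}
```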
/drivers/gpu/drm/msm/

msm_perf.c
      41  static int wait_sample(struct msm_perf_state *perf)    in wait_sample() argument
      45  if (time_after(perf->next_jiffies, start_jiffies)) {    in wait_sample()
      47  perf->next_jiffies - start_jiffies;    in wait_sample()
      54  perf->next_jiffies += SAMPLE_TIME;    in wait_sample()
      58  static int refill_buf(struct msm_perf_state *perf)    in refill_buf() argument
      60  struct msm_drm_private *priv = perf->dev->dev_private;    in refill_buf()
      62  char *ptr = perf->buf;    in refill_buf()
      63  int rem = sizeof(perf->buf);    in refill_buf()
      66  if ((perf->cnt++ % 32) == 0) {    in refill_buf()
      86  ret = wait_sample(perf);    in refill_buf()
    [all …]
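wait_sample() above keeps an absolute next_jiffies deadline: if the deadline is still in the future it sleeps for the remainder, then advances the deadline by SAMPLE_TIME so the sampling cadence does not drift even when refill_buf() takes time. A minimal userspace analogue of that pattern (the names, the 100 ms period and the output are invented, not taken from the driver):

```c
/*
 * Userspace analogue of the wait_sample() pattern above: keep an
 * absolute deadline, sleep only for what remains of the current
 * interval, then push the deadline forward by one period.
 */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

#define SAMPLE_PERIOD_NS (100 * 1000 * 1000L)	/* 100 ms */

static struct timespec next_deadline;

static void wait_sample(void)
{
	/* No-op if the deadline already passed (i.e. we are running late). */
	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next_deadline, NULL);

	/* Advance the deadline by exactly one period so the cadence holds. */
	next_deadline.tv_nsec += SAMPLE_PERIOD_NS;
	if (next_deadline.tv_nsec >= 1000000000L) {
		next_deadline.tv_nsec -= 1000000000L;
		next_deadline.tv_sec += 1;
	}
}

int main(void)
{
	clock_gettime(CLOCK_MONOTONIC, &next_deadline);

	for (int i = 0; i < 5; i++) {
		wait_sample();
		printf("sample %d\n", i);	/* stand-in for reading the counters */
	}
	return 0;
}
```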
/drivers/gpu/drm/msm/disp/dpu1/

dpu_core_perf.c
      56  bw_factor = kms->catalog->perf.bw_inefficiency_factor;    in _dpu_core_perf_calc_bw()
      93  clk_factor = kms->catalog->perf.clk_inefficiency_factor;    in _dpu_core_perf_calc_clk()
     112  struct dpu_core_perf_params *perf)    in _dpu_core_perf_calc_crtc() argument
     114  if (!kms || !kms->catalog || !crtc || !state || !perf) {    in _dpu_core_perf_calc_crtc()
     119  memset(perf, 0, sizeof(struct dpu_core_perf_params));    in _dpu_core_perf_calc_crtc()
     121  if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {    in _dpu_core_perf_calc_crtc()
     122  perf->bw_ctl = 0;    in _dpu_core_perf_calc_crtc()
     123  perf->max_per_pipe_ib = 0;    in _dpu_core_perf_calc_crtc()
     124  perf->core_clk_rate = 0;    in _dpu_core_perf_calc_crtc()
     125  } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {    in _dpu_core_perf_calc_crtc()
    [all …]

dpu_core_perf.h
      98  void dpu_core_perf_destroy(struct dpu_core_perf *perf);
     107  int dpu_core_perf_init(struct dpu_core_perf *perf,

dpu_plane.c
     180  hw_latency_lines = dpu_kms->catalog->perf.min_prefill_lines;    in _dpu_plane_calc_bw()
     349  &pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);    in _dpu_plane_set_qos_lut()
     378  danger_lut = pdpu->catalog->perf.danger_lut_tbl    in _dpu_plane_set_danger_lut()
     380  safe_lut = pdpu->catalog->perf.safe_lut_tbl    in _dpu_plane_set_danger_lut()
     388  danger_lut = pdpu->catalog->perf.danger_lut_tbl    in _dpu_plane_set_danger_lut()
     390  safe_lut = pdpu->catalog->perf.safe_lut_tbl    in _dpu_plane_set_danger_lut()
     393  danger_lut = pdpu->catalog->perf.danger_lut_tbl    in _dpu_plane_set_danger_lut()
     395  safe_lut = pdpu->catalog->perf.safe_lut_tbl    in _dpu_plane_set_danger_lut()
    1184  cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg    in dpu_plane_sspp_atomic_update()
/drivers/gpu/drm/i915/

i915_perf.c
     389  i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)    in i915_perf_get_oa_config() argument
     394  oa_config = idr_find(&perf->metrics_idr, metrics_set);    in i915_perf_get_oa_config()
     471  hw_tail = stream->perf->ops.oa_hw_tail_read(stream);    in oa_buffer_check_unlocked()
     521  __ratelimit(&stream->perf->tail_pointer_race))    in oa_buffer_check_unlocked()
     722  (GRAPHICS_VER(stream->perf->i915) == 12 ?    in gen8_append_oa_reports()
     736  if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&    in gen8_append_oa_reports()
     737  GRAPHICS_VER(stream->perf->i915) <= 11)    in gen8_append_oa_reports()
     771  if (!stream->perf->exclusive_stream->ctx ||    in gen8_append_oa_reports()
     780  if (stream->perf->exclusive_stream->ctx &&    in gen8_append_oa_reports()
     804  oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?    in gen8_append_oa_reports()
    [all …]

i915_query.c
     199  struct i915_perf *perf = &i915->perf;    in query_perf_config_data() local
     206  if (!perf->i915)    in query_perf_config_data()
     241  idr_for_each_entry(&perf->metrics_idr, tmp, id) {    in query_perf_config_data()
     252  oa_config = i915_perf_get_oa_config(perf, config_id);    in query_perf_config_data()
     320  static size_t sizeof_perf_metrics(struct i915_perf *perf)    in sizeof_perf_metrics() argument
     328  idr_for_each_entry(&perf->metrics_idr, tmp, id)    in sizeof_perf_metrics()
     340  struct i915_perf *perf = &i915->perf;    in query_perf_config_list() local
     346  if (!perf->i915)    in query_perf_config_list()
     350  return sizeof_perf_metrics(perf);    in query_perf_config_list()
     374  idr_for_each_entry(&perf->metrics_idr, tmp, id) {    in query_perf_config_list()

i915_perf_types.h
      44  struct i915_perf *perf;    member
     137  struct i915_perf *perf;    member
     325  bool (*is_valid_b_counter_reg)(struct i915_perf *perf, u32 addr);
     331  bool (*is_valid_mux_reg)(struct i915_perf *perf, u32 addr);
     337  bool (*is_valid_flex_reg)(struct i915_perf *perf, u32 addr);

i915_perf.h
      40  i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set);
/drivers/gpu/drm/i915/selftests/

i915_perf.c
      20  alloc_empty_config(struct i915_perf *perf)    in alloc_empty_config() argument
      28  oa_config->perf = perf;    in alloc_empty_config()
      33  mutex_lock(&perf->metrics_lock);    in alloc_empty_config()
      35  oa_config->id = idr_alloc(&perf->metrics_idr, oa_config, 2, 0, GFP_KERNEL);    in alloc_empty_config()
      37  mutex_unlock(&perf->metrics_lock);    in alloc_empty_config()
      42  mutex_unlock(&perf->metrics_lock);    in alloc_empty_config()
      48  destroy_empty_config(struct i915_perf *perf)    in destroy_empty_config() argument
      53  mutex_lock(&perf->metrics_lock);    in destroy_empty_config()
      55  idr_for_each_entry(&perf->metrics_idr, tmp, id) {    in destroy_empty_config()
      63  idr_remove(&perf->metrics_idr, oa_config->id);    in destroy_empty_config()
    [all …]

i915_selftest.c
     226  if (!i915_selftest.perf)    in i915_perf_selftests()
     229  err = run_selftests(perf, pdev_to_i915(pdev));    in i915_perf_selftests()
     231  i915_selftest.perf = err;    in i915_perf_selftests()
     235  if (i915_selftest.perf < 0) {    in i915_perf_selftests()
     236  i915_selftest.perf = -ENOTTY;    in i915_perf_selftests()
     438  module_param_named_unsafe(perf_selftests, i915_selftest.perf, int, 0400);
/drivers/cpufreq/

acpi-cpufreq.c
     198  struct acpi_processor_performance *perf;    in extract_io() local
     201  perf = to_perf_data(data);    in extract_io()
     203  for (i = 0; i < perf->state_count; i++) {    in extract_io()
     204  if (value == perf->states[i].status)    in extract_io()
     214  struct acpi_processor_performance *perf;    in extract_msr() local
     223  perf = to_perf_data(data);    in extract_msr()
     226  if (msr == perf->states[pos->driver_data].status)    in extract_msr()
     308  struct acpi_processor_performance *perf = to_perf_data(data);    in drv_read() local
     310  .reg = &perf->control_register,    in drv_read()
     331  struct acpi_processor_performance *perf = to_perf_data(data);    in drv_write() local
    [all …]
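extract_io() and extract_msr() above map a raw status value read back from hardware to a P-state index by scanning the ACPI performance-state table. A self-contained sketch of that lookup (the table and values below are invented; the driver builds its table from ACPI _PSS data):

```c
/*
 * Schematic version of the extract_io()/extract_msr() lookup above:
 * walk the P-state table and return the index whose status field
 * matches the value just read from hardware.
 */
#include <stdio.h>

struct pstate {
	unsigned int core_frequency;	/* MHz */
	unsigned int status;		/* value hardware reports in this state */
};

static const struct pstate states[] = {
	{ 3400, 0x1a00 },
	{ 2800, 0x1600 },
	{ 1200, 0x0a00 },
};

/* Return the matching P-state index, or -1 if the value is unknown. */
static int extract_pstate(unsigned int value)
{
	for (unsigned int i = 0; i < sizeof(states) / sizeof(states[0]); i++)
		if (value == states[i].status)
			return (int)i;
	return -1;
}

int main(void)
{
	int idx = extract_pstate(0x1600);

	if (idx >= 0)
		printf("status 0x1600 -> P%d (%u MHz)\n", idx, states[idx].core_frequency);
	return 0;
}
```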
cppc_cpufreq.c
     107  u64 perf;    in cppc_scale_freq_workfn() local
     117  perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,    in cppc_scale_freq_workfn()
     121  perf <<= SCHED_CAPACITY_SHIFT;    in cppc_scale_freq_workfn()
     122  local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);    in cppc_scale_freq_workfn()
     312  unsigned int perf)    in cppc_cpufreq_perf_to_khz() argument
     330  retval = offset + div64_u64(perf * mul, div);    in cppc_cpufreq_perf_to_khz()
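Two calculations are visible in this file: cppc_cpufreq_perf_to_khz() evaluates a linear perf-to-frequency mapping as khz = offset + perf * mul / div, and cppc_scale_freq_workfn() derives a capacity-style frequency scale as (delivered_perf << SCHED_CAPACITY_SHIFT) / highest_perf. A sketch of that arithmetic with made-up CPPC capability numbers (the driver takes the real ones from the ACPI _CPC object):

```c
/*
 * Sketch (with invented numbers) of the two computations visible above:
 *  (a) linear perf -> kHz interpolation, equivalent to offset + perf * mul / div
 *  (b) frequency scale = (delivered_perf << SCHED_CAPACITY_SHIFT) / highest_perf
 */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10	/* 1024 == running at full speed */

/* Made-up CPPC capabilities: perf 100..400, 800 MHz at lowest, 2.4 GHz nominal. */
static const uint64_t lowest_perf = 100, nominal_perf = 300, highest_perf = 400;
static const uint64_t lowest_khz = 800000, nominal_khz = 2400000;

static uint64_t perf_to_khz(uint64_t perf)
{
	/* Interpolate between (lowest_perf, lowest_khz) and (nominal_perf, nominal_khz). */
	uint64_t mul = nominal_khz - lowest_khz;
	uint64_t div = nominal_perf - lowest_perf;

	return lowest_khz + (perf - lowest_perf) * mul / div;
}

static uint64_t freq_scale(uint64_t delivered_perf)
{
	/* 1024 when the CPU delivered its highest perf over the window. */
	return (delivered_perf << SCHED_CAPACITY_SHIFT) / highest_perf;
}

int main(void)
{
	printf("perf 300 -> %llu kHz\n", (unsigned long long)perf_to_khz(300));
	printf("delivered perf 200 -> scale %llu/1024\n",
	       (unsigned long long)freq_scale(200));
	return 0;
}
```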
/drivers/xen/

xen-acpi-processor.c
     216  dst_perf = &op.u.set_pminfo.perf;    in push_pxx_to_hypervisor()
     244  struct acpi_processor_performance *perf;    in push_pxx_to_hypervisor() local
     247  perf = _pr->performance;    in push_pxx_to_hypervisor()
     249  for (i = 0; i < perf->state_count; i++) {    in push_pxx_to_hypervisor()
     251  (i == perf->state ? '*' : ' '), i,    in push_pxx_to_hypervisor()
     252  (u32) perf->states[i].core_frequency,    in push_pxx_to_hypervisor()
     253  (u32) perf->states[i].power,    in push_pxx_to_hypervisor()
     254  (u32) perf->states[i].transition_latency);    in push_pxx_to_hypervisor()
     552  struct acpi_processor_performance *perf;    in xen_acpi_processor_init() local
     555  perf = per_cpu_ptr(acpi_perf_data, i);    in xen_acpi_processor_init()
    [all …]
/drivers/perf/

Kconfig
      41  PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
      82  tristate "Freescale i.MX8 DDR perf monitor"
      96  Adds the L2 cache PMU into the perf events subsystem for
     106  Adds the L3 cache PMU into the perf events subsystem for
     129  Enable perf support for the ARMv8.2 Statistical Profiling
     131  the CPU pipeline and reports this via the perf AUX interface.
     140  source "drivers/perf/hisilicon/Kconfig"
/drivers/gpu/drm/amd/amdkfd/

kfd_topology.c
     140  struct kfd_perf_properties *perf;    in kfd_release_topology_device() local
     166  perf = container_of(dev->perf_props.next,    in kfd_release_topology_device()
     168  list_del(&perf->list);    in kfd_release_topology_device()
     169  kfree(perf);    in kfd_release_topology_device()
     563  struct kfd_perf_properties *perf;    in kfd_remove_sysfs_node_entry() local
     601  list_for_each_entry(perf, &dev->perf_props, list) {    in kfd_remove_sysfs_node_entry()
     602  kfree(perf->attr_group);    in kfd_remove_sysfs_node_entry()
     603  perf->attr_group = NULL;    in kfd_remove_sysfs_node_entry()
     626  struct kfd_perf_properties *perf;    in kfd_build_sysfs_node_entry() local
     751  list_for_each_entry(perf, &dev->perf_props, list) {    in kfd_build_sysfs_node_entry()
    [all …]
/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/

gk208.fuc5
      41  #include "perf.fuc"
      52  #include "perf.fuc"
      65  #include "perf.fuc"

gf119.fuc4
      41  #include "perf.fuc"
      52  #include "perf.fuc"
      65  #include "perf.fuc"

gf100.fuc3
      41  #include "perf.fuc"
      52  #include "perf.fuc"
      65  #include "perf.fuc"

gt215.fuc3
      41  #include "perf.fuc"
      52  #include "perf.fuc"
      65  #include "perf.fuc"

perf.fuc
      42  // $r15 - current (perf)
      53  // $r15 - current (perf)
/drivers/firmware/arm_scmi/

perf.c
      36  u32 perf;    member
     256  return t1->perf - t2->perf;    in opp_cmp_func()
     297  opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);    in scmi_perf_describe_levels_get()
     303  opp->perf, opp->power, opp->trans_latency_us);    in scmi_perf_describe_levels_get()
     656  freq = opp->perf * dom->mult_factor;    in scmi_dvfs_device_opps_add()
     663  freq = (--opp)->perf * dom->mult_factor;    in scmi_dvfs_device_opps_add()
     727  opp_freq = opp->perf * dom->mult_factor;    in scmi_dvfs_est_power_get()
     919  DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)
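The SCMI performance protocol keeps its operating points ordered by abstract perf level (opp_cmp_func) and converts a level to a frequency with a per-domain multiplier (freq = opp->perf * dom->mult_factor). A small sketch of that pattern with invented OPP data and multiplier:

```c
/*
 * Sketch of the pattern visible above: sort an OPP table by its
 * abstract perf level, then turn a level into a frequency with a
 * per-domain multiplier.  OPP values and the multiplier are invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct scmi_opp {
	uint32_t perf;		/* abstract performance level */
	uint32_t power;		/* cost reported by the platform */
};

static int opp_cmp_func(const void *a, const void *b)
{
	const struct scmi_opp *t1 = a, *t2 = b;

	return (int)t1->perf - (int)t2->perf;
}

int main(void)
{
	struct scmi_opp opps[] = { { 300, 90 }, { 100, 30 }, { 200, 55 } };
	const uint64_t mult_factor = 1000 * 1000;	/* level -> Hz, made up */

	qsort(opps, 3, sizeof(opps[0]), opp_cmp_func);

	for (int i = 0; i < 3; i++)
		printf("level %u -> %llu Hz (power %u)\n", opps[i].perf,
		       (unsigned long long)(opps[i].perf * mult_factor),
		       opps[i].power);
	return 0;
}
```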
/drivers/gpu/drm/msm/disp/mdp5/

mdp5_cfg.c
      98  .perf = {
     185  .perf = {
     285  .perf = {
     357  .perf = {
     437  .perf = {
     537  .perf = {
     650  .perf = {
/drivers/iommu/intel/

Makefile
       5  obj-$(CONFIG_DMAR_PERF) += perf.o