
Searched refs:x86_pmu (Results 1 – 11 of 11) sorted by relevance

/arch/x86/kernel/cpu/
perf_event_intel_lbr.c
147 if (pmi && x86_pmu.version >= 4) in __intel_pmu_lbr_enable()
186 for (i = 0; i < x86_pmu.lbr_nr; i++) in intel_pmu_lbr_reset_32()
187 wrmsrl(x86_pmu.lbr_from + i, 0); in intel_pmu_lbr_reset_32()
194 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_reset_64()
195 wrmsrl(x86_pmu.lbr_from + i, 0); in intel_pmu_lbr_reset_64()
196 wrmsrl(x86_pmu.lbr_to + i, 0); in intel_pmu_lbr_reset_64()
197 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) in intel_pmu_lbr_reset_64()
204 if (!x86_pmu.lbr_nr) in intel_pmu_lbr_reset()
207 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) in intel_pmu_lbr_reset()
220 rdmsrl(x86_pmu.lbr_tos, tos); in intel_pmu_lbr_tos()
[all …]
perf_event.c
43 struct x86_pmu x86_pmu __read_mostly;
68 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
118 if (!x86_pmu.extra_regs) in x86_pmu_extra_regs()
121 for (er = x86_pmu.extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
148 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
153 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
164 i = x86_pmu.num_counters; in reserve_pmc_hardware()
177 for (i = 0; i < x86_pmu.num_counters; i++) { in release_pmc_hardware()
201 for (i = 0; i < x86_pmu.num_counters; i++) { in check_hw_exists()
215 if (x86_pmu.num_counters_fixed) { in check_hw_exists()
[all …]
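
The perf_event.c hits above revolve around a single global descriptor, struct x86_pmu x86_pmu __read_mostly;, whose num_counters field bounds every per-counter loop (reserve, release, existence check). Below is a minimal user-space sketch of that shape, not kernel code: pmu_desc, reserve_counters and the fake hooks are hypothetical names used only to illustrate the reserve-then-unwind loop seen in reserve_pmc_hardware().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's global x86_pmu descriptor:
 * one structure describes the active PMU model, and generic code
 * only ever consults this single instance. */
struct pmu_desc {
        int num_counters;           /* number of general-purpose counters */
        bool (*reserve_one)(int i); /* model-specific reservation hook */
        void (*release_one)(int i);
};

static struct pmu_desc pmu; /* filled once at init, read everywhere else */

/* Walk 0..num_counters-1; on failure, release what was already taken. */
static bool reserve_counters(void)
{
        int i;

        for (i = 0; i < pmu.num_counters; i++) {
                if (!pmu.reserve_one(i))
                        goto undo;
        }
        return true;

undo:
        while (i--)
                pmu.release_one(i);
        return false;
}

static bool fake_reserve(int i)
{
        printf("reserve counter %d\n", i);
        return true;
}

static void fake_release(int i)
{
        printf("release counter %d\n", i);
}

int main(void)
{
        pmu.num_counters = 4;
        pmu.reserve_one = fake_reserve;
        pmu.release_one = fake_release;
        return reserve_counters() ? 0 : 1;
}
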
perf_event_intel.c
1498 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); in __intel_pmu_enable_all()
1681 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) in intel_pmu_enable_fixed()
1758 if (!x86_pmu.num_counters) in intel_pmu_reset()
1765 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in intel_pmu_reset()
1769 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) in intel_pmu_reset()
1776 if (x86_pmu.version >= 2) { in intel_pmu_reset()
1782 if (x86_pmu.lbr_nr) { in intel_pmu_reset()
1808 if (!x86_pmu.late_ack) in intel_pmu_handle_irq()
1850 x86_pmu.drain_pebs(regs); in intel_pmu_handle_irq()
1860 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; in intel_pmu_handle_irq()
[all …]
perf_event_intel_ds.c
315 if (!x86_pmu.pebs) in alloc_pebs_buffer()
318 buffer = dsalloc(x86_pmu.pebs_buffer_size, GFP_KERNEL, node); in alloc_pebs_buffer()
326 if (x86_pmu.intel_cap.pebs_format < 2) { in alloc_pebs_buffer()
329 dsfree(buffer, x86_pmu.pebs_buffer_size); in alloc_pebs_buffer()
335 max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size; in alloc_pebs_buffer()
340 max * x86_pmu.pebs_record_size; in alloc_pebs_buffer()
349 if (!ds || !x86_pmu.pebs) in release_pebs_buffer()
356 x86_pmu.pebs_buffer_size); in release_pebs_buffer()
367 if (!x86_pmu.bts) in alloc_bts_buffer()
393 if (!ds || !x86_pmu.bts) in release_bts_buffer()
[all …]
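
The alloc_pebs_buffer() lines above size the PEBS area arithmetically: the record capacity is pebs_buffer_size / pebs_record_size, and the absolute maximum pointer is the buffer base plus that many whole records, so any tail smaller than one record goes unused. A small stand-alone illustration of that arithmetic, with made-up sizes (the real values live in x86_pmu and depend on the PEBS format):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical sizes, for illustration only. */
#define PEBS_BUFFER_SIZE (64 * 1024)
#define PEBS_RECORD_SIZE 192

int main(void)
{
        unsigned char *buffer = malloc(PEBS_BUFFER_SIZE);
        if (!buffer)
                return 1;

        /* How many whole records fit in the buffer. */
        size_t max = PEBS_BUFFER_SIZE / PEBS_RECORD_SIZE;

        /* End of the usable area: base plus max whole records; the
         * space past this point is smaller than one record and unused. */
        unsigned char *absolute_maximum = buffer + max * PEBS_RECORD_SIZE;

        printf("records: %zu, unused tail: %zu bytes\n",
               max, (size_t)(buffer + PEBS_BUFFER_SIZE - absolute_maximum));
        free(buffer);
        return 0;
}
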
perf_event.h
506 struct x86_pmu {
641 __quirk.next = x86_pmu.quirks; \
642 x86_pmu.quirks = &__quirk; \
670 extern struct x86_pmu x86_pmu __read_mostly;
674 return x86_pmu.lbr_sel_map && in x86_pmu_has_lbr_callstack()
675 x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0; in x86_pmu_has_lbr_callstack()
705 return x86_pmu.eventsel + (x86_pmu.addr_offset ? in x86_pmu_config_addr()
706 x86_pmu.addr_offset(index, true) : index); in x86_pmu_config_addr()
711 return x86_pmu.perfctr + (x86_pmu.addr_offset ? in x86_pmu_event_addr()
712 x86_pmu.addr_offset(index, false) : index); in x86_pmu_event_addr()
[all …]
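
The perf_event.h helpers above compute MSR addresses through an optional callback: when the model supplies addr_offset, the counter index is translated before being added to the eventsel or perfctr base; otherwise the index is added directly. A hedged stand-alone sketch of that optional-hook pattern (the pmu_desc struct, the spaced_offset hook and the base value are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical subset of the descriptor: a base MSR number plus an
 * optional per-model hook that turns a counter index into an offset
 * (some models space their MSRs non-contiguously). */
struct pmu_desc {
        unsigned int eventsel_base;
        int (*addr_offset)(int index, bool eventsel);
};

static struct pmu_desc pmu;

/* Same shape as x86_pmu_config_addr(): use the hook when present,
 * fall back to a contiguous layout when it is NULL. */
static unsigned int config_addr(int index)
{
        return pmu.eventsel_base +
               (pmu.addr_offset ? pmu.addr_offset(index, true) : index);
}

/* Hypothetical model whose control MSRs are spaced two apart. */
static int spaced_offset(int index, bool eventsel)
{
        (void)eventsel;
        return index * 2;
}

int main(void)
{
        pmu.eventsel_base = 0x1000; /* arbitrary example base */

        printf("contiguous: %#x\n", config_addr(1));
        pmu.addr_offset = spaced_offset;
        printf("spaced:     %#x\n", config_addr(1));
        return 0;
}
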
perf_event_amd.c
249 for (i = 0; i < x86_pmu.num_counters; i++) { in __amd_put_nb_event_constraints()
316 for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) { in __amd_get_nb_event_constraints()
359 for (i = 0; i < x86_pmu.num_counters; i++) { in amd_alloc_nb()
622 static __initconst const struct x86_pmu amd_pmu = {
661 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; in amd_core_pmu_init()
674 x86_pmu.eventsel = MSR_F15H_PERF_CTL; in amd_core_pmu_init()
675 x86_pmu.perfctr = MSR_F15H_PERF_CTR; in amd_core_pmu_init()
676 x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; in amd_core_pmu_init()
690 x86_pmu = amd_pmu; in amd_pmu_init()
perf_event_p6.c
200 static __initconst const struct x86_pmu p6_pmu = {
241 x86_pmu.attr_rdpmc_broken = 1; in p6_pmu_rdpmc_quirk()
242 x86_pmu.attr_rdpmc = 0; in p6_pmu_rdpmc_quirk()
248 x86_pmu = p6_pmu; in p6_pmu_init()
perf_event_knc.c
289 static const struct x86_pmu knc_pmu __initconst = {
315 x86_pmu = knc_pmu; in knc_pmu_init()
perf_event_p4.c
921 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_disable_all()
990 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_enable_all()
1009 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_handle_irq()
1028 if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) in p4_pmu_handle_irq()
1301 static __initconst const struct x86_pmu p4_pmu = {
1359 x86_pmu = p4_pmu; in p4_pmu_init()
1370 for (i = 0; i < x86_pmu.num_counters; i++) { in p4_pmu_init()
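
The amd, p6, knc and p4 hits above all follow the same initialization idiom: each model defines an __initconst template, the init routine copies it wholesale into the global descriptor (x86_pmu = amd_pmu;, x86_pmu = p4_pmu;, ...), and then patches individual fields such as eventsel, perfctr or num_counters for specific families. A simplified sketch of that copy-then-override pattern, using hypothetical values rather than real MSR numbers:

#include <stdio.h>

/* Hypothetical descriptor with just the fields the hits above touch. */
struct pmu_desc {
        const char *name;
        unsigned int eventsel;
        unsigned int perfctr;
        int num_counters;
};

static struct pmu_desc pmu; /* the single global, analogous to x86_pmu */

/* Model template; in the kernel these are __initconst and copied once. */
static const struct pmu_desc amd_like_pmu = {
        .name         = "amd-like",
        .eventsel     = 0x100, /* arbitrary example values */
        .perfctr      = 0x200,
        .num_counters = 4,
};

static void pmu_init(int has_extended_counters)
{
        /* Copy the whole template, then patch fields for the detected
         * variant, loosely mirroring the overrides in amd_core_pmu_init(). */
        pmu = amd_like_pmu;
        if (has_extended_counters) {
                pmu.eventsel     = 0x300;
                pmu.perfctr      = 0x400;
                pmu.num_counters = 6;
        }
}

int main(void)
{
        pmu_init(1);
        printf("%s: %d counters, eventsel base %#x\n",
               pmu.name, pmu.num_counters, pmu.eventsel);
        return 0;
}
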
perf_event_intel_bts.c
550 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) in bts_init()
/arch/x86/xen/
pmu.c
493 if (x86_pmu.handle_irq(&regs)) in xen_pmu_irq_handler()
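
This single Xen hit shows the payoff of the descriptor: code outside arch/x86/kernel/cpu/ drives the active PMU purely through its function pointers, here by forwarding the interrupt to x86_pmu.handle_irq(&regs). A hedged sketch of dispatching through such a hook (the struct names and the fake handler are invented for illustration):

#include <stdio.h>

/* Hypothetical register snapshot and descriptor exposing an IRQ hook,
 * loosely modelled on x86_pmu.handle_irq(). */
struct regs { unsigned long ip; };

struct pmu_desc {
        int (*handle_irq)(struct regs *regs); /* nonzero if handled */
};

static struct pmu_desc pmu;

/* A caller in another subsystem needs no model knowledge: it only
 * forwards the interrupt through the hook, as xen_pmu_irq_handler() does. */
static int pmu_irq_handler(struct regs *regs)
{
        if (pmu.handle_irq && pmu.handle_irq(regs))
                return 1; /* handled */
        return 0;         /* not ours */
}

static int fake_handle_irq(struct regs *regs)
{
        printf("PMU interrupt at ip=%#lx\n", regs->ip);
        return 1;
}

int main(void)
{
        struct regs r = { .ip = 0x1234 };

        pmu.handle_irq = fake_handle_irq;
        return pmu_irq_handler(&r) ? 0 : 1;
}
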