Lines Matching refs:pmc

The matches below appear to come from the Linux KVM x86 vPMU implementation (arch/x86/kvm/pmu.c, circa v5.17). Each entry shows the source line number, the matched line, and the enclosing function; a trailing local/argument tag marks how pmc is used on that line.

62 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow() local
63 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in kvm_perf_overflow()
65 if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { in kvm_perf_overflow()
66 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in kvm_perf_overflow()
67 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow()
75 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow_intr() local
76 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in kvm_perf_overflow_intr()
78 if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { in kvm_perf_overflow_intr()
79 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in kvm_perf_overflow_intr()
80 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow_intr()
91 irq_work_queue(&pmc_to_pmu(pmc)->irq_work); in kvm_perf_overflow_intr()
93 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in kvm_perf_overflow_intr()
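
Lines 62-93 are KVM's two perf overflow callbacks. A minimal sketch of the shared pattern, reconstructed around the matched lines (a sketch of the upstream code, not a verbatim copy; exact bodies vary by kernel version):

/* Runs in perf's overflow context, possibly NMI context. */
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Queue at most one reprogram per counter; the same bitmap tells
	 * kvm_pmu_handle_event() which counters need service. */
	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

kvm_perf_overflow_intr() (lines 75-93) follows the same pattern and then injects a PMI: through irq_work_queue() (line 91) when the vCPU cannot safely be woken from NMI context, and through a KVM_REQ_PMI request (line 93) otherwise.
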
97 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, in pmc_reprogram_counter() argument
113 attr.sample_period = get_sample_period(pmc, pmc->counter); in pmc_reprogram_counter()
116 guest_cpuid_is_intel(pmc->vcpu)) { in pmc_reprogram_counter()
127 kvm_perf_overflow, pmc); in pmc_reprogram_counter()
130 PTR_ERR(event), pmc->idx); in pmc_reprogram_counter()
134 pmc->perf_event = event; in pmc_reprogram_counter()
135 pmc_to_pmu(pmc)->event_count++; in pmc_reprogram_counter()
136 clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); in pmc_reprogram_counter()
137 pmc->is_paused = false; in pmc_reprogram_counter()
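
Lines 97-137 are pmc_reprogram_counter(), which backs a guest PMC with a host perf_event. A condensed sketch; the attr fields and signature match an upstream snapshot of roughly this vintage and may differ in other versions:

static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  u64 config, bool exclude_user,
				  bool exclude_kernel, bool intr)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type		= type,
		.size		= sizeof(attr),
		.pinned		= true,
		.exclude_idle	= true,
		.exclude_host	= 1,
		.exclude_user	= exclude_user,
		.exclude_kernel	= exclude_kernel,
		.config		= config,
	};

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	/* Line 116: HSW_IN_TX_CHECKPOINTED does not support a nonzero
	 * sample period, so clear the period rather than fail creation. */
	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
	    guest_cpuid_is_intel(pmc->vcpu))
		attr.sample_period = 0;

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->is_paused = false;
}
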
140 static void pmc_pause_counter(struct kvm_pmc *pmc) in pmc_pause_counter() argument
142 u64 counter = pmc->counter; in pmc_pause_counter()
144 if (!pmc->perf_event || pmc->is_paused) in pmc_pause_counter()
148 counter += perf_event_pause(pmc->perf_event, true); in pmc_pause_counter()
149 pmc->counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
150 pmc->is_paused = true; in pmc_pause_counter()
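
Lines 140-150, pmc_pause_counter(), can be read almost completely from the matches alone. The point of the helper: fold the host perf_event's accumulated count into the guest-visible counter before disabling the event, so the value survives a reprogram. Sketch:

static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event || pmc->is_paused)
		return;

	/* Disable the host event and accumulate its count into the
	 * guest counter, truncated to the counter's width. */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}
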
153 static bool pmc_resume_counter(struct kvm_pmc *pmc) in pmc_resume_counter() argument
155 if (!pmc->perf_event) in pmc_resume_counter()
159 if (perf_event_period(pmc->perf_event, in pmc_resume_counter()
160 get_sample_period(pmc, pmc->counter))) in pmc_resume_counter()
164 perf_event_enable(pmc->perf_event); in pmc_resume_counter()
165 pmc->is_paused = false; in pmc_resume_counter()
167 clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi); in pmc_resume_counter()
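
Lines 153-167 are pmc_resume_counter(), the companion fast path that reuses a paused perf_event rather than destroying and recreating it. Sketch:

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* Recalculate the sample period, since the guest may have
	 * rewritten the counter while the event was paused. */
	if (perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* Reuse the existing perf_event in place of a full reprogram. */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	return true;
}

Returning false sends the caller down the slow path: release the event and recreate it via pmc_reprogram_counter().
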
171 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) in reprogram_gp_counter() argument
175 struct kvm *kvm = pmc->vcpu->kvm; in reprogram_gp_counter()
178 struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu); in reprogram_gp_counter()
184 pmc->eventsel = eventsel; in reprogram_gp_counter()
186 pmc_pause_counter(pmc); in reprogram_gp_counter()
188 if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc)) in reprogram_gp_counter()
212 config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc); in reprogram_gp_counter()
220 if (pmc->current_config == eventsel && pmc_resume_counter(pmc)) in reprogram_gp_counter()
223 pmc_release_perf_event(pmc); in reprogram_gp_counter()
225 pmc->current_config = eventsel; in reprogram_gp_counter()
226 pmc_reprogram_counter(pmc, type, config, in reprogram_gp_counter()
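
Lines 171-226 are reprogram_gp_counter(). A condensed sketch: the pmu_event_filter lookup is reduced to a comment, and the RAW-vs-HARDWARE type selection is simplified relative to upstream (which only consults pmc_perf_hw_id() when no modifier bits are set in the event select):

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	u64 config;
	u32 type = PERF_TYPE_RAW;

	pmc->eventsel = eventsel;
	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	/* Upstream consults kvm->arch.pmu_event_filter here and may
	 * refuse the event; elided for brevity. */

	/* Line 212: translate the guest's event select into a generic
	 * perf hw event id where possible. */
	config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
	if (config != PERF_COUNT_HW_MAX)
		type = PERF_TYPE_HARDWARE;
	else
		config = eventsel & AMD64_RAW_EVENT_MASK;

	/* Fast path (line 220): same programming as before, so just
	 * resume the paused perf_event. */
	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);
	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT);
}
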
233 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx) in reprogram_fixed_counter() argument
238 struct kvm *kvm = pmc->vcpu->kvm; in reprogram_fixed_counter()
240 pmc_pause_counter(pmc); in reprogram_fixed_counter()
242 if (!en_field || !pmc_is_enabled(pmc)) in reprogram_fixed_counter()
255 if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc)) in reprogram_fixed_counter()
258 pmc_release_perf_event(pmc); in reprogram_fixed_counter()
260 pmc->current_config = (u64)ctrl; in reprogram_fixed_counter()
261 pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE, in reprogram_fixed_counter()
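
Lines 233-261, reprogram_fixed_counter(), mirror the GP path, with the control field coming from MSR_CORE_PERF_FIXED_CTR_CTRL instead of an event select MSR. Sketch (event-filter check again elided):

void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;	/* OS/user enable bits */
	bool pmi = ctrl & 0x8;		/* PMI-on-overflow bit */

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);
	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi);
}
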
271 struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx); in reprogram_counter() local
273 if (!pmc) in reprogram_counter()
276 if (pmc_is_gp(pmc)) in reprogram_counter()
277 reprogram_gp_counter(pmc, pmc->eventsel); in reprogram_counter()
282 reprogram_fixed_counter(pmc, ctrl, idx); in reprogram_counter()
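
Lines 271-282 are reprogram_counter(), the dispatcher used when a reprogram_pmi bit is serviced. Sketch:

void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
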
293 struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit); in kvm_pmu_handle_event() local
295 if (unlikely(!pmc || !pmc->perf_event)) { in kvm_pmu_handle_event()
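
Lines 293-295 sit in kvm_pmu_handle_event(), the KVM_REQ_PMU handler that walks the reprogram_pmi bitmap set by the overflow callbacks. Sketch:

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);

		/* Line 295: a stale bit for a counter without a
		 * perf_event is simply cleared. */
		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}

	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}
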
356 struct kvm_pmc *pmc; in kvm_pmu_rdpmc() local
365 pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask); in kvm_pmu_rdpmc()
366 if (!pmc) in kvm_pmu_rdpmc()
374 *data = pmc_read_counter(pmc) & mask; in kvm_pmu_rdpmc()
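
Lines 356-374 are from kvm_pmu_rdpmc(), the RDPMC emulation entry point. A reduced sketch; the VMware-backdoor, fast-mode-mask, and CR4.PCE/CPL permission checks present upstream are represented only as comments:

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = ~0ull;

	if (!pmu->version)
		return 1;

	/* Upstream handles VMware backdoor PMCs and narrows the mask
	 * for fast-mode reads before this point; elided here. */

	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;	/* the caller injects #GP on failure */

	/* Upstream also validates CR4.PCE/CPL/CR0.PE here. */

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}
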
396 struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr); in kvm_pmu_mark_pmc_in_use() local
398 if (pmc) in kvm_pmu_mark_pmc_in_use()
399 __set_bit(pmc->idx, pmu->pmc_in_use); in kvm_pmu_mark_pmc_in_use()
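
Lines 396-399 are kvm_pmu_mark_pmc_in_use(), the producer half of the lazy perf_event release scheme (the consumer is kvm_pmu_cleanup() below). Sketch:

/* Called on guest accesses to PMU MSRs: remember that this counter's
 * MSR was touched during the current vCPU time slice. */
void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}
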
442 static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc) in pmc_speculative_in_use() argument
444 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in pmc_speculative_in_use()
446 if (pmc_is_fixed(pmc)) in pmc_speculative_in_use()
448 pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3; in pmc_speculative_in_use()
450 return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE; in pmc_speculative_in_use()
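
Lines 442-450 are pmc_speculative_in_use(): a counter counts as in use when the guest has it enabled, even if no host perf_event is currently attached. Sketch:

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Fixed counters: the per-counter enable field in
	 * MSR_CORE_PERF_FIXED_CTR_CTRL (line 448). */
	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	/* GP counters: the enable bit in the event select MSR. */
	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}
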
457 struct kvm_pmc *pmc = NULL; in kvm_pmu_cleanup() local
467 pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i); in kvm_pmu_cleanup()
469 if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc)) in kvm_pmu_cleanup()
470 pmc_stop_counter(pmc); in kvm_pmu_cleanup()
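
Lines 457-470 are from kvm_pmu_cleanup(), which runs at most once per vCPU time slice and frees perf_events for counters the guest has stopped touching. Sketch (upstream also invokes an optional vendor pmu_ops->cleanup hook at the end):

void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	/* Valid counters whose MSRs were not touched since the last pass. */
	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		/* Line 469: free only events that are both idle and not
		 * speculatively enabled by the guest. */
		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}
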