
Lines Matching refs:pmc

64 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow() local
65 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in kvm_perf_overflow()
67 if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { in kvm_perf_overflow()
68 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in kvm_perf_overflow()
69 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow()
77 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow_intr() local
78 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in kvm_perf_overflow_intr()
80 if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { in kvm_perf_overflow_intr()
81 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in kvm_perf_overflow_intr()
82 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow_intr()
93 irq_work_queue(&pmc_to_pmu(pmc)->irq_work); in kvm_perf_overflow_intr()
95 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in kvm_perf_overflow_intr()
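
The matches above appear to come from the x86 KVM vPMU code (arch/x86/kvm/pmu.c). Both overflow callbacks share one deferral pattern: the first overflow since the last reprogram marks the counter in reprogram_pmi, latches it in global_status and raises KVM_REQ_PMU; the _intr variant then delivers a PMI, either through irq_work (line 93) or directly via KVM_REQ_PMI (line 95), depending on whether the overflow interrupted guest execution. A minimal userspace sketch of the shared marking step, with simplified stand-ins for the kernel's kvm_pmc/kvm_pmu types and bit helpers (illustration only, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures (illustration only). */
    struct pmu_model { uint64_t reprogram_pmi; uint64_t global_status; };
    struct pmc_model { int idx; struct pmu_model *pmu; };

    /* Returns the old value of the bit, then sets it - models test_and_set_bit(). */
    static bool test_and_set_bit64(int nr, uint64_t *word)
    {
        bool old = (*word >> nr) & 1;
        *word |= (uint64_t)1 << nr;
        return old;
    }

    /* Models the common part of kvm_perf_overflow*(): only the first overflow
     * since the last reprogram marks the counter and latches global_status. */
    static bool mark_overflow(struct pmc_model *pmc)
    {
        if (!test_and_set_bit64(pmc->idx, &pmc->pmu->reprogram_pmi)) {
            pmc->pmu->global_status |= (uint64_t)1 << pmc->idx;
            return true;   /* caller would now raise KVM_REQ_PMU (and maybe a PMI) */
        }
        return false;      /* already pending, nothing more to do */
    }

    int main(void)
    {
        struct pmu_model pmu = {0};
        struct pmc_model pmc = { .idx = 3, .pmu = &pmu };

        printf("first overflow marked: %d\n", mark_overflow(&pmc));   /* 1 */
        printf("second overflow marked: %d\n", mark_overflow(&pmc));  /* 0 */
        return 0;
    }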
99 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, in pmc_reprogram_counter() argument
116 attr.sample_period = get_sample_period(pmc, pmc->counter); in pmc_reprogram_counter()
132 kvm_perf_overflow, pmc); in pmc_reprogram_counter()
135 PTR_ERR(event), pmc->idx); in pmc_reprogram_counter()
139 pmc->perf_event = event; in pmc_reprogram_counter()
140 pmc_to_pmu(pmc)->event_count++; in pmc_reprogram_counter()
141 clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); in pmc_reprogram_counter()
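
Line 116 converts the guest counter value into a perf sample period. In this version of the code get_sample_period() programs the number of events left before the counter wraps: (-counter) masked to the counter width, with the full range as a fallback when the counter reads zero. A self-contained model of that arithmetic (the 48-bit width is only an example; the real mask comes from the vCPU's counter_bitmask):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Example 48-bit counter mask; the kernel derives this from pmu->counter_bitmask. */
    #define PMC_BITMASK (((uint64_t)1 << 48) - 1)

    /* Models get_sample_period(): events remaining until the guest counter wraps. */
    static uint64_t get_sample_period(uint64_t counter_value)
    {
        uint64_t sample_period = (-counter_value) & PMC_BITMASK;

        if (!sample_period)                  /* counter == 0: use the full range */
            sample_period = PMC_BITMASK + 1;

        return sample_period;
    }

    int main(void)
    {
        /* A counter 100 events away from wrapping needs a period of 100. */
        printf("%" PRIu64 "\n", get_sample_period(PMC_BITMASK - 99));  /* 100 */
        printf("%" PRIu64 "\n", get_sample_period(0));                 /* 2^48 */
        return 0;
    }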
144 static void pmc_pause_counter(struct kvm_pmc *pmc) in pmc_pause_counter() argument
146 u64 counter = pmc->counter; in pmc_pause_counter()
148 if (!pmc->perf_event) in pmc_pause_counter()
152 counter += perf_event_pause(pmc->perf_event, true); in pmc_pause_counter()
153 pmc->counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
156 static bool pmc_resume_counter(struct kvm_pmc *pmc) in pmc_resume_counter() argument
158 if (!pmc->perf_event) in pmc_resume_counter()
162 if (perf_event_period(pmc->perf_event, in pmc_resume_counter()
163 get_sample_period(pmc, pmc->counter))) in pmc_resume_counter()
167 perf_event_enable(pmc->perf_event); in pmc_resume_counter()
169 clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi); in pmc_resume_counter()
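
pmc_pause_counter() folds whatever the host perf event accumulated into pmc->counter, truncated to the counter width so it wraps like real hardware, while pmc_resume_counter() tries to reuse the paused event by re-arming its period with perf_event_period() and re-enabling it; if the perf core rejects the new period, the caller recreates the event instead. A rough model of the pause-side accumulation, with the perf event reduced to a plain delta for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PMC_BITMASK (((uint64_t)1 << 48) - 1)   /* example 48-bit counter */

    /* Models pmc_pause_counter(): add the host event's delta and truncate
     * to the guest counter width, so the value wraps exactly like hardware. */
    static uint64_t pause_counter(uint64_t guest_counter, uint64_t host_delta)
    {
        return (guest_counter + host_delta) & PMC_BITMASK;
    }

    int main(void)
    {
        /* Counting past the top wraps around to a small value. */
        uint64_t c = pause_counter(PMC_BITMASK - 5, 10);
        printf("%" PRIu64 "\n", c);   /* prints 4 */
        return 0;
    }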
181 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) in reprogram_gp_counter() argument
185 struct kvm *kvm = pmc->vcpu->kvm; in reprogram_gp_counter()
187 struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu); in reprogram_gp_counter()
193 pmc->eventsel = eventsel; in reprogram_gp_counter()
195 pmc_pause_counter(pmc); in reprogram_gp_counter()
197 if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc)) in reprogram_gp_counter()
218 config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc); in reprogram_gp_counter()
226 if (pmc->current_config == eventsel && pmc_resume_counter(pmc)) in reprogram_gp_counter()
229 pmc_release_perf_event(pmc); in reprogram_gp_counter()
231 pmc->current_config = eventsel; in reprogram_gp_counter()
232 pmc_reprogram_counter(pmc, type, config, in reprogram_gp_counter()
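
Lines 226-232 are the reuse-or-recreate decision: when the event selector is unchanged (pmc->current_config == eventsel) and the paused perf event can simply be resumed, the expensive event-creation path is skipped; otherwise the old event is released and pmc_reprogram_counter() builds a new one. A condensed sketch of that control flow, with hypothetical can_resume()/recreate() helpers standing in for pmc_resume_counter() and pmc_reprogram_counter():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for pmc_resume_counter()/pmc_reprogram_counter(). */
    static bool can_resume(void) { return true; }
    static void recreate(void)   { puts("recreate perf event"); }

    /* Models the tail of reprogram_gp_counter(): reuse the existing perf event
     * when the selector is unchanged and the paused event can be resumed. */
    static void reprogram(uint64_t *current_config, uint64_t eventsel)
    {
        if (*current_config == eventsel && can_resume()) {
            puts("reuse existing perf event");
            return;
        }
        *current_config = eventsel;   /* remember the new selector */
        recreate();
    }

    int main(void)
    {
        uint64_t cfg = 0x1234;
        reprogram(&cfg, 0x1234);   /* reuse */
        reprogram(&cfg, 0x5678);   /* recreate */
        return 0;
    }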
241 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx) in reprogram_fixed_counter() argument
246 struct kvm *kvm = pmc->vcpu->kvm; in reprogram_fixed_counter()
248 pmc_pause_counter(pmc); in reprogram_fixed_counter()
250 if (!en_field || !pmc_is_enabled(pmc)) in reprogram_fixed_counter()
263 if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc)) in reprogram_fixed_counter()
266 pmc_release_perf_event(pmc); in reprogram_fixed_counter()
268 pmc->current_config = (u64)ctrl; in reprogram_fixed_counter()
269 pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE, in reprogram_fixed_counter()
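
For a fixed counter, ctrl is that counter's 4-bit slice of MSR_CORE_PERF_FIXED_CTR_CTRL: the low two bits form the OS/USR enable field checked on line 250, and bit 3 requests a PMI on overflow. A minimal model of pulling one counter's field out of the packed control value (assuming the standard 4 bits per fixed counter):

    #include <stdint.h>
    #include <stdio.h>

    /* Each fixed counter owns 4 bits of FIXED_CTR_CTRL; the low 2 bits are the
     * ring 0 / ring 3 enable field - models the kernel's fixed_ctrl_field(). */
    static unsigned fixed_ctrl_field(uint64_t ctrl_reg, int idx)
    {
        return (ctrl_reg >> (idx * 4)) & 0xf;
    }

    int main(void)
    {
        uint64_t fixed_ctr_ctrl = 0x0b0;     /* counter 1: enable=3, PMI=1 */
        unsigned ctrl = fixed_ctrl_field(fixed_ctr_ctrl, 1);

        printf("ctrl=%#x en_field=%u pmi=%u\n",
               ctrl, ctrl & 0x3, (ctrl >> 3) & 1);   /* ctrl=0xb en=3 pmi=1 */
        return 0;
    }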
279 struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx); in reprogram_counter() local
281 if (!pmc) in reprogram_counter()
284 if (pmc_is_gp(pmc)) in reprogram_counter()
285 reprogram_gp_counter(pmc, pmc->eventsel); in reprogram_counter()
290 reprogram_fixed_counter(pmc, ctrl, idx); in reprogram_counter()
301 struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit); in kvm_pmu_handle_event() local
303 if (unlikely(!pmc || !pmc->perf_event)) { in kvm_pmu_handle_event()
364 struct kvm_pmc *pmc; in kvm_pmu_rdpmc() local
373 pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask); in kvm_pmu_rdpmc()
374 if (!pmc) in kvm_pmu_rdpmc()
382 *data = pmc_read_counter(pmc) & mask; in kvm_pmu_rdpmc()
401 struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr); in kvm_pmu_mark_pmc_in_use() local
403 if (pmc) in kvm_pmu_mark_pmc_in_use()
404 __set_bit(pmc->idx, pmu->pmc_in_use); in kvm_pmu_mark_pmc_in_use()
447 static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc) in pmc_speculative_in_use() argument
449 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in pmc_speculative_in_use()
451 if (pmc_is_fixed(pmc)) in pmc_speculative_in_use()
453 pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3; in pmc_speculative_in_use()
455 return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE; in pmc_speculative_in_use()
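
pmc_speculative_in_use() asks whether the guest has the counter enabled, regardless of whether a host perf event currently backs it: fixed counters check the enable field of their slot in fixed_ctr_ctrl (the "& 0x3" on line 453), while general-purpose counters check ARCH_PERFMON_EVENTSEL_ENABLE, bit 22 of the event selector. A tiny illustration of the general-purpose check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bit 22 of a GP event selector is the enable bit (ARCH_PERFMON_EVENTSEL_ENABLE). */
    #define EVENTSEL_ENABLE ((uint64_t)1 << 22)

    /* Models pmc_speculative_in_use() for a general-purpose counter. */
    static bool gp_counter_enabled(uint64_t eventsel)
    {
        return eventsel & EVENTSEL_ENABLE;
    }

    int main(void)
    {
        printf("%d\n", gp_counter_enabled(0x0041003c));  /* 1: enable bit set */
        printf("%d\n", gp_counter_enabled(0x0000003c));  /* 0: programmed but disabled */
        return 0;
    }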
462 struct kvm_pmc *pmc = NULL; in kvm_pmu_cleanup() local
472 pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i); in kvm_pmu_cleanup()
474 if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc)) in kvm_pmu_cleanup()
475 pmc_stop_counter(pmc); in kvm_pmu_cleanup()
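
kvm_pmu_cleanup() prunes counters the guest has stopped touching: for every counter that was not marked in use since the last pass, it releases the host perf event if one is still attached and the counter is no longer speculatively in use (line 474). A sketch of that filtering step over a hypothetical array of counter states:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative counter state: whether a host perf event is attached and
     * whether the guest still has the counter enabled. */
    struct pmc_state { bool has_perf_event; bool guest_enabled; };

    /* Models the kvm_pmu_cleanup() loop: stop counters that are backed by a
     * perf event but are no longer in use by the guest. */
    static void cleanup(struct pmc_state *pmcs, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (pmcs[i].has_perf_event && !pmcs[i].guest_enabled) {
                pmcs[i].has_perf_event = false;   /* pmc_stop_counter() analogue */
                printf("released counter %zu\n", i);
            }
        }
    }

    int main(void)
    {
        struct pmc_state pmcs[] = {
            { true,  true  },   /* kept: still enabled by the guest */
            { true,  false },   /* released */
            { false, false },   /* nothing to do */
        };
        cleanup(pmcs, sizeof(pmcs) / sizeof(pmcs[0]));
        return 0;
    }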