Lines matching refs: vcpu

Cross-reference listing for the identifier vcpu in the KVM ARM PMUv3 emulation code. Each entry gives the source line number, the matching line, and the enclosing function; "argument" or "local" marks how the reference is used.
17 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
18 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
19 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
43 static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_idx_is_64bit() argument
46 __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC); in kvm_pmu_idx_is_64bit()
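
Taken together with the fragment above, this helper almost certainly reduces to a single check: only the cycle counter can be a true 64-bit counter, and only when the guest has set PMCR_EL0.LC. A minimal reconstruction, assuming the ARMV8_PMU_CYCLE_IDX comparison that the rest of this listing implies:

static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
        /* Only the cycle counter is 64 bits, and only with PMCR_EL0.LC set */
        return (select_idx == ARMV8_PMU_CYCLE_IDX &&
                __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}
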
66 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_pmc_is_chained() local
68 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_pmc_is_chained()
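
The chained bitmap is indexed per counter pair, hence the idx >> 1. kvm_pmc_to_vcpu() itself does not appear in this listing; a plausible sketch, assuming the pmc array is embedded in struct kvm_pmu, which in turn sits inside struct kvm_vcpu_arch, walks back to the owning vCPU with container_of():

static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

        pmc -= pmc->idx;        /* rewind to pmc[0] of the embedded array */
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}
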
108 static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_idx_has_chain_evtype() argument
118 eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_idx_has_chain_evtype()
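
A pair counts a chained event when its odd (high) counter is programmed with the architectural CHAIN event. A sketch of the full check, assuming the select_idx |= 1 step and the ARMV8_PMUV3_PERFCTR_CHAIN comparison that the fragment implies:

static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 eventsel, reg;

        select_idx |= 0x1;      /* look at the odd counter of the pair */

        if (select_idx == ARMV8_PMU_CYCLE_IDX)
                return false;

        reg = PMEVTYPER0_EL0 + select_idx;
        eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);

        return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}
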
128 static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu, in kvm_pmu_get_pair_counter_value() argument
137 counter = __vcpu_sys_reg(vcpu, reg); in kvm_pmu_get_pair_counter_value()
138 counter_high = __vcpu_sys_reg(vcpu, reg + 1); in kvm_pmu_get_pair_counter_value()
144 counter = __vcpu_sys_reg(vcpu, reg); in kvm_pmu_get_pair_counter_value()
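
The two back-to-back __vcpu_sys_reg() reads above splice the low and high halves of a chained pair into one 64-bit value; the live delta accumulated by the backing perf event must then be added on top. A plausible shape for the whole helper (the kvm_pmu_get_canonical_pmc() helper and the perf_event_read_value() call are assumptions based on standard perf usage):

static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
                                          struct kvm_pmc *pmc)
{
        u64 counter, counter_high, reg, enabled, running;

        if (kvm_pmu_pmc_is_chained(pmc)) {
                pmc = kvm_pmu_get_canonical_pmc(pmc);
                reg = PMEVCNTR0_EL0 + pmc->idx;

                counter = __vcpu_sys_reg(vcpu, reg);
                counter_high = __vcpu_sys_reg(vcpu, reg + 1);

                counter = lower_32_bits(counter) | (counter_high << 32);
        } else {
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
                counter = __vcpu_sys_reg(vcpu, reg);
        }

        /*
         * The real counter value is the saved register value plus
         * whatever the backing perf event has counted since then.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

        return counter;
}
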
163 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_get_counter_value() argument
166 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_get_counter_value()
169 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_get_counter_value()
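
Callers ask for one architectural counter, so the 64-bit pair value has to be trimmed back to the half that was actually requested. A sketch, assuming a kvm_pmu_pmc_is_high_counter() predicate that is true for odd indices:

u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        /* Return only the half of the pair the caller asked for */
        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_pmc_is_high_counter(select_idx))
                counter = upper_32_bits(counter);
        else if (select_idx != ARMV8_PMU_CYCLE_IDX)
                counter = lower_32_bits(counter);

        return counter;
}
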
186 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) in kvm_pmu_set_counter_value() argument
192 __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx); in kvm_pmu_set_counter_value()
195 kvm_pmu_create_perf_event(vcpu, select_idx); in kvm_pmu_set_counter_value()
218 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) in kvm_pmu_stop_counter() argument
226 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_stop_counter()
236 __vcpu_sys_reg(vcpu, reg) = val; in kvm_pmu_stop_counter()
239 __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter); in kvm_pmu_stop_counter()
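
Stopping a counter folds the perf event's accumulated count back into the guest-visible register(s) before the event is torn down, writing the high half separately for a chained pair. A reconstruction consistent with the matched lines (kvm_pmu_get_canonical_pmc() and kvm_pmu_release_perf_event() are assumed helpers):

static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 counter, reg, val;

        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (!pmc->perf_event)
                return;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
                reg = PMCCNTR_EL0;
                val = counter;
        } else {
                reg = PMEVCNTR0_EL0 + pmc->idx;
                val = lower_32_bits(counter);
        }

        __vcpu_sys_reg(vcpu, reg) = val;

        if (kvm_pmu_pmc_is_chained(pmc))
                __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

        kvm_pmu_release_perf_event(pmc);
}
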
249 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) in kvm_pmu_vcpu_init() argument
252 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_init()
263 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) in kvm_pmu_vcpu_reset() argument
265 unsigned long mask = kvm_pmu_valid_counter_mask(vcpu); in kvm_pmu_vcpu_reset()
266 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_reset()
270 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); in kvm_pmu_vcpu_reset()
272 bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS); in kvm_pmu_vcpu_reset()
280 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_pmu_vcpu_destroy() argument
283 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_destroy()
287 irq_work_sync(&vcpu->arch.pmu.overflow_work); in kvm_pmu_vcpu_destroy()
290 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) in kvm_pmu_valid_counter_mask() argument
292 u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT; in kvm_pmu_valid_counter_mask()
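
PMCR_EL0.N advertises how many event counters are implemented, and the cycle counter is always valid on top of those. A sketch of the mask computation, assuming ARMV8_PMU_PMCR_N_MASK and the usual GENMASK idiom:

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
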
308 void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_enable_counter_mask() argument
311 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_enable_counter_mask()
314 if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val) in kvm_pmu_enable_counter_mask()
324 kvm_pmu_update_pmc_chained(vcpu, i); in kvm_pmu_enable_counter_mask()
325 kvm_pmu_create_perf_event(vcpu, i); in kvm_pmu_enable_counter_mask()
343 void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_disable_counter_mask() argument
346 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_disable_counter_mask()
359 kvm_pmu_update_pmc_chained(vcpu, i); in kvm_pmu_disable_counter_mask()
360 kvm_pmu_create_perf_event(vcpu, i); in kvm_pmu_disable_counter_mask()
368 static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) in kvm_pmu_overflow_status() argument
372 if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) { in kvm_pmu_overflow_status()
373 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); in kvm_pmu_overflow_status()
374 reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); in kvm_pmu_overflow_status()
375 reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1); in kvm_pmu_overflow_status()
376 reg &= kvm_pmu_valid_counter_mask(vcpu); in kvm_pmu_overflow_status()
382 static void kvm_pmu_update_state(struct kvm_vcpu *vcpu) in kvm_pmu_update_state() argument
384 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_state()
387 if (!kvm_arm_pmu_v3_ready(vcpu)) in kvm_pmu_update_state()
390 overflow = !!kvm_pmu_overflow_status(vcpu); in kvm_pmu_update_state()
396 if (likely(irqchip_in_kernel(vcpu->kvm))) { in kvm_pmu_update_state()
397 int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, in kvm_pmu_update_state()
403 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu) in kvm_pmu_should_notify_user() argument
405 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_should_notify_user()
406 struct kvm_sync_regs *sregs = &vcpu->run->s.regs; in kvm_pmu_should_notify_user()
409 if (likely(irqchip_in_kernel(vcpu->kvm))) in kvm_pmu_should_notify_user()
418 void kvm_pmu_update_run(struct kvm_vcpu *vcpu) in kvm_pmu_update_run() argument
420 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_pmu_update_run()
424 if (vcpu->arch.pmu.irq_level) in kvm_pmu_update_run()
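
The in-kernel irq_level is mirrored into the shared kvm_run structure so a user space irqchip can observe the PMU line. A sketch, assuming the KVM_ARM_DEV_PMU bit in device_irq_level:

void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Reflect the current PMU overflow line level to user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}
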
435 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) in kvm_pmu_flush_hwstate() argument
437 kvm_pmu_update_state(vcpu); in kvm_pmu_flush_hwstate()
447 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) in kvm_pmu_sync_hwstate() argument
449 kvm_pmu_update_state(vcpu); in kvm_pmu_sync_hwstate()
459 struct kvm_vcpu *vcpu; in kvm_pmu_perf_overflow_notify_vcpu() local
463 vcpu = kvm_pmc_to_vcpu(pmu->pmc); in kvm_pmu_perf_overflow_notify_vcpu()
465 kvm_vcpu_kick(vcpu); in kvm_pmu_perf_overflow_notify_vcpu()
477 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_perf_overflow() local
489 if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_perf_overflow()
496 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx); in kvm_pmu_perf_overflow()
498 if (kvm_pmu_overflow_status(vcpu)) { in kvm_pmu_perf_overflow()
499 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); in kvm_pmu_perf_overflow()
502 kvm_vcpu_kick(vcpu); in kvm_pmu_perf_overflow()
504 irq_work_queue(&vcpu->arch.pmu.overflow_work); in kvm_pmu_perf_overflow()
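
This is the perf overflow callback: it latches the overflow bit for the guest and, if that makes an interrupt deliverable, requests vCPU attention; kicking a vCPU from NMI context is unsafe, hence the irq_work deferral on the last line. A plausible reconstruction (the period rewinding and the pmu.stop/pmu.start bracketing are assumptions modeled on how such handlers commonly re-arm the event):

static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
        u64 period;

        cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

        /*
         * Reset the sample period to the architectural limit,
         * i.e. the point where the counter overflows.
         */
        period = -(local64_read(&perf_event->count));

        if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                period &= GENMASK(31, 0);

        local64_set(&perf_event->hw.period_left, 0);
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

                if (!in_nmi())
                        kvm_vcpu_kick(vcpu);
                else
                        irq_work_queue(&vcpu->arch.pmu.overflow_work);
        }

        cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}
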
515 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_software_increment() argument
517 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_software_increment()
520 if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) in kvm_pmu_software_increment()
524 val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); in kvm_pmu_software_increment()
533 type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i); in kvm_pmu_software_increment()
534 type &= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_software_increment()
539 reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; in kvm_pmu_software_increment()
541 __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; in kvm_pmu_software_increment()
548 reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1; in kvm_pmu_software_increment()
550 __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg; in kvm_pmu_software_increment()
552 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1); in kvm_pmu_software_increment()
555 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); in kvm_pmu_software_increment()
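
PMSWINC only touches enabled counters whose event type is the architectural software-increment event, and an overflow of the low half of a chained pair propagates into the odd counter instead of setting the low overflow bit. A sketch of the whole loop, assuming ARMV8_PMUV3_PERFCTR_SW_INCR and lower_32_bits() for the 32-bit wrap:

void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                return;

        /* Weed out disabled counters */
        val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

        for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
                u64 type, reg;

                if (!(val & BIT(i)))
                        continue;

                /* PMSWINC only applies to counters programmed with SW_INCR */
                type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
                type &= kvm_pmu_event_mask(vcpu->kvm);
                if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
                        continue;

                /* Increment this counter, wrapping at 32 bits */
                reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
                reg = lower_32_bits(reg);
                __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

                if (reg)        /* no overflow on the low part */
                        continue;

                if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
                        /* Carry into the high counter of the pair */
                        reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
                        reg = lower_32_bits(reg);
                        __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
                        if (!reg)       /* mark overflow on the high counter */
                                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
                } else {
                        /* Mark overflow on the low counter */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
                }
        }
}
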
565 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_handle_pmcr() argument
567 unsigned long mask = kvm_pmu_valid_counter_mask(vcpu); in kvm_pmu_handle_pmcr()
571 kvm_pmu_enable_counter_mask(vcpu, in kvm_pmu_handle_pmcr()
572 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask); in kvm_pmu_handle_pmcr()
574 kvm_pmu_disable_counter_mask(vcpu, mask); in kvm_pmu_handle_pmcr()
578 kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0); in kvm_pmu_handle_pmcr()
583 kvm_pmu_set_counter_value(vcpu, i, 0); in kvm_pmu_handle_pmcr()
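
A PMCR_EL0 write fans out into the enable/disable paths and the counter-reset bits: PMCR.C clears the cycle counter and PMCR.P clears the event counters. A reconstruction, assuming the usual ARMV8_PMU_PMCR_{E,C,P} flag tests (architecturally, PMCR.P should leave the cycle counter alone, and later kernels mask it out of this loop):

void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
        int i;

        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
        } else {
                kvm_pmu_disable_counter_mask(vcpu, mask);
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

        if (val & ARMV8_PMU_PMCR_P) {
                for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
}
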
587 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_counter_is_enabled() argument
589 return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && in kvm_pmu_counter_is_enabled()
590 (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx)); in kvm_pmu_counter_is_enabled()
598 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_create_perf_event() argument
600 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_create_perf_event()
615 data = __vcpu_sys_reg(vcpu, reg); in kvm_pmu_create_perf_event()
617 kvm_pmu_stop_counter(vcpu, pmc); in kvm_pmu_create_perf_event()
621 eventsel = data & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_create_perf_event()
631 if (vcpu->kvm->arch.pmu_filter && in kvm_pmu_create_perf_event()
632 !test_bit(eventsel, vcpu->kvm->arch.pmu_filter)) in kvm_pmu_create_perf_event()
639 attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx); in kvm_pmu_create_perf_event()
646 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_create_perf_event()
662 if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_create_perf_event()
688 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_update_pmc_chained() argument
690 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_pmc_chained()
695 new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) && in kvm_pmu_update_pmc_chained()
696 kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1); in kvm_pmu_update_pmc_chained()
702 kvm_pmu_stop_counter(vcpu, canonical_pmc); in kvm_pmu_update_pmc_chained()
708 kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc)); in kvm_pmu_update_pmc_chained()
709 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
712 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
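
A pair transitions between chained and unchained only when the newly computed state differs from the chained bitmap; in either direction the affected counters must be stopped first so their perf events get re-created with the right configuration. A sketch consistent with the matched lines:

static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
        bool new_state, old_state;

        old_state = kvm_pmu_pmc_is_chained(pmc);
        new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
                    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);

        if (old_state == new_state)
                return;

        canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
        kvm_pmu_stop_counter(vcpu, canonical_pmc);
        if (new_state) {
                /*
                 * During promotion from !chained to chained, make sure
                 * the adjacent counter is stopped and its event destroyed
                 */
                kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
                set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
                return;
        }
        clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}
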
725 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, in kvm_pmu_set_counter_event_type() argument
732 mask |= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_set_counter_event_type()
737 __vcpu_sys_reg(vcpu, reg) = data & mask; in kvm_pmu_set_counter_event_type()
739 kvm_pmu_update_pmc_chained(vcpu, select_idx); in kvm_pmu_set_counter_event_type()
740 kvm_pmu_create_perf_event(vcpu, select_idx); in kvm_pmu_set_counter_event_type()
788 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) in kvm_pmu_get_pmceid() argument
790 unsigned long *bmap = vcpu->kvm->arch.pmu_filter; in kvm_pmu_get_pmceid()
805 nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; in kvm_pmu_get_pmceid()
831 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) in kvm_arm_pmu_v3_enable() argument
833 if (!vcpu->arch.pmu.created) in kvm_arm_pmu_v3_enable()
841 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_enable()
842 int irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_enable()
843 if (!kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_enable()
852 if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq)) in kvm_arm_pmu_v3_enable()
854 } else if (kvm_arm_pmu_irq_initialized(vcpu)) { in kvm_arm_pmu_v3_enable()
858 kvm_pmu_vcpu_reset(vcpu); in kvm_arm_pmu_v3_enable()
859 vcpu->arch.pmu.ready = true; in kvm_arm_pmu_v3_enable()
864 static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) in kvm_arm_pmu_v3_init() argument
866 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_init()
874 if (!vgic_initialized(vcpu->kvm)) in kvm_arm_pmu_v3_init()
877 if (!kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_init()
880 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num, in kvm_arm_pmu_v3_init()
881 &vcpu->arch.pmu); in kvm_arm_pmu_v3_init()
886 init_irq_work(&vcpu->arch.pmu.overflow_work, in kvm_arm_pmu_v3_init()
889 vcpu->arch.pmu.created = true; in kvm_arm_pmu_v3_init()
901 struct kvm_vcpu *vcpu; in pmu_irq_is_valid() local
903 kvm_for_each_vcpu(i, vcpu, kvm) { in pmu_irq_is_valid()
904 if (!kvm_arm_pmu_irq_initialized(vcpu)) in pmu_irq_is_valid()
908 if (vcpu->arch.pmu.irq_num != irq) in pmu_irq_is_valid()
911 if (vcpu->arch.pmu.irq_num == irq) in pmu_irq_is_valid()
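
For a PPI, every vCPU with an initialized PMU interrupt must use the same interrupt number; for an SPI, no two vCPUs may share one. A reconstruction of the validity check implied by the two comparisons above:

static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        /* All vCPUs must use the same PPI number */
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        /* An SPI must not be claimed by two vCPUs */
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}
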
919 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_pmu_v3_set_attr() argument
922 !test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) in kvm_arm_pmu_v3_set_attr()
925 if (vcpu->arch.pmu.created) in kvm_arm_pmu_v3_set_attr()
928 if (!vcpu->kvm->arch.pmuver) in kvm_arm_pmu_v3_set_attr()
929 vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver(); in kvm_arm_pmu_v3_set_attr()
931 if (vcpu->kvm->arch.pmuver == 0xf) in kvm_arm_pmu_v3_set_attr()
939 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_set_attr()
949 if (!pmu_irq_is_valid(vcpu->kvm, irq)) in kvm_arm_pmu_v3_set_attr()
952 if (kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_set_attr()
956 vcpu->arch.pmu.irq_num = irq; in kvm_arm_pmu_v3_set_attr()
964 nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; in kvm_arm_pmu_v3_set_attr()
976 mutex_lock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
978 if (!vcpu->kvm->arch.pmu_filter) { in kvm_arm_pmu_v3_set_attr()
979 vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL); in kvm_arm_pmu_v3_set_attr()
980 if (!vcpu->kvm->arch.pmu_filter) { in kvm_arm_pmu_v3_set_attr()
981 mutex_unlock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
992 bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
994 bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
998 bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
1000 bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
1002 mutex_unlock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
1007 return kvm_arm_pmu_v3_init(vcpu); in kvm_arm_pmu_v3_set_attr()
1013 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_pmu_v3_get_attr() argument
1020 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_get_attr()
1023 if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) in kvm_arm_pmu_v3_get_attr()
1026 if (!kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_get_attr()
1029 irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_get_attr()
1037 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_pmu_v3_has_attr() argument
1044 test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) in kvm_arm_pmu_v3_has_attr()
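
The has-attr query accepts the PMUv3 device attributes whenever PMUv3 is supported on the host and the vCPU feature bit is set. A sketch, assuming the KVM_ARM_VCPU_PMU_V3_{IRQ,INIT,FILTER} attribute constants and a kvm_arm_support_pmu_v3() capability check:

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
        case KVM_ARM_VCPU_PMU_V3_FILTER:
                if (kvm_arm_support_pmu_v3() &&
                    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return 0;
        }

        return -ENXIO;
}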