/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
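/*
 * Worked example (illustrative, not part of the original header): each
 * fixed counter owns a 4-bit field in IA32_FIXED_CTR_CTRL, so for
 * counter 1 this reads bits 7:4 (OS/USR enable, AnyThread, PMI):
 *
 *	u64 ctrl = 0x0b0;		hypothetical register value
 *	fixed_ctrl_field(ctrl, 1);	(0x0b0 >> 4) & 0xf == 0xb
 */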

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

struct kvm_pmu_ops {
	unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
					  u64 *mask);
	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
};
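/*
 * Sketch of how a vendor module is expected to fill this vtable with
 * designated initializers (callback names here are hypothetical; the
 * real tables are intel_pmu_ops/amd_pmu_ops, declared at the end of
 * this header):
 *
 *	struct kvm_pmu_ops example_pmu_ops = {
 *		.pmc_perf_hw_id	= example_pmc_perf_hw_id,
 *		.pmc_is_enabled	= example_pmc_is_enabled,
 *		.get_msr	= example_pmu_get_msr,
 *		.set_msr	= example_pmu_set_msr,
 *		...and so on for the remaining callbacks.
 *	};
 */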

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

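/*
 * Note (added for clarity, based on how these helpers fit together): the
 * live perf_event only accumulates the delta since it was last created;
 * pmc->counter holds the base value folded in by pmc_stop_counter(), and
 * masking with pmc_bitmask() emulates wrap-around at the architectural
 * counter width.
 */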
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
}

/* Returns the general-purpose PMC backing the specified MSR. Note that it
 * can be used for both PERFCTRn and EVNTSELn; that is why it accepts base
 * as a parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
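/*
 * Usage sketch (illustrative): an MSR handler probes each possible base
 * in turn, e.g. with the architectural counter/eventsel MSR bases:
 *
 *	struct kvm_pmc *pmc;
 *
 *	if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 *	    (pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)))
 *		return handle_gp_pmc_msr(pmc);	hypothetical helper
 */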

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}
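/*
 * Example (illustrative): the fixed-counter MSRs are contiguous, so
 * get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + 1) returns
 * &pmu->fixed_counters[1] when at least two fixed counters are exposed,
 * and NULL otherwise.
 */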

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */