/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

#define MAX_FIXED_COUNTERS	3

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

struct kvm_pmu_ops {
	unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);
};

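/* Mask of valid counter bits for this PMC, based on its type (GP or fixed). */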
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

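/*
 * Read the current counter value: the saved count plus any pending delta
 * from the backing perf_event, masked to the counter's width.
 */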
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

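/* Release the backing perf_event, if any, and clear the cached config. */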
static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
}

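/* A GLOBAL_CTRL write is valid iff it sets no reserved bits. */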
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
						 u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

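/*
 * Sample period needed for the counter to overflow starting from
 * counter_value; a full period if the counter is already at zero.
 */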
static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}

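/* Re-arm the backing perf_event so its period matches the current counter. */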
static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused)
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */