/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
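/*
 * Illustrative example (not part of the original header): each fixed
 * counter owns a 4-bit field in IA32_FIXED_CTR_CTRL, so for fixed
 * counter 1 the macro selects bits 7:4:
 *
 *	fixed_ctrl_field(ctrl, 1) == ((ctrl) >> 4) & 0xf
 *
 * Per the Intel SDM, bits 0 and 1 of that field enable counting in
 * ring 0 and rings 1-3 respectively, and bit 3 enables the PMI on
 * overflow.
 */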

#define VMWARE_BACKDOOR_PMC_HOST_TSC 0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME 0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME 0x10002
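/*
 * These are not architectural PMC indices but pseudo indices that a
 * guest passes in ECX to RDPMC to reach the VMware backdoor;
 * is_vmware_backdoor_pmc() below recognizes them so they can be
 * serviced without touching a real counter.
 */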

#define MAX_FIXED_COUNTERS 3

/*
 * Maps a guest-programmable (event select, unit mask) pair to a generic
 * perf hardware event type; the vendor code keeps tables of these to
 * translate guest PMU programming into perf events.
 */
struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};
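/*
 * Example entry (illustrative, in the spirit of the Intel architectural
 * events table):
 *
 *	{ 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES }
 *
 * maps the "UnHalted Core Cycles" event to the generic perf cycles event.
 */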

/*
 * Vendor-specific PMU callbacks; Intel and AMD each provide an
 * implementation (intel_pmu_ops/amd_pmu_ops below), and the common code
 * dispatches through kvm_x86_ops.pmu_ops.
 */
struct kvm_pmu_ops {
	unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
};
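/*
 * Illustrative wiring (simplified sketch, not the verbatim vendor code):
 * each vendor module defines its own instance of the table above, e.g.
 *
 *	struct kvm_pmu_ops intel_pmu_ops = {
 *		.pmc_is_enabled	= intel_pmc_is_enabled,
 *		.refresh	= intel_pmu_refresh,
 *		...
 *	};
 *
 * and kvm_x86_ops.pmu_ops is pointed at the matching table during vendor
 * setup, so the inline wrappers below stay vendor-agnostic.
 */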

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

/*
 * Fold the live perf_event count (if any) into the last saved value and
 * truncate to the width of the guest counter.
 */
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

/*
 * Snapshot the current count before releasing the backing perf event so
 * the guest-visible value is preserved.
 */
static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
}

/*
 * A PERF_GLOBAL_CTRL value is valid only if it sets none of the bits
 * that are reserved for this vCPU's PMU configuration.
 */
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
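/*
 * Illustrative use (assuming the Intel MSR layout): the same helper
 * resolves both the counter and the event select MSR ranges,
 * distinguished only by the base passed in, e.g.
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);	(counter MSRs)
 *	pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);	(event select MSRs)
 */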

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

/*
 * The perf sample period is the number of events remaining until the
 * counter overflows, i.e. the two's complement of the current counter
 * value truncated to the counter's width (a full period if the counter
 * is zero).
 */
static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}
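/*
 * Worked example (illustrative): with a 48-bit counter the bitmask is
 * 0xffffffffffff; a guest counter value of 0xfffffffffff0 yields a
 * sample period of 0x10, i.e. the perf event fires after the 16 events
 * it would take the guest counter to overflow.  A counter value of 0
 * maps to a full period of 2^48.
 */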

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */