// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* duplicated from amd_f17h_perfmon_event_map. */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* amd_pmc_perf_hw_id depends on these being the same size */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
	      ARRAY_SIZE(amd_f17h_event_mapping));

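/*
 * Pick the base MSR for the requested register type.  With PERFCTR_CORE
 * the guest sees six CTL/CTR pairs interleaved from MSR_F15H_PERF_CTL
 * (0xc0010200); without it, the legacy layout is four event selects at
 * MSR_K7_EVNTSEL0 (0xc0010000) and four counters at MSR_K7_PERFCTR0
 * (0xc0010004).
 */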
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

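/*
 * Map a counter or event select MSR to its counter index; the legacy K7
 * and PERFCTR_CORE aliases of counter N both resolve to the same index.
 */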
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

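/*
 * Translate an MSR to its kvm_pmc, returning NULL if the MSR is not a
 * PMU register of the requested type, or if a PERFCTR_CORE-only MSR is
 * accessed without the matching guest CPUID bit.
 */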
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

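/*
 * Translate the guest's event select and unit mask into a generic perf
 * hardware event id.  Family 17h and later use different encodings for
 * the cache and stall events, hence the second table.
 */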
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_event_hw_type_mapping *event_mapping;
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	if (guest_cpuid_family(pmc->vcpu) >= 0x17)
		event_mapping = amd_f17h_event_mapping;
	else
		event_mapping = amd_event_mapping;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (event_mapping[i].eventsel == event_select
		    && event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are always
 * enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

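/*
 * With PERFCTR_CORE the counter MSRs are interleaved with the event
 * select MSRs, so counter idx N lives at base + 2 * N; e.g. idx 2 maps
 * to MSR_F15H_PERF_CTR + 4, i.e. MSR_F15H_PERF_CTR2.
 */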
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

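	/*
	 * Bits [31:30] of the RDPMC index are flag bits (on Intel, bit 30
	 * selects the fixed counters); AMD has only general-purpose
	 * counters, so mask them off before the range check.
	 */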
	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
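		/*
		 * Fold the written value into the running count instead of
		 * restarting the perf event: a subsequent read returns
		 * 'data' plus whatever the event accumulates from here on.
		 */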
		pmc->counter += data - pmc_read_counter(pmc);
		pmc_update_sample_period(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel)
			reprogram_gp_counter(pmc, data);
		return 0;
	}

	return 1;
}

static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

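	/* AMD performance counters are architecturally 48 bits wide. */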
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	pmu->version = 1;
	/* not applicable to AMD; but clear them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

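/*
 * Initialize all AMD64_NUM_COUNTERS_CORE (six) counters up front;
 * amd_pmu_refresh() limits nr_arch_gp_counters to four when the guest
 * lacks PERFCTR_CORE.
 */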
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

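/* Dispatch table hooked into the common x86 PMU code in pmu.c. */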
struct kvm_pmu_ops amd_pmu_ops = {
	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};