// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

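/*
 * Reprogram any fixed counter whose control field changed in the guest's
 * new FIXED_CTR_CTRL value, then record the value for later comparisons.
 */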
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

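/*
 * Translate a guest eventsel (event select + unit mask) into a generic
 * perf hardware event ID, provided the matching architectural event is
 * advertised as available in the guest's CPUID 0x0A event mask.
 */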
static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

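/*
 * Map a fixed counter index to its generic perf event via the
 * fixed_pmc_events table; out-of-range indices yield PERF_COUNT_HW_MAX.
 */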
static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

	if (idx >= size)
		return PERF_COUNT_HW_MAX;

	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* Check if a PMC is enabled by comparing it against global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

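/*
 * Resolve a global PMC index to its struct kvm_pmc: GP counters occupy
 * indices below INTEL_PMC_IDX_FIXED, fixed counters sit at and above it.
 */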
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

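/*
 * idx uses the RDPMC ECX encoding: the low bits select the counter and
 * bit 30, when set, selects the fixed-counter space.
 */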
/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

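/*
 * The global control/status MSRs only exist for PMU version 2 and later;
 * any other MSR must match a GP counter, eventsel, or fixed counter MSR.
 */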
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}

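/*
 * Read a PMU MSR on the guest's behalf; counter reads are masked to the
 * counter width advertised to the guest.
 */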
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

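/*
 * Write a PMU MSR on the guest's behalf: reject writes that set reserved
 * bits, and reprogram the affected counters when a control value changes.
 */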
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		/*
		 * AnyThread bits (bit 2 of each 4-bit control field) and all
		 * fields beyond the three supported fixed counters are
		 * reserved.
		 */
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			/* Guest writes to a GP counter are sign-extended from bit 31. */
			if (msr_info->host_initiated)
				pmc->counter = data;
			else
				pmc->counter = (s32)data;
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter = data;
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

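/*
 * Rebuild the vCPU's PMU configuration from guest CPUID leaf 0xA,
 * clamping every parameter to what the host PMU actually supports.
 */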
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int,
			edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (kvm_x86_ops->pt_supported())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	/* CPUID.(EAX=07H, ECX=0):EBX bit 4 is HLE, bit 11 is RTM. */
	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & ((1u << 4) | (1u << 11))))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

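/*
 * One-time initialization: assign each GP and fixed counter its type,
 * owning vCPU, and position in the global counter index space.
 */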
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
}

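/*
 * Stop every counter and clear all PMU state back to power-on defaults.
 */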
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.pmc_perf_hw_id = intel_pmc_perf_hw_id,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_msr_idx = intel_is_valid_msr_idx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};