1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/perf_event.h>
3 #include <linux/types.h>
4 
5 #include <asm/perf_event.h>
6 #include <asm/msr.h>
7 #include <asm/insn.h>
8 
9 #include "../perf_event.h"
10 
11 static const enum {
12 	LBR_EIP_FLAGS		= 1,
13 	LBR_TSX			= 2,
14 } lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
15 	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
16 	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
17 };
18 
19 /*
20  * Intel LBR_SELECT bits
21  * Intel Vol3a, April 2011, Section 16.7 Table 16-10
22  *
23  * Hardware branch filter (not available on all CPUs)
24  */
25 #define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
26 #define LBR_USER_BIT		1 /* do not capture at ring > 0 */
27 #define LBR_JCC_BIT		2 /* do not capture conditional branches */
28 #define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
29 #define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
30 #define LBR_RETURN_BIT		5 /* do not capture near returns */
31 #define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
32 #define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
33 #define LBR_FAR_BIT		8 /* do not capture far branches */
34 #define LBR_CALL_STACK_BIT	9 /* enable call stack */
35 
36 /*
37  * The following bit only exists in Linux; we mask it out before writing it to
38  * the actual MSR. But it helps the constraint perf code to understand
39  * that this is a separate configuration.
40  */
41 #define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */
42 
43 #define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
44 #define LBR_USER	(1 << LBR_USER_BIT)
45 #define LBR_JCC		(1 << LBR_JCC_BIT)
46 #define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
47 #define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
48 #define LBR_RETURN	(1 << LBR_RETURN_BIT)
49 #define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
50 #define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
51 #define LBR_FAR		(1 << LBR_FAR_BIT)
52 #define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
53 #define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)
54 
55 #define LBR_PLM (LBR_KERNEL | LBR_USER)
56 
57 #define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
58 #define LBR_NOT_SUPP	-1	/* LBR filter not supported */
59 #define LBR_IGN		0	/* ignored */
60 
61 #define LBR_ANY		 \
62 	(LBR_JCC	|\
63 	 LBR_REL_CALL	|\
64 	 LBR_IND_CALL	|\
65 	 LBR_RETURN	|\
66 	 LBR_REL_JMP	|\
67 	 LBR_IND_JMP	|\
68 	 LBR_FAR)
69 
70 #define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
71 #define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
72 #define LBR_FROM_FLAG_ABORT	BIT_ULL(61)
73 
74 #define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))
75 
76 /*
77  * x86 control flow change classification
78  * x86 control flow changes include branches, interrupts, traps, faults
79  */
80 enum {
81 	X86_BR_NONE		= 0,      /* unknown */
82 
83 	X86_BR_USER		= 1 << 0, /* branch target is user */
84 	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */
85 
86 	X86_BR_CALL		= 1 << 2, /* call */
87 	X86_BR_RET		= 1 << 3, /* return */
88 	X86_BR_SYSCALL		= 1 << 4, /* syscall */
89 	X86_BR_SYSRET		= 1 << 5, /* syscall return */
90 	X86_BR_INT		= 1 << 6, /* sw interrupt */
91 	X86_BR_IRET		= 1 << 7, /* return from interrupt */
92 	X86_BR_JCC		= 1 << 8, /* conditional */
93 	X86_BR_JMP		= 1 << 9, /* jump */
94 	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
95 	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
96 	X86_BR_ABORT		= 1 << 12,/* transaction abort */
97 	X86_BR_IN_TX		= 1 << 13,/* in transaction */
98 	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
99 	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
100 	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
101 	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
102 
103 	X86_BR_TYPE_SAVE	= 1 << 18,/* indicate to save branch type */
104 
105 };
106 
107 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
108 #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
109 
110 #define X86_BR_ANY       \
111 	(X86_BR_CALL    |\
112 	 X86_BR_RET     |\
113 	 X86_BR_SYSCALL |\
114 	 X86_BR_SYSRET  |\
115 	 X86_BR_INT     |\
116 	 X86_BR_IRET    |\
117 	 X86_BR_JCC     |\
118 	 X86_BR_JMP	 |\
119 	 X86_BR_IRQ	 |\
120 	 X86_BR_ABORT	 |\
121 	 X86_BR_IND_CALL |\
122 	 X86_BR_IND_JMP  |\
123 	 X86_BR_ZERO_CALL)
124 
125 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
126 
127 #define X86_BR_ANY_CALL		 \
128 	(X86_BR_CALL		|\
129 	 X86_BR_IND_CALL	|\
130 	 X86_BR_ZERO_CALL	|\
131 	 X86_BR_SYSCALL		|\
132 	 X86_BR_IRQ		|\
133 	 X86_BR_INT)
134 
135 /*
136  * Intel LBR_CTL bits
137  *
138  * Hardware branch filter for Arch LBR
139  */
140 #define ARCH_LBR_KERNEL_BIT		1  /* capture at ring0 */
141 #define ARCH_LBR_USER_BIT		2  /* capture at ring > 0 */
142 #define ARCH_LBR_CALL_STACK_BIT		3  /* enable call stack */
143 #define ARCH_LBR_JCC_BIT		16 /* capture conditional branches */
144 #define ARCH_LBR_REL_JMP_BIT		17 /* capture relative jumps */
145 #define ARCH_LBR_IND_JMP_BIT		18 /* capture indirect jumps */
146 #define ARCH_LBR_REL_CALL_BIT		19 /* capture relative calls */
147 #define ARCH_LBR_IND_CALL_BIT		20 /* capture indirect calls */
148 #define ARCH_LBR_RETURN_BIT		21 /* capture near returns */
149 #define ARCH_LBR_OTHER_BRANCH_BIT	22 /* capture other branches */
150 
151 #define ARCH_LBR_KERNEL			(1ULL << ARCH_LBR_KERNEL_BIT)
152 #define ARCH_LBR_USER			(1ULL << ARCH_LBR_USER_BIT)
153 #define ARCH_LBR_CALL_STACK		(1ULL << ARCH_LBR_CALL_STACK_BIT)
154 #define ARCH_LBR_JCC			(1ULL << ARCH_LBR_JCC_BIT)
155 #define ARCH_LBR_REL_JMP		(1ULL << ARCH_LBR_REL_JMP_BIT)
156 #define ARCH_LBR_IND_JMP		(1ULL << ARCH_LBR_IND_JMP_BIT)
157 #define ARCH_LBR_REL_CALL		(1ULL << ARCH_LBR_REL_CALL_BIT)
158 #define ARCH_LBR_IND_CALL		(1ULL << ARCH_LBR_IND_CALL_BIT)
159 #define ARCH_LBR_RETURN			(1ULL << ARCH_LBR_RETURN_BIT)
160 #define ARCH_LBR_OTHER_BRANCH		(1ULL << ARCH_LBR_OTHER_BRANCH_BIT)
161 
162 #define ARCH_LBR_ANY			 \
163 	(ARCH_LBR_JCC			|\
164 	 ARCH_LBR_REL_JMP		|\
165 	 ARCH_LBR_IND_JMP		|\
166 	 ARCH_LBR_REL_CALL		|\
167 	 ARCH_LBR_IND_CALL		|\
168 	 ARCH_LBR_RETURN		|\
169 	 ARCH_LBR_OTHER_BRANCH)
170 
171 #define ARCH_LBR_CTL_MASK			0x7f000e
172 
173 static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
174 
175 static __always_inline bool is_lbr_call_stack_bit_set(u64 config)
176 {
177 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
178 		return !!(config & ARCH_LBR_CALL_STACK);
179 
180 	return !!(config & LBR_CALL_STACK);
181 }
182 
183 /*
184  * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
185  * otherwise it becomes nearly impossible to get a reliable stack.
186  */
187 
188 static void __intel_pmu_lbr_enable(bool pmi)
189 {
190 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
191 	u64 debugctl, lbr_select = 0, orig_debugctl;
192 
193 	/*
194 	 * No need to unfreeze manually, as v4 can do that as part
195 	 * of the GLOBAL_STATUS ack.
196 	 */
197 	if (pmi && x86_pmu.version >= 4)
198 		return;
199 
200 	/*
201 	 * No need to reprogram LBR_SELECT in a PMI, as it
202 	 * did not change.
203 	 */
204 	if (cpuc->lbr_sel)
205 		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
206 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
207 		wrmsrl(MSR_LBR_SELECT, lbr_select);
208 
209 	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
210 	orig_debugctl = debugctl;
211 
212 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
213 		debugctl |= DEBUGCTLMSR_LBR;
214 	/*
215 	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
216 	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
217 	 * may cause superfluous increase/decrease of LBR_TOS.
218 	 */
219 	if (is_lbr_call_stack_bit_set(lbr_select))
220 		debugctl &= ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
221 	else
222 		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
223 
224 	if (orig_debugctl != debugctl)
225 		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
226 
227 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
228 		wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
229 }
230 
231 static void __intel_pmu_lbr_disable(void)
232 {
233 	u64 debugctl;
234 
235 	if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
236 		wrmsrl(MSR_ARCH_LBR_CTL, 0);
237 		return;
238 	}
239 
240 	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
241 	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
242 	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
243 }
244 
245 void intel_pmu_lbr_reset_32(void)
246 {
247 	int i;
248 
249 	for (i = 0; i < x86_pmu.lbr_nr; i++)
250 		wrmsrl(x86_pmu.lbr_from + i, 0);
251 }
252 
253 void intel_pmu_lbr_reset_64(void)
254 {
255 	int i;
256 
257 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
258 		wrmsrl(x86_pmu.lbr_from + i, 0);
259 		wrmsrl(x86_pmu.lbr_to   + i, 0);
260 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
261 			wrmsrl(x86_pmu.lbr_info + i, 0);
262 	}
263 }
264 
265 static void intel_pmu_arch_lbr_reset(void)
266 {
267 	/* Writing to the ARCH_LBR_DEPTH MSR resets all LBR entries to 0 */
268 	wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
269 }
270 
271 void intel_pmu_lbr_reset(void)
272 {
273 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
274 
275 	if (!x86_pmu.lbr_nr)
276 		return;
277 
278 	x86_pmu.lbr_reset();
279 
280 	cpuc->last_task_ctx = NULL;
281 	cpuc->last_log_id = 0;
282 }
283 
284 /*
285  * TOS = most recently recorded branch
286  */
287 static inline u64 intel_pmu_lbr_tos(void)
288 {
289 	u64 tos;
290 
291 	rdmsrl(x86_pmu.lbr_tos, tos);
292 	return tos;
293 }
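/*
 * Illustrative sketch, not part of the original file: x86_pmu.lbr_nr is a
 * power of two, so the LBR stack is walked as a ring buffer by masked
 * subtraction from TOS, most-recent entry first.  With lbr_nr = 16 and
 * tos = 3, the loop below visits indices 3, 2, 1, 0, 15, 14, ..., 4.
 * The example_* name is hypothetical.
 */
static inline void __maybe_unused example_walk_lbr_ring(void)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;	/* lbr_nr is a power of two */
	u64 tos = intel_pmu_lbr_tos();
	unsigned long idx;
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		idx = (tos - i) & mask;		/* wraps past index 0 */
		(void)idx;			/* a real reader would rdmsr here */
	}
}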
294 
295 enum {
296 	LBR_NONE,
297 	LBR_VALID,
298 };
299 
300 /*
301  * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
302  * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
303  * TSX is not supported they have no consistent behavior:
304  *
305  *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
306  *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
307  *     part of the sign extension.
308  *
309  * Therefore, if:
310  *
311  *   1) LBR has TSX format
312  *   2) CPU has no TSX support enabled
313  *
314  * ... then any value passed to wrmsr() must be sign-extended to 63 bits and any
315  * value from rdmsr() must be converted to have a 61-bit sign extension,
316  * ignoring the TSX flags.
317  */
318 static inline bool lbr_from_signext_quirk_needed(void)
319 {
320 	int lbr_format = x86_pmu.intel_cap.lbr_format;
321 	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
322 			   boot_cpu_has(X86_FEATURE_RTM);
323 
324 	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
325 }
326 
327 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
328 
329 /* If quirk is enabled, ensure sign extension is 63 bits: */
330 inline u64 lbr_from_signext_quirk_wr(u64 val)
331 {
332 	if (static_branch_unlikely(&lbr_from_quirk_key)) {
333 		/*
334 		 * Sign extend into bits 61:62 while preserving bit 63.
335 		 *
336 		 * Quirk is enabled when TSX is disabled. Therefore TSX bits
337 		 * in val are always OFF and must be changed to be sign
338 		 * extension bits. Since bits 59:60 are guaranteed to be
339 		 * part of the sign extension bits, we can just copy them
340 		 * to 61:62.
341 		 */
342 		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
343 	}
344 	return val;
345 }
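/*
 * Worked example (illustrative, not in the original file): with the quirk
 * enabled, bits 59:60 of the value are already sign-extension bits, so
 * copying them into the cleared TSX bits 61:62 rebuilds a 63-bit sign
 * extension:
 *
 *   val                                = 0x1800000000000000  (bits 59:60 set)
 *   (LBR_FROM_SIGNEXT_2MSB & val) << 2 = 0x6000000000000000  (bits 61:62)
 *   val | the above                    = 0x7800000000000000  (bits 59:62 set)
 */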
346 
347 /*
348  * If quirk is needed, ensure sign extension is 61 bits:
349  */
350 static u64 lbr_from_signext_quirk_rd(u64 val)
351 {
352 	if (static_branch_unlikely(&lbr_from_quirk_key)) {
353 		/*
354 		 * Quirk is on when TSX is not enabled. Therefore TSX
355 		 * flags must be read as OFF.
356 		 */
357 		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
358 	}
359 	return val;
360 }
361 
362 static __always_inline void wrlbr_from(unsigned int idx, u64 val)
363 {
364 	val = lbr_from_signext_quirk_wr(val);
365 	wrmsrl(x86_pmu.lbr_from + idx, val);
366 }
367 
368 static __always_inline void wrlbr_to(unsigned int idx, u64 val)
369 {
370 	wrmsrl(x86_pmu.lbr_to + idx, val);
371 }
372 
373 static __always_inline void wrlbr_info(unsigned int idx, u64 val)
374 {
375 	wrmsrl(x86_pmu.lbr_info + idx, val);
376 }
377 
378 static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
379 {
380 	u64 val;
381 
382 	if (lbr)
383 		return lbr->from;
384 
385 	rdmsrl(x86_pmu.lbr_from + idx, val);
386 
387 	return lbr_from_signext_quirk_rd(val);
388 }
389 
390 static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
391 {
392 	u64 val;
393 
394 	if (lbr)
395 		return lbr->to;
396 
397 	rdmsrl(x86_pmu.lbr_to + idx, val);
398 
399 	return val;
400 }
401 
402 static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
403 {
404 	u64 val;
405 
406 	if (lbr)
407 		return lbr->info;
408 
409 	rdmsrl(x86_pmu.lbr_info + idx, val);
410 
411 	return val;
412 }
413 
414 static inline void
415 wrlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
416 {
417 	wrlbr_from(idx, lbr->from);
418 	wrlbr_to(idx, lbr->to);
419 	if (need_info)
420 		wrlbr_info(idx, lbr->info);
421 }
422 
423 static inline bool
424 rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
425 {
426 	u64 from = rdlbr_from(idx, NULL);
427 
428 	/* Don't read invalid entry */
429 	if (!from)
430 		return false;
431 
432 	lbr->from = from;
433 	lbr->to = rdlbr_to(idx, NULL);
434 	if (need_info)
435 		lbr->info = rdlbr_info(idx, NULL);
436 
437 	return true;
438 }
439 
440 void intel_pmu_lbr_restore(void *ctx)
441 {
442 	bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
443 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
444 	struct x86_perf_task_context *task_ctx = ctx;
445 	int i;
446 	unsigned lbr_idx, mask;
447 	u64 tos = task_ctx->tos;
448 
449 	mask = x86_pmu.lbr_nr - 1;
450 	for (i = 0; i < task_ctx->valid_lbrs; i++) {
451 		lbr_idx = (tos - i) & mask;
452 		wrlbr_all(&task_ctx->lbr[i], lbr_idx, need_info);
453 	}
454 
455 	for (; i < x86_pmu.lbr_nr; i++) {
456 		lbr_idx = (tos - i) & mask;
457 		wrlbr_from(lbr_idx, 0);
458 		wrlbr_to(lbr_idx, 0);
459 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
460 			wrlbr_info(lbr_idx, 0);
461 	}
462 
463 	wrmsrl(x86_pmu.lbr_tos, tos);
464 
465 	if (cpuc->lbr_select)
466 		wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
467 }
468 
469 static void intel_pmu_arch_lbr_restore(void *ctx)
470 {
471 	struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
472 	struct lbr_entry *entries = task_ctx->entries;
473 	int i;
474 
475 	/* Fast reset the LBRs before restore if the call stack is not full. */
476 	if (!entries[x86_pmu.lbr_nr - 1].from)
477 		intel_pmu_arch_lbr_reset();
478 
479 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
480 		if (!entries[i].from)
481 			break;
482 		wrlbr_all(&entries[i], i, true);
483 	}
484 }
485 
486 /*
487  * Restore the Architecture LBR state from the xsave area in the perf
488  * context data for the task via the XRSTORS instruction.
489  */
490 static void intel_pmu_arch_lbr_xrstors(void *ctx)
491 {
492 	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
493 
494 	copy_kernel_to_dynamic_supervisor(&task_ctx->xsave, XFEATURE_MASK_LBR);
495 }
496 
497 static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
498 {
499 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
500 		return x86_pmu.lbr_deep_c_reset && !rdlbr_from(0, NULL);
501 
502 	return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL);
503 }
504 
505 static void __intel_pmu_lbr_restore(void *ctx)
506 {
507 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
508 
509 	if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
510 	    task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
511 		intel_pmu_lbr_reset();
512 		return;
513 	}
514 
515 	/*
516 	 * Do not restore the LBR registers if:
517 	 * - no one else touched them, and
518 	 * - they were not cleared in a C-state.
519 	 */
520 	if ((ctx == cpuc->last_task_ctx) &&
521 	    (task_context_opt(ctx)->log_id == cpuc->last_log_id) &&
522 	    !lbr_is_reset_in_cstate(ctx)) {
523 		task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
524 		return;
525 	}
526 
527 	x86_pmu.lbr_restore(ctx);
528 
529 	task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
530 }
531 
532 void intel_pmu_lbr_save(void *ctx)
533 {
534 	bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
535 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
536 	struct x86_perf_task_context *task_ctx = ctx;
537 	unsigned lbr_idx, mask;
538 	u64 tos;
539 	int i;
540 
541 	mask = x86_pmu.lbr_nr - 1;
542 	tos = intel_pmu_lbr_tos();
543 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
544 		lbr_idx = (tos - i) & mask;
545 		if (!rdlbr_all(&task_ctx->lbr[i], lbr_idx, need_info))
546 			break;
547 	}
548 	task_ctx->valid_lbrs = i;
549 	task_ctx->tos = tos;
550 
551 	if (cpuc->lbr_select)
552 		rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
553 }
554 
555 static void intel_pmu_arch_lbr_save(void *ctx)
556 {
557 	struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
558 	struct lbr_entry *entries = task_ctx->entries;
559 	int i;
560 
561 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
562 		if (!rdlbr_all(&entries[i], i, true))
563 			break;
564 	}
565 
566 	/* LBR call stack is not full. Reset is required in restore. */
567 	if (i < x86_pmu.lbr_nr)
568 		entries[x86_pmu.lbr_nr - 1].from = 0;
569 }
570 
571 /*
572  * Save the Architecture LBR state to the xsave area in the perf
573  * context data for the task via the XSAVES instruction.
574  */
575 static void intel_pmu_arch_lbr_xsaves(void *ctx)
576 {
577 	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
578 
579 	copy_dynamic_supervisor_to_kernel(&task_ctx->xsave, XFEATURE_MASK_LBR);
580 }
581 
582 static void __intel_pmu_lbr_save(void *ctx)
583 {
584 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
585 
586 	if (task_context_opt(ctx)->lbr_callstack_users == 0) {
587 		task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
588 		return;
589 	}
590 
591 	x86_pmu.lbr_save(ctx);
592 
593 	task_context_opt(ctx)->lbr_stack_state = LBR_VALID;
594 
595 	cpuc->last_task_ctx = ctx;
596 	cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
597 }
598 
599 void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
600 				 struct perf_event_context *next)
601 {
602 	void *prev_ctx_data, *next_ctx_data;
603 
604 	swap(prev->task_ctx_data, next->task_ctx_data);
605 
606 	/*
607 	 * Architecture specific synchronization makes sense in
608 	 * case both prev->task_ctx_data and next->task_ctx_data
609 	 * pointers are allocated.
610 	 */
611 
612 	prev_ctx_data = next->task_ctx_data;
613 	next_ctx_data = prev->task_ctx_data;
614 
615 	if (!prev_ctx_data || !next_ctx_data)
616 		return;
617 
618 	swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
619 	     task_context_opt(next_ctx_data)->lbr_callstack_users);
620 }
621 
622 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
623 {
624 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
625 	void *task_ctx;
626 
627 	if (!cpuc->lbr_users)
628 		return;
629 
630 	/*
631 	 * If LBR callstack feature is enabled and the stack was saved when
632 	 * the task was scheduled out, restore the stack. Otherwise flush
633 	 * the LBR stack.
634 	 */
635 	task_ctx = ctx ? ctx->task_ctx_data : NULL;
636 	if (task_ctx) {
637 		if (sched_in)
638 			__intel_pmu_lbr_restore(task_ctx);
639 		else
640 			__intel_pmu_lbr_save(task_ctx);
641 		return;
642 	}
643 
644 	/*
645 	 * Since a context switch can flip the address space and LBR entries
646 	 * are not tagged with an identifier, we need to wipe the LBR, even for
647 	 * per-cpu events. You simply cannot resolve the branches from the old
648 	 * address space.
649 	 */
650 	if (sched_in)
651 		intel_pmu_lbr_reset();
652 }
653 
654 static inline bool branch_user_callstack(unsigned br_sel)
655 {
656 	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
657 }
658 
659 void intel_pmu_lbr_add(struct perf_event *event)
660 {
661 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
662 
663 	if (!x86_pmu.lbr_nr)
664 		return;
665 
666 	if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
667 		cpuc->lbr_select = 1;
668 
669 	cpuc->br_sel = event->hw.branch_reg.reg;
670 
671 	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
672 		task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;
673 
674 	/*
675 	 * Request pmu::sched_task() callback, which will fire inside the
676 	 * regular perf event scheduling, so that call will:
677 	 *
678 	 *  - restore or wipe when using LBR call-stack mode,
679 	 *  - wipe otherwise,
680 	 *
681 	 * when this is from __perf_event_task_sched_in().
682 	 *
683 	 * However, if this is from perf_install_in_context(), no such callback
684 	 * will follow and we'll need to reset the LBR here if this is the
685 	 * first LBR event.
686 	 *
687 	 * The problem is, we cannot tell these cases apart... but we can
688 	 * exclude the biggest chunk of cases by looking at
689 	 * event->total_time_running. An event that has accrued runtime cannot
690 	 * be 'new'. Conversely, a new event can get installed through the
691 	 * context switch path for the first time.
692 	 */
693 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
694 		cpuc->lbr_pebs_users++;
695 	perf_sched_cb_inc(event->ctx->pmu);
696 	if (!cpuc->lbr_users++ && !event->total_time_running)
697 		intel_pmu_lbr_reset();
698 }
699 
700 void release_lbr_buffers(void)
701 {
702 	struct kmem_cache *kmem_cache;
703 	struct cpu_hw_events *cpuc;
704 	int cpu;
705 
706 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
707 		return;
708 
709 	for_each_possible_cpu(cpu) {
710 		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
711 		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
712 		if (kmem_cache && cpuc->lbr_xsave) {
713 			kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
714 			cpuc->lbr_xsave = NULL;
715 		}
716 	}
717 }
718 
719 void reserve_lbr_buffers(void)
720 {
721 	struct kmem_cache *kmem_cache;
722 	struct cpu_hw_events *cpuc;
723 	int cpu;
724 
725 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
726 		return;
727 
728 	for_each_possible_cpu(cpu) {
729 		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
730 		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
731 		if (!kmem_cache || cpuc->lbr_xsave)
732 			continue;
733 
734 		cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
735 							GFP_KERNEL | __GFP_ZERO,
736 							cpu_to_node(cpu));
737 	}
738 }
739 
740 void intel_pmu_lbr_del(struct perf_event *event)
741 {
742 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
743 
744 	if (!x86_pmu.lbr_nr)
745 		return;
746 
747 	if (branch_user_callstack(cpuc->br_sel) &&
748 	    event->ctx->task_ctx_data)
749 		task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;
750 
751 	if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
752 		cpuc->lbr_select = 0;
753 
754 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
755 		cpuc->lbr_pebs_users--;
756 	cpuc->lbr_users--;
757 	WARN_ON_ONCE(cpuc->lbr_users < 0);
758 	WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
759 	perf_sched_cb_dec(event->ctx->pmu);
760 }
761 
762 static inline bool vlbr_exclude_host(void)
763 {
764 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
765 
766 	return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
767 		(unsigned long *)&cpuc->intel_ctrl_guest_mask);
768 }
769 
770 void intel_pmu_lbr_enable_all(bool pmi)
771 {
772 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
773 
774 	if (cpuc->lbr_users && !vlbr_exclude_host())
775 		__intel_pmu_lbr_enable(pmi);
776 }
777 
778 void intel_pmu_lbr_disable_all(void)
779 {
780 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
781 
782 	if (cpuc->lbr_users && !vlbr_exclude_host())
783 		__intel_pmu_lbr_disable();
784 }
785 
786 void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
787 {
788 	unsigned long mask = x86_pmu.lbr_nr - 1;
789 	u64 tos = intel_pmu_lbr_tos();
790 	int i;
791 
792 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
793 		unsigned long lbr_idx = (tos - i) & mask;
794 		union {
795 			struct {
796 				u32 from;
797 				u32 to;
798 			};
799 			u64     lbr;
800 		} msr_lastbranch;
801 
802 		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
803 
804 		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
805 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
806 		cpuc->lbr_entries[i].mispred	= 0;
807 		cpuc->lbr_entries[i].predicted	= 0;
808 		cpuc->lbr_entries[i].in_tx	= 0;
809 		cpuc->lbr_entries[i].abort	= 0;
810 		cpuc->lbr_entries[i].cycles	= 0;
811 		cpuc->lbr_entries[i].type	= 0;
812 		cpuc->lbr_entries[i].reserved	= 0;
813 	}
814 	cpuc->lbr_stack.nr = i;
815 	cpuc->lbr_stack.hw_idx = tos;
816 }
817 
818 /*
819  * Due to lack of segmentation in Linux the effective address (offset)
820  * is the same as the linear address, allowing us to merge the LIP and EIP
821  * LBR formats.
822  */
823 void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
824 {
825 	bool need_info = false, call_stack = false;
826 	unsigned long mask = x86_pmu.lbr_nr - 1;
827 	int lbr_format = x86_pmu.intel_cap.lbr_format;
828 	u64 tos = intel_pmu_lbr_tos();
829 	int i;
830 	int out = 0;
831 	int num = x86_pmu.lbr_nr;
832 
833 	if (cpuc->lbr_sel) {
834 		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
835 		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
836 			call_stack = true;
837 	}
838 
839 	for (i = 0; i < num; i++) {
840 		unsigned long lbr_idx = (tos - i) & mask;
841 		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
842 		int skip = 0;
843 		u16 cycles = 0;
844 		int lbr_flags = lbr_desc[lbr_format];
845 
846 		from = rdlbr_from(lbr_idx, NULL);
847 		to   = rdlbr_to(lbr_idx, NULL);
848 
849 		/*
850 		 * Read LBR call stack entries
851 		 * until invalid entry (0s) is detected.
852 		 */
853 		if (call_stack && !from)
854 			break;
855 
856 		if (lbr_format == LBR_FORMAT_INFO && need_info) {
857 			u64 info;
858 
859 			info = rdlbr_info(lbr_idx, NULL);
860 			mis = !!(info & LBR_INFO_MISPRED);
861 			pred = !mis;
862 			in_tx = !!(info & LBR_INFO_IN_TX);
863 			abort = !!(info & LBR_INFO_ABORT);
864 			cycles = (info & LBR_INFO_CYCLES);
865 		}
866 
867 		if (lbr_format == LBR_FORMAT_TIME) {
868 			mis = !!(from & LBR_FROM_FLAG_MISPRED);
869 			pred = !mis;
870 			skip = 1;
871 			cycles = ((to >> 48) & LBR_INFO_CYCLES);
872 
873 			to = (u64)((((s64)to) << 16) >> 16);
874 		}
875 
876 		if (lbr_flags & LBR_EIP_FLAGS) {
877 			mis = !!(from & LBR_FROM_FLAG_MISPRED);
878 			pred = !mis;
879 			skip = 1;
880 		}
881 		if (lbr_flags & LBR_TSX) {
882 			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
883 			abort = !!(from & LBR_FROM_FLAG_ABORT);
884 			skip = 3;
885 		}
886 		from = (u64)((((s64)from) << skip) >> skip);
887 
888 		/*
889 		 * Some CPUs report duplicated abort records,
890 		 * with the second entry not having an abort bit set.
891 		 * Skip them here. This loop runs backwards,
892 		 * so we need to undo the previous record.
893 		 * If the abort just happened outside the window
894 		 * the extra entry cannot be removed.
895 		 */
896 		if (abort && x86_pmu.lbr_double_abort && out > 0)
897 			out--;
898 
899 		cpuc->lbr_entries[out].from	 = from;
900 		cpuc->lbr_entries[out].to	 = to;
901 		cpuc->lbr_entries[out].mispred	 = mis;
902 		cpuc->lbr_entries[out].predicted = pred;
903 		cpuc->lbr_entries[out].in_tx	 = in_tx;
904 		cpuc->lbr_entries[out].abort	 = abort;
905 		cpuc->lbr_entries[out].cycles	 = cycles;
906 		cpuc->lbr_entries[out].type	 = 0;
907 		cpuc->lbr_entries[out].reserved	 = 0;
908 		out++;
909 	}
910 	cpuc->lbr_stack.nr = out;
911 	cpuc->lbr_stack.hw_idx = tos;
912 }
913 
914 static __always_inline int get_lbr_br_type(u64 info)
915 {
916 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
917 		return 0;
918 
919 	return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
920 }
921 
922 static __always_inline bool get_lbr_mispred(u64 info)
923 {
924 	if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
925 		return 0;
926 
927 	return !!(info & LBR_INFO_MISPRED);
928 }
929 
930 static __always_inline bool get_lbr_predicted(u64 info)
931 {
932 	if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
933 		return 0;
934 
935 	return !(info & LBR_INFO_MISPRED);
936 }
937 
938 static __always_inline u16 get_lbr_cycles(u64 info)
939 {
940 	if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
941 	    !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
942 		return 0;
943 
944 	return info & LBR_INFO_CYCLES;
945 }
946 
947 static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
948 				struct lbr_entry *entries)
949 {
950 	struct perf_branch_entry *e;
951 	struct lbr_entry *lbr;
952 	u64 from, to, info;
953 	int i;
954 
955 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
956 		lbr = entries ? &entries[i] : NULL;
957 		e = &cpuc->lbr_entries[i];
958 
959 		from = rdlbr_from(i, lbr);
960 		/*
961 		 * Read LBR entries until invalid entry (0s) is detected.
962 		 */
963 		if (!from)
964 			break;
965 
966 		to = rdlbr_to(i, lbr);
967 		info = rdlbr_info(i, lbr);
968 
969 		e->from		= from;
970 		e->to		= to;
971 		e->mispred	= get_lbr_mispred(info);
972 		e->predicted	= get_lbr_predicted(info);
973 		e->in_tx	= !!(info & LBR_INFO_IN_TX);
974 		e->abort	= !!(info & LBR_INFO_ABORT);
975 		e->cycles	= get_lbr_cycles(info);
976 		e->type		= get_lbr_br_type(info);
977 		e->reserved	= 0;
978 	}
979 
980 	cpuc->lbr_stack.nr = i;
981 }
982 
983 static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
984 {
985 	intel_pmu_store_lbr(cpuc, NULL);
986 }
987 
988 static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
989 {
990 	struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave;
991 
992 	if (!xsave) {
993 		intel_pmu_store_lbr(cpuc, NULL);
994 		return;
995 	}
996 	copy_dynamic_supervisor_to_kernel(&xsave->xsave, XFEATURE_MASK_LBR);
997 
998 	intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
999 }
1000 
1001 void intel_pmu_lbr_read(void)
1002 {
1003 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1004 
1005 	/*
1006 	 * Don't read when all LBR users are using adaptive PEBS.
1007 	 *
1008 	 * This could be smarter and actually check the event,
1009 	 * but this simple approach seems to work for now.
1010 	 */
1011 	if (!cpuc->lbr_users || vlbr_exclude_host() ||
1012 	    cpuc->lbr_users == cpuc->lbr_pebs_users)
1013 		return;
1014 
1015 	x86_pmu.lbr_read(cpuc);
1016 
1017 	intel_pmu_lbr_filter(cpuc);
1018 }
1019 
1020 /*
1021  * SW filter is used:
1022  * - in case there is no HW filter
1023  * - in case the HW filter has errata or limitations
1024  */
1025 static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
1026 {
1027 	u64 br_type = event->attr.branch_sample_type;
1028 	int mask = 0;
1029 
1030 	if (br_type & PERF_SAMPLE_BRANCH_USER)
1031 		mask |= X86_BR_USER;
1032 
1033 	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
1034 		mask |= X86_BR_KERNEL;
1035 
1036 	/* we ignore BRANCH_HV here */
1037 
1038 	if (br_type & PERF_SAMPLE_BRANCH_ANY)
1039 		mask |= X86_BR_ANY;
1040 
1041 	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
1042 		mask |= X86_BR_ANY_CALL;
1043 
1044 	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
1045 		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
1046 
1047 	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
1048 		mask |= X86_BR_IND_CALL;
1049 
1050 	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
1051 		mask |= X86_BR_ABORT;
1052 
1053 	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
1054 		mask |= X86_BR_IN_TX;
1055 
1056 	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
1057 		mask |= X86_BR_NO_TX;
1058 
1059 	if (br_type & PERF_SAMPLE_BRANCH_COND)
1060 		mask |= X86_BR_JCC;
1061 
1062 	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
1063 		if (!x86_pmu_has_lbr_callstack())
1064 			return -EOPNOTSUPP;
1065 		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
1066 			return -EINVAL;
1067 		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
1068 			X86_BR_CALL_STACK;
1069 	}
1070 
1071 	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
1072 		mask |= X86_BR_IND_JMP;
1073 
1074 	if (br_type & PERF_SAMPLE_BRANCH_CALL)
1075 		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
1076 
1077 	if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
1078 		mask |= X86_BR_TYPE_SAVE;
1079 
1080 	/*
1081 	 * Stash the actual user request into reg; it may
1082 	 * be used by fixup code for some CPUs.
1083 	 */
1084 	event->hw.branch_reg.reg = mask;
1085 	return 0;
1086 }
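/*
 * Illustrative example, not part of the original file: a request of
 * PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY_CALL is translated by
 * the function above into
 *
 *   mask = X86_BR_USER | X86_BR_CALL | X86_BR_IND_CALL | X86_BR_ZERO_CALL
 *        | X86_BR_SYSCALL | X86_BR_IRQ | X86_BR_INT
 *
 * and stashed in event->hw.branch_reg.reg for the SW filter.
 */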
1087 
1088 /*
1089  * Set up the HW LBR filter.
1090  * Used only when available; it may not be enough to disambiguate
1091  * all branches and may need the help of the SW filter.
1092  */
1093 static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
1094 {
1095 	struct hw_perf_event_extra *reg;
1096 	u64 br_type = event->attr.branch_sample_type;
1097 	u64 mask = 0, v;
1098 	int i;
1099 
1100 	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
1101 		if (!(br_type & (1ULL << i)))
1102 			continue;
1103 
1104 		v = x86_pmu.lbr_sel_map[i];
1105 		if (v == LBR_NOT_SUPP)
1106 			return -EOPNOTSUPP;
1107 
1108 		if (v != LBR_IGN)
1109 			mask |= v;
1110 	}
1111 
1112 	reg = &event->hw.branch_reg;
1113 	reg->idx = EXTRA_REG_LBR;
1114 
1115 	if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
1116 		reg->config = mask;
1117 
1118 		/*
1119 		 * The Arch LBR HW can retrieve the common branch types
1120 		 * from the LBR_INFO. It doesn't require the high overhead
1121 		 * SW disassemble.
1122 		 * Enable the branch type by default for the Arch LBR.
1123 		 */
1124 		reg->reg |= X86_BR_TYPE_SAVE;
1125 		return 0;
1126 	}
1127 
1128 	/*
1129 	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
1130 	 * in suppress mode. So LBR_SELECT should be set to
1131 	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
1132 	 * But the 10th bit LBR_CALL_STACK does not operate
1133 	 * in suppress mode.
1134 	 */
1135 	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
1136 
1137 	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
1138 	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
1139 	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
1140 		reg->config |= LBR_NO_INFO;
1141 
1142 	return 0;
1143 }
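/*
 * Worked example (illustrative, not in the original file): to capture only
 * user-level conditional branches, mask = LBR_USER | LBR_JCC = 0x006, and
 * with lbr_sel_mask = LBR_SEL_MASK = 0x3ff:
 *
 *   reg->config = 0x006 ^ (0x3ff & ~LBR_CALL_STACK) = 0x006 ^ 0x1ff = 0x1f9
 *
 * i.e. every suppress bit is set except USER (bit 1) and JCC (bit 2), which
 * in suppress-mode encoding means "capture user JCCs only".
 */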
1144 
1145 int intel_pmu_setup_lbr_filter(struct perf_event *event)
1146 {
1147 	int ret = 0;
1148 
1149 	/*
1150 	 * no LBR on this PMU
1151 	 */
1152 	if (!x86_pmu.lbr_nr)
1153 		return -EOPNOTSUPP;
1154 
1155 	/*
1156 	 * setup SW LBR filter
1157 	 */
1158 	ret = intel_pmu_setup_sw_lbr_filter(event);
1159 	if (ret)
1160 		return ret;
1161 
1162 	/*
1163 	 * setup HW LBR filter, if any
1164 	 */
1165 	if (x86_pmu.lbr_sel_map)
1166 		ret = intel_pmu_setup_hw_lbr_filter(event);
1167 
1168 	return ret;
1169 }
1170 
1171 /*
1172  * Return the type of control flow change at address "from".
1173  * The instruction is not necessarily a branch (in the case of an interrupt).
1174  *
1175  * The branch type returned also includes the priv level of the
1176  * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
1177  *
1178  * If a branch type is unknown OR the instruction cannot be
1179  * decoded (e.g., text page not present), then X86_BR_NONE is
1180  * returned.
1181  */
1182 static int branch_type(unsigned long from, unsigned long to, int abort)
1183 {
1184 	struct insn insn;
1185 	void *addr;
1186 	int bytes_read, bytes_left;
1187 	int ret = X86_BR_NONE;
1188 	int ext, to_plm, from_plm;
1189 	u8 buf[MAX_INSN_SIZE];
1190 	int is64 = 0;
1191 
1192 	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
1193 	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;
1194 
1195 	/*
1196 	 * May be zero if the LBR did not fill up after a reset by the time
1197 	 * we get a PMU interrupt.
1198 	 */
1199 	if (from == 0 || to == 0)
1200 		return X86_BR_NONE;
1201 
1202 	if (abort)
1203 		return X86_BR_ABORT | to_plm;
1204 
1205 	if (from_plm == X86_BR_USER) {
1206 		/*
1207 		 * can happen if measuring at the user level only
1208 		 * and we interrupt in a kernel thread, e.g., idle.
1209 		 */
1210 		if (!current->mm)
1211 			return X86_BR_NONE;
1212 
1213 		/* may fail if text not present */
1214 		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
1215 						MAX_INSN_SIZE);
1216 		bytes_read = MAX_INSN_SIZE - bytes_left;
1217 		if (!bytes_read)
1218 			return X86_BR_NONE;
1219 
1220 		addr = buf;
1221 	} else {
1222 		/*
1223 		 * The LBR logs any address in the IP, even if the IP just
1224 		 * faulted. This means userspace can control the from address.
1225 		 * Ensure we don't blindly read any address by validating it is
1226 		 * a known text address.
1227 		 */
1228 		if (kernel_text_address(from)) {
1229 			addr = (void *)from;
1230 			/*
1231 			 * Assume we can get the maximum possible size
1232 			 * when grabbing kernel data.  This is not
1233 			 * _strictly_ true since we could possibly be
1234 			 * executing up next to a memory hole, but
1235 			 * it is very unlikely to be a problem.
1236 			 */
1237 			bytes_read = MAX_INSN_SIZE;
1238 		} else {
1239 			return X86_BR_NONE;
1240 		}
1241 	}
1242 
1243 	/*
1244 	 * decoder needs to know the ABI especially
1245 	 * on 64-bit systems running 32-bit apps
1246 	 */
1247 #ifdef CONFIG_X86_64
1248 	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
1249 #endif
1250 	insn_init(&insn, addr, bytes_read, is64);
1251 	insn_get_opcode(&insn);
1252 	if (!insn.opcode.got)
1253 		return X86_BR_ABORT;
1254 
1255 	switch (insn.opcode.bytes[0]) {
1256 	case 0xf:
1257 		switch (insn.opcode.bytes[1]) {
1258 		case 0x05: /* syscall */
1259 		case 0x34: /* sysenter */
1260 			ret = X86_BR_SYSCALL;
1261 			break;
1262 		case 0x07: /* sysret */
1263 		case 0x35: /* sysexit */
1264 			ret = X86_BR_SYSRET;
1265 			break;
1266 		case 0x80 ... 0x8f: /* conditional */
1267 			ret = X86_BR_JCC;
1268 			break;
1269 		default:
1270 			ret = X86_BR_NONE;
1271 		}
1272 		break;
1273 	case 0x70 ... 0x7f: /* conditional */
1274 		ret = X86_BR_JCC;
1275 		break;
1276 	case 0xc2: /* near ret */
1277 	case 0xc3: /* near ret */
1278 	case 0xca: /* far ret */
1279 	case 0xcb: /* far ret */
1280 		ret = X86_BR_RET;
1281 		break;
1282 	case 0xcf: /* iret */
1283 		ret = X86_BR_IRET;
1284 		break;
1285 	case 0xcc ... 0xce: /* int */
1286 		ret = X86_BR_INT;
1287 		break;
1288 	case 0xe8: /* call near rel */
1289 		insn_get_immediate(&insn);
1290 		if (insn.immediate1.value == 0) {
1291 			/* zero length call */
1292 			ret = X86_BR_ZERO_CALL;
1293 			break;
1294 		}
1295 		fallthrough;
1296 	case 0x9a: /* call far absolute */
1297 		ret = X86_BR_CALL;
1298 		break;
1299 	case 0xe0 ... 0xe3: /* loop jmp */
1300 		ret = X86_BR_JCC;
1301 		break;
1302 	case 0xe9 ... 0xeb: /* jmp */
1303 		ret = X86_BR_JMP;
1304 		break;
1305 	case 0xff: /* call near absolute, call far absolute ind */
1306 		insn_get_modrm(&insn);
1307 		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
1308 		switch (ext) {
1309 		case 2: /* near ind call */
1310 		case 3: /* far ind call */
1311 			ret = X86_BR_IND_CALL;
1312 			break;
1313 		case 4:
1314 		case 5:
1315 			ret = X86_BR_IND_JMP;
1316 			break;
1317 		}
1318 		break;
1319 	default:
1320 		ret = X86_BR_NONE;
1321 	}
1322 	/*
1323 	 * Interrupts, traps, faults (and thus ring transitions) may
1324 	 * occur on any instruction. Thus, to classify them correctly,
1325 	 * we need to first look at the from and to priv levels. If they
1326 	 * are different and to is in the kernel, then it indicates
1327 	 * a ring transition. If the from instruction is not a ring
1328 	 * transition instr (syscall, sysenter, int), then it means
1329 	 * it was an irq, trap or fault.
1330 	 *
1331 	 * we have no way of detecting kernel to kernel faults.
1332 	 */
1333 	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
1334 	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
1335 		ret = X86_BR_IRQ;
1336 
1337 	/*
1338 	 * branch priv level determined by target as
1339 	 * is done by HW when LBR_SELECT is implemented
1340 	 */
1341 	if (ret != X86_BR_NONE)
1342 		ret |= to_plm;
1343 
1344 	return ret;
1345 }
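/*
 * Illustrative example, not part of the original file: a 5-byte
 * "call near rel32" (opcode 0xe8) whose displacement is 0 only pushes the
 * return address and falls through; branch_type() above classifies it as
 * X86_BR_ZERO_CALL (a common PIC idiom for reading the IP), OR-ed with the
 * priv level of the target, e.g. X86_BR_ZERO_CALL | X86_BR_USER.
 */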
1346 
1347 #define X86_BR_TYPE_MAP_MAX	16
1348 
1349 static int branch_map[X86_BR_TYPE_MAP_MAX] = {
1350 	PERF_BR_CALL,		/* X86_BR_CALL */
1351 	PERF_BR_RET,		/* X86_BR_RET */
1352 	PERF_BR_SYSCALL,	/* X86_BR_SYSCALL */
1353 	PERF_BR_SYSRET,		/* X86_BR_SYSRET */
1354 	PERF_BR_UNKNOWN,	/* X86_BR_INT */
1355 	PERF_BR_UNKNOWN,	/* X86_BR_IRET */
1356 	PERF_BR_COND,		/* X86_BR_JCC */
1357 	PERF_BR_UNCOND,		/* X86_BR_JMP */
1358 	PERF_BR_UNKNOWN,	/* X86_BR_IRQ */
1359 	PERF_BR_IND_CALL,	/* X86_BR_IND_CALL */
1360 	PERF_BR_UNKNOWN,	/* X86_BR_ABORT */
1361 	PERF_BR_UNKNOWN,	/* X86_BR_IN_TX */
1362 	PERF_BR_UNKNOWN,	/* X86_BR_NO_TX */
1363 	PERF_BR_CALL,		/* X86_BR_ZERO_CALL */
1364 	PERF_BR_UNKNOWN,	/* X86_BR_CALL_STACK */
1365 	PERF_BR_IND,		/* X86_BR_IND_JMP */
1366 };
1367 
1368 static int
1369 common_branch_type(int type)
1370 {
1371 	int i;
1372 
1373 	type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */
1374 
1375 	if (type) {
1376 		i = __ffs(type);
1377 		if (i < X86_BR_TYPE_MAP_MAX)
1378 			return branch_map[i];
1379 	}
1380 
1381 	return PERF_BR_UNKNOWN;
1382 }
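/*
 * Illustrative example, not part of the original file: for an entry
 * classified as X86_BR_JCC | X86_BR_USER, the ">>= 2" above drops the two
 * priv-level bits, leaving bit 6 set (X86_BR_JCC is 1 << 8), so __ffs()
 * returns 6 and branch_map[6] yields PERF_BR_COND.
 */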
1383 
1384 enum {
1385 	ARCH_LBR_BR_TYPE_JCC			= 0,
1386 	ARCH_LBR_BR_TYPE_NEAR_IND_JMP		= 1,
1387 	ARCH_LBR_BR_TYPE_NEAR_REL_JMP		= 2,
1388 	ARCH_LBR_BR_TYPE_NEAR_IND_CALL		= 3,
1389 	ARCH_LBR_BR_TYPE_NEAR_REL_CALL		= 4,
1390 	ARCH_LBR_BR_TYPE_NEAR_RET		= 5,
1391 	ARCH_LBR_BR_TYPE_KNOWN_MAX		= ARCH_LBR_BR_TYPE_NEAR_RET,
1392 
1393 	ARCH_LBR_BR_TYPE_MAP_MAX		= 16,
1394 };
1395 
1396 static const int arch_lbr_br_type_map[ARCH_LBR_BR_TYPE_MAP_MAX] = {
1397 	[ARCH_LBR_BR_TYPE_JCC]			= X86_BR_JCC,
1398 	[ARCH_LBR_BR_TYPE_NEAR_IND_JMP]		= X86_BR_IND_JMP,
1399 	[ARCH_LBR_BR_TYPE_NEAR_REL_JMP]		= X86_BR_JMP,
1400 	[ARCH_LBR_BR_TYPE_NEAR_IND_CALL]	= X86_BR_IND_CALL,
1401 	[ARCH_LBR_BR_TYPE_NEAR_REL_CALL]	= X86_BR_CALL,
1402 	[ARCH_LBR_BR_TYPE_NEAR_RET]		= X86_BR_RET,
1403 };
1404 
1405 /*
1406  * Implement the actual branch filter based on the user's request.
1407  * Hardware may not exactly satisfy that request, thus
1408  * we need to inspect opcodes. Mismatched branches are
1409  * discarded. Therefore, the number of branches returned
1410  * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
1411  */
1412 static void
1413 intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
1414 {
1415 	u64 from, to;
1416 	int br_sel = cpuc->br_sel;
1417 	int i, j, type, to_plm;
1418 	bool compress = false;
1419 
1420 	/* if sampling all branches, then nothing to filter */
1421 	if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
1422 	    ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
1423 		return;
1424 
1425 	for (i = 0; i < cpuc->lbr_stack.nr; i++) {
1426 
1427 		from = cpuc->lbr_entries[i].from;
1428 		to = cpuc->lbr_entries[i].to;
1429 		type = cpuc->lbr_entries[i].type;
1430 
1431 		/*
1432 		 * Parse the branch type recorded in LBR_x_INFO MSR.
1433 		 * Doesn't support OTHER_BRANCH decoding for now.
1434 		 * The OTHER_BRANCH branch type still relies on software decoding.
1435 		 */
1436 		if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
1437 		    type <= ARCH_LBR_BR_TYPE_KNOWN_MAX) {
1438 			to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
1439 			type = arch_lbr_br_type_map[type] | to_plm;
1440 		} else
1441 			type = branch_type(from, to, cpuc->lbr_entries[i].abort);
1442 		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
1443 			if (cpuc->lbr_entries[i].in_tx)
1444 				type |= X86_BR_IN_TX;
1445 			else
1446 				type |= X86_BR_NO_TX;
1447 		}
1448 
1449 		/* if type does not correspond, then discard */
1450 		if (type == X86_BR_NONE || (br_sel & type) != type) {
1451 			cpuc->lbr_entries[i].from = 0;
1452 			compress = true;
1453 		}
1454 
1455 		if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
1456 			cpuc->lbr_entries[i].type = common_branch_type(type);
1457 	}
1458 
1459 	if (!compress)
1460 		return;
1461 
1462 	/* remove all entries with from=0 */
1463 	for (i = 0; i < cpuc->lbr_stack.nr; ) {
1464 		if (!cpuc->lbr_entries[i].from) {
1465 			j = i;
1466 			while (++j < cpuc->lbr_stack.nr)
1467 				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
1468 			cpuc->lbr_stack.nr--;
1469 			if (!cpuc->lbr_entries[i].from)
1470 				continue;
1471 		}
1472 		i++;
1473 	}
1474 }
1475 
1476 void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
1477 {
1478 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1479 
1480 	/* Cannot get TOS for large PEBS and Arch LBR */
1481 	if (static_cpu_has(X86_FEATURE_ARCH_LBR) ||
1482 	    (cpuc->n_pebs == cpuc->n_large_pebs))
1483 		cpuc->lbr_stack.hw_idx = -1ULL;
1484 	else
1485 		cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
1486 
1487 	intel_pmu_store_lbr(cpuc, lbr);
1488 	intel_pmu_lbr_filter(cpuc);
1489 }
1490 
1491 /*
1492  * Map interface branch filters onto LBR filters
1493  */
1494 static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1495 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
1496 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
1497 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
1498 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1499 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
1500 						| LBR_IND_JMP | LBR_FAR,
1501 	/*
1502 	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
1503 	 */
1504 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
1505 	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
1506 	/*
1507 	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
1508 	 */
1509 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
1510 	[PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
1511 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1512 };
1513 
1514 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1515 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
1516 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
1517 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
1518 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1519 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
1520 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
1521 						| LBR_FAR,
1522 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
1523 	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
1524 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
1525 	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
1526 };
1527 
1528 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1529 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
1530 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
1531 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
1532 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1533 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
1534 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
1535 						| LBR_FAR,
1536 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
1537 	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
1538 	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
1539 						| LBR_RETURN | LBR_CALL_STACK,
1540 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
1541 	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
1542 };
1543 
1544 static int arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1545 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= ARCH_LBR_ANY,
1546 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= ARCH_LBR_USER,
1547 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= ARCH_LBR_KERNEL,
1548 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1549 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= ARCH_LBR_RETURN |
1550 						  ARCH_LBR_OTHER_BRANCH,
1551 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = ARCH_LBR_REL_CALL |
1552 						  ARCH_LBR_IND_CALL |
1553 						  ARCH_LBR_OTHER_BRANCH,
1554 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = ARCH_LBR_IND_CALL,
1555 	[PERF_SAMPLE_BRANCH_COND_SHIFT]         = ARCH_LBR_JCC,
1556 	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = ARCH_LBR_REL_CALL |
1557 						  ARCH_LBR_IND_CALL |
1558 						  ARCH_LBR_RETURN |
1559 						  ARCH_LBR_CALL_STACK,
1560 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= ARCH_LBR_IND_JMP,
1561 	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= ARCH_LBR_REL_CALL,
1562 };
1563 
1564 /* core */
1565 void __init intel_pmu_lbr_init_core(void)
1566 {
1567 	x86_pmu.lbr_nr     = 4;
1568 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1569 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1570 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1571 
1572 	/*
1573 	 * SW branch filter usage:
1574 	 * - compensate for lack of HW filter
1575 	 */
1576 }
1577 
1578 /* nehalem/westmere */
1579 void __init intel_pmu_lbr_init_nhm(void)
1580 {
1581 	x86_pmu.lbr_nr     = 16;
1582 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1583 	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
1584 	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
1585 
1586 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1587 	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
1588 
1589 	/*
1590 	 * SW branch filter usage:
1591 	 * - workaround LBR_SEL errata (see above)
1592 	 * - support syscall, sysret capture.
1593 	 *   That requires LBR_FAR but that means far
1594  *   jmps need to be filtered out
1595 	 */
1596 }
1597 
1598 /* sandy bridge */
1599 void __init intel_pmu_lbr_init_snb(void)
1600 {
1601 	x86_pmu.lbr_nr	 = 16;
1602 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
1603 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1604 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1605 
1606 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1607 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
1608 
1609 	/*
1610 	 * SW branch filter usage:
1611 	 * - support syscall, sysret capture.
1612 	 *   That requires LBR_FAR but that means far
1613  *   jmps need to be filtered out
1614 	 */
1615 }
1616 
1617 static inline struct kmem_cache *
1618 create_lbr_kmem_cache(size_t size, size_t align)
1619 {
1620 	return kmem_cache_create("x86_lbr", size, align, 0, NULL);
1621 }
1622 
1623 /* haswell */
1624 void intel_pmu_lbr_init_hsw(void)
1625 {
1626 	size_t size = sizeof(struct x86_perf_task_context);
1627 
1628 	x86_pmu.lbr_nr	 = 16;
1629 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
1630 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1631 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1632 
1633 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1634 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
1635 
1636 	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1637 
1638 	if (lbr_from_signext_quirk_needed())
1639 		static_branch_enable(&lbr_from_quirk_key);
1640 }
1641 
1642 /* skylake */
1643 __init void intel_pmu_lbr_init_skl(void)
1644 {
1645 	size_t size = sizeof(struct x86_perf_task_context);
1646 
1647 	x86_pmu.lbr_nr	 = 32;
1648 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
1649 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1650 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1651 	x86_pmu.lbr_info = MSR_LBR_INFO_0;
1652 
1653 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1654 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
1655 
1656 	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1657 
1658 	/*
1659 	 * SW branch filter usage:
1660 	 * - support syscall, sysret capture.
1661 	 *   That requires LBR_FAR but that means far
1662  *   jmps need to be filtered out
1663 	 */
1664 }
1665 
1666 /* atom */
1667 void __init intel_pmu_lbr_init_atom(void)
1668 {
1669 	/*
1670 	 * Only models starting at stepping 10 seem
1671 	 * to have an operational LBR which can freeze
1672 	 * on PMU interrupt.
1673 	 */
1674 	if (boot_cpu_data.x86_model == 28
1675 	    && boot_cpu_data.x86_stepping < 10) {
1676 		pr_cont("LBR disabled due to erratum");
1677 		return;
1678 	}
1679 
1680 	x86_pmu.lbr_nr	   = 8;
1681 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1682 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1683 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1684 
1685 	/*
1686 	 * SW branch filter usage:
1687 	 * - compensate for lack of HW filter
1688 	 */
1689 }
1690 
1691 /* slm */
1692 void __init intel_pmu_lbr_init_slm(void)
1693 {
1694 	x86_pmu.lbr_nr	   = 8;
1695 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1696 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1697 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1698 
1699 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1700 	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
1701 
1702 	/*
1703 	 * SW branch filter usage:
1704 	 * - compensate for lack of HW filter
1705 	 */
1706 	pr_cont("8-deep LBR, ");
1707 }
1708 
1709 /* Knights Landing */
1710 void intel_pmu_lbr_init_knl(void)
1711 {
1712 	x86_pmu.lbr_nr	   = 8;
1713 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1714 	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
1715 	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
1716 
1717 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1718 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
1719 
1720 	/* Knights Landing does have the MISPREDICT bit */
1721 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
1722 		x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
1723 }
1724 
1725 /*
1726  * LBR state size is variable based on the max number of registers.
1727  * This calculates the expected state size, which should match
1728  * what the hardware enumerates for the size of XFEATURE_LBR.
1729  */
1730 static inline unsigned int get_lbr_state_size(void)
1731 {
1732 	return sizeof(struct arch_lbr_state) +
1733 	       x86_pmu.lbr_nr * sizeof(struct lbr_entry);
1734 }
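/*
 * Illustrative example, not part of the original file: struct lbr_entry is
 * three u64s (from, to, info), i.e. 24 bytes.  With 32 Arch LBRs the
 * expected XFEATURE_LBR size is therefore
 *
 *   sizeof(struct arch_lbr_state) + 32 * 24
 *
 * which is checked against the hardware-enumerated xfeature_size() below.
 */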
1735 
1736 static bool is_arch_lbr_xsave_available(void)
1737 {
1738 	if (!boot_cpu_has(X86_FEATURE_XSAVES))
1739 		return false;
1740 
1741 	/*
1742 	 * Check the LBR state with the corresponding software structure.
1743 	 * Disable LBR XSAVES support if the size doesn't match.
1744 	 */
1745 	if (xfeature_size(XFEATURE_LBR) == 0)
1746 		return false;
1747 
1748 	if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
1749 		return false;
1750 
1751 	return true;
1752 }
1753 
1754 void __init intel_pmu_arch_lbr_init(void)
1755 {
1756 	struct pmu *pmu = x86_get_pmu(smp_processor_id());
1757 	union cpuid28_eax eax;
1758 	union cpuid28_ebx ebx;
1759 	union cpuid28_ecx ecx;
1760 	unsigned int unused_edx;
1761 	bool arch_lbr_xsave;
1762 	size_t size;
1763 	u64 lbr_nr;
1764 
1765 	/* Arch LBR Capabilities */
1766 	cpuid(28, &eax.full, &ebx.full, &ecx.full, &unused_edx);
1767 
1768 	lbr_nr = fls(eax.split.lbr_depth_mask) * 8;
1769 	if (!lbr_nr)
1770 		goto clear_arch_lbr;
1771 
1772 	/* Apply the max depth of Arch LBR */
1773 	if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
1774 		goto clear_arch_lbr;
1775 
1776 	x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
1777 	x86_pmu.lbr_deep_c_reset = eax.split.lbr_deep_c_reset;
1778 	x86_pmu.lbr_lip = eax.split.lbr_lip;
1779 	x86_pmu.lbr_cpl = ebx.split.lbr_cpl;
1780 	x86_pmu.lbr_filter = ebx.split.lbr_filter;
1781 	x86_pmu.lbr_call_stack = ebx.split.lbr_call_stack;
1782 	x86_pmu.lbr_mispred = ecx.split.lbr_mispred;
1783 	x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;
1784 	x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
1785 	x86_pmu.lbr_nr = lbr_nr;
1786 
1787 
1788 	arch_lbr_xsave = is_arch_lbr_xsave_available();
1789 	if (arch_lbr_xsave) {
1790 		size = sizeof(struct x86_perf_task_context_arch_lbr_xsave) +
1791 		       get_lbr_state_size();
1792 		pmu->task_ctx_cache = create_lbr_kmem_cache(size,
1793 							    XSAVE_ALIGNMENT);
1794 	}
1795 
1796 	if (!pmu->task_ctx_cache) {
1797 		arch_lbr_xsave = false;
1798 
1799 		size = sizeof(struct x86_perf_task_context_arch_lbr) +
1800 		       lbr_nr * sizeof(struct lbr_entry);
1801 		pmu->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1802 	}
1803 
1804 	x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
1805 	x86_pmu.lbr_to = MSR_ARCH_LBR_TO_0;
1806 	x86_pmu.lbr_info = MSR_ARCH_LBR_INFO_0;
1807 
1808 	/* LBR callstack requires both CPL and Branch Filtering support */
1809 	if (!x86_pmu.lbr_cpl ||
1810 	    !x86_pmu.lbr_filter ||
1811 	    !x86_pmu.lbr_call_stack)
1812 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_NOT_SUPP;
1813 
1814 	if (!x86_pmu.lbr_cpl) {
1815 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_NOT_SUPP;
1816 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_NOT_SUPP;
1817 	} else if (!x86_pmu.lbr_filter) {
1818 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_NOT_SUPP;
1819 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_NOT_SUPP;
1820 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_NOT_SUPP;
1821 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_NOT_SUPP;
1822 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_NOT_SUPP;
1823 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_NOT_SUPP;
1824 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_NOT_SUPP;
1825 	}
1826 
1827 	x86_pmu.lbr_ctl_mask = ARCH_LBR_CTL_MASK;
1828 	x86_pmu.lbr_ctl_map  = arch_lbr_ctl_map;
1829 
1830 	if (!x86_pmu.lbr_cpl && !x86_pmu.lbr_filter)
1831 		x86_pmu.lbr_ctl_map = NULL;
1832 
1833 	x86_pmu.lbr_reset = intel_pmu_arch_lbr_reset;
1834 	if (arch_lbr_xsave) {
1835 		x86_pmu.lbr_save = intel_pmu_arch_lbr_xsaves;
1836 		x86_pmu.lbr_restore = intel_pmu_arch_lbr_xrstors;
1837 		x86_pmu.lbr_read = intel_pmu_arch_lbr_read_xsave;
1838 		pr_cont("XSAVE ");
1839 	} else {
1840 		x86_pmu.lbr_save = intel_pmu_arch_lbr_save;
1841 		x86_pmu.lbr_restore = intel_pmu_arch_lbr_restore;
1842 		x86_pmu.lbr_read = intel_pmu_arch_lbr_read;
1843 	}
1844 
1845 	pr_cont("Architectural LBR, ");
1846 
1847 	return;
1848 
1849 clear_arch_lbr:
1850 	setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR);
1851 }
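/*
 * Worked example (illustrative, not in the original file): CPUID leaf 0x1c
 * reports the supported LBR depths as a bitmask of multiples of 8.  An
 * lbr_depth_mask of 0x04 (bit 2) gives fls() = 3, i.e. a maximum depth of
 * 3 * 8 = 24 entries; a mask of 0x80 gives fls() = 8, i.e. 64 entries,
 * which is what gets written to MSR_ARCH_LBR_DEPTH above.
 */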
1852 
1853 /**
1854  * x86_perf_get_lbr - get the LBR records information
1855  *
1856  * @lbr: the caller's memory to store the LBR records information
1857  *
1858  * Returns: 0 indicates the LBR info has been successfully obtained
1859  */
1860 int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
1861 {
1862 	int lbr_fmt = x86_pmu.intel_cap.lbr_format;
1863 
1864 	lbr->nr = x86_pmu.lbr_nr;
1865 	lbr->from = x86_pmu.lbr_from;
1866 	lbr->to = x86_pmu.lbr_to;
1867 	lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;
1868 
1869 	return 0;
1870 }
1871 EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
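/*
 * Usage sketch (illustrative, not part of the original file): an external
 * user such as a hypervisor module could query the host LBR layout like
 * this; the example_* name and the pr_info() formatting are hypothetical.
 */
static void __maybe_unused example_query_lbr(void)
{
	struct x86_pmu_lbr lbr;

	if (!x86_perf_get_lbr(&lbr) && lbr.nr)
		pr_info("%u LBRs: FROM base MSR 0x%x, TO base MSR 0x%x\n",
			lbr.nr, lbr.from, lbr.to);
}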
1872 
1873 struct event_constraint vlbr_constraint =
1874 	__EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
1875 			  FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);
1876