1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/perf_event.h>
3 #include <linux/types.h>
4
5 #include <asm/perf_event.h>
6 #include <asm/msr.h>
7
8 #include "../perf_event.h"
9
10 static const enum {
11 LBR_EIP_FLAGS = 1,
12 LBR_TSX = 2,
13 } lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
14 [LBR_FORMAT_EIP_FLAGS] = LBR_EIP_FLAGS,
15 [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
16 };
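/*
 * Example (how the table above is consumed): for LBR_FORMAT_EIP_FLAGS2,
 * lbr_desc[] yields (LBR_EIP_FLAGS | LBR_TSX), so intel_pmu_lbr_read_64()
 * strips the MISPRED flag (bit 63) plus the two TSX flags (bits 62:61)
 * from the FROM address before using it.
 */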
17
18 /*
19 * Intel LBR_SELECT bits
20 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
21 *
22 * Hardware branch filter (not available on all CPUs)
23 */
24 #define LBR_KERNEL_BIT 0 /* do not capture at ring0 */
25 #define LBR_USER_BIT 1 /* do not capture at ring > 0 */
26 #define LBR_JCC_BIT 2 /* do not capture conditional branches */
27 #define LBR_REL_CALL_BIT 3 /* do not capture relative calls */
28 #define LBR_IND_CALL_BIT 4 /* do not capture indirect calls */
29 #define LBR_RETURN_BIT 5 /* do not capture near returns */
30 #define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */
31 #define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */
32 #define LBR_FAR_BIT 8 /* do not capture far branches */
33 #define LBR_CALL_STACK_BIT 9 /* enable call stack */
34
35 /*
36  * The following bit only exists in Linux; we mask it out before writing it
37  * to the actual MSR, but it lets the perf constraint code treat this as a
38  * separate configuration.
39 */
40 #define LBR_NO_INFO_BIT 63 /* don't read LBR_INFO. */
41
42 #define LBR_KERNEL (1 << LBR_KERNEL_BIT)
43 #define LBR_USER (1 << LBR_USER_BIT)
44 #define LBR_JCC (1 << LBR_JCC_BIT)
45 #define LBR_REL_CALL (1 << LBR_REL_CALL_BIT)
46 #define LBR_IND_CALL (1 << LBR_IND_CALL_BIT)
47 #define LBR_RETURN (1 << LBR_RETURN_BIT)
48 #define LBR_REL_JMP (1 << LBR_REL_JMP_BIT)
49 #define LBR_IND_JMP (1 << LBR_IND_JMP_BIT)
50 #define LBR_FAR (1 << LBR_FAR_BIT)
51 #define LBR_CALL_STACK (1 << LBR_CALL_STACK_BIT)
52 #define LBR_NO_INFO (1ULL << LBR_NO_INFO_BIT)
53
54 #define LBR_PLM (LBR_KERNEL | LBR_USER)
55
56 #define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */
57 #define LBR_NOT_SUPP -1 /* LBR filter not supported */
58 #define LBR_IGN 0 /* ignored */
59
60 #define LBR_ANY \
61 (LBR_JCC |\
62 LBR_REL_CALL |\
63 LBR_IND_CALL |\
64 LBR_RETURN |\
65 LBR_REL_JMP |\
66 LBR_IND_JMP |\
67 LBR_FAR)
68
69 #define LBR_FROM_FLAG_MISPRED BIT_ULL(63)
70 #define LBR_FROM_FLAG_IN_TX BIT_ULL(62)
71 #define LBR_FROM_FLAG_ABORT BIT_ULL(61)
72
73 #define LBR_FROM_SIGNEXT_2MSB (BIT_ULL(60) | BIT_ULL(59))
74
75 /*
76 * Intel LBR_CTL bits
77 *
78 * Hardware branch filter for Arch LBR
79 */
80 #define ARCH_LBR_KERNEL_BIT 1 /* capture at ring0 */
81 #define ARCH_LBR_USER_BIT 2 /* capture at ring > 0 */
82 #define ARCH_LBR_CALL_STACK_BIT 3 /* enable call stack */
83 #define ARCH_LBR_JCC_BIT 16 /* capture conditional branches */
84 #define ARCH_LBR_REL_JMP_BIT 17 /* capture relative jumps */
85 #define ARCH_LBR_IND_JMP_BIT 18 /* capture indirect jumps */
86 #define ARCH_LBR_REL_CALL_BIT 19 /* capture relative calls */
87 #define ARCH_LBR_IND_CALL_BIT 20 /* capture indirect calls */
88 #define ARCH_LBR_RETURN_BIT 21 /* capture near returns */
89 #define ARCH_LBR_OTHER_BRANCH_BIT 22 /* capture other branches */
90
91 #define ARCH_LBR_KERNEL (1ULL << ARCH_LBR_KERNEL_BIT)
92 #define ARCH_LBR_USER (1ULL << ARCH_LBR_USER_BIT)
93 #define ARCH_LBR_CALL_STACK (1ULL << ARCH_LBR_CALL_STACK_BIT)
94 #define ARCH_LBR_JCC (1ULL << ARCH_LBR_JCC_BIT)
95 #define ARCH_LBR_REL_JMP (1ULL << ARCH_LBR_REL_JMP_BIT)
96 #define ARCH_LBR_IND_JMP (1ULL << ARCH_LBR_IND_JMP_BIT)
97 #define ARCH_LBR_REL_CALL (1ULL << ARCH_LBR_REL_CALL_BIT)
98 #define ARCH_LBR_IND_CALL (1ULL << ARCH_LBR_IND_CALL_BIT)
99 #define ARCH_LBR_RETURN (1ULL << ARCH_LBR_RETURN_BIT)
100 #define ARCH_LBR_OTHER_BRANCH (1ULL << ARCH_LBR_OTHER_BRANCH_BIT)
101
102 #define ARCH_LBR_ANY \
103 (ARCH_LBR_JCC |\
104 ARCH_LBR_REL_JMP |\
105 ARCH_LBR_IND_JMP |\
106 ARCH_LBR_REL_CALL |\
107 ARCH_LBR_IND_CALL |\
108 ARCH_LBR_RETURN |\
109 ARCH_LBR_OTHER_BRANCH)
110
111 #define ARCH_LBR_CTL_MASK 0x7f000e
112
113 static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
114
115 static __always_inline bool is_lbr_call_stack_bit_set(u64 config)
116 {
117 if (static_cpu_has(X86_FEATURE_ARCH_LBR))
118 return !!(config & ARCH_LBR_CALL_STACK);
119
120 return !!(config & LBR_CALL_STACK);
121 }
122
123 /*
124  * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
125  * otherwise it becomes nearly impossible to get a reliable stack.
126 */
127
128 static void __intel_pmu_lbr_enable(bool pmi)
129 {
130 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
131 u64 debugctl, lbr_select = 0, orig_debugctl;
132
133 /*
134 * No need to unfreeze manually, as v4 can do that as part
135 * of the GLOBAL_STATUS ack.
136 */
137 if (pmi && x86_pmu.version >= 4)
138 return;
139
140 /*
141 * No need to reprogram LBR_SELECT in a PMI, as it
142 * did not change.
143 */
144 if (cpuc->lbr_sel)
145 lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
146 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
147 wrmsrl(MSR_LBR_SELECT, lbr_select);
148
149 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
150 orig_debugctl = debugctl;
151
152 if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
153 debugctl |= DEBUGCTLMSR_LBR;
154 /*
155 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
156  * If FREEZE_LBRS_ON_PMI is set, a PMI hitting near call/return
157  * instructions may cause a superfluous increase/decrease of LBR_TOS.
158 */
159 if (is_lbr_call_stack_bit_set(lbr_select))
160 debugctl &= ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
161 else
162 debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
163
164 if (orig_debugctl != debugctl)
165 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
166
167 if (static_cpu_has(X86_FEATURE_ARCH_LBR))
168 wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
169 }
170
171 static void __intel_pmu_lbr_disable(void)
172 {
173 u64 debugctl;
174
175 if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
176 wrmsrl(MSR_ARCH_LBR_CTL, 0);
177 return;
178 }
179
180 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
181 debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
182 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
183 }
184
185 void intel_pmu_lbr_reset_32(void)
186 {
187 int i;
188
189 for (i = 0; i < x86_pmu.lbr_nr; i++)
190 wrmsrl(x86_pmu.lbr_from + i, 0);
191 }
192
193 void intel_pmu_lbr_reset_64(void)
194 {
195 int i;
196
197 for (i = 0; i < x86_pmu.lbr_nr; i++) {
198 wrmsrl(x86_pmu.lbr_from + i, 0);
199 wrmsrl(x86_pmu.lbr_to + i, 0);
200 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
201 wrmsrl(x86_pmu.lbr_info + i, 0);
202 }
203 }
204
205 static void intel_pmu_arch_lbr_reset(void)
206 {
207 	/* Writing the LBR depth to the ARCH_LBR_DEPTH MSR resets all LBR entries to 0 */
208 wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
209 }
210
211 void intel_pmu_lbr_reset(void)
212 {
213 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
214
215 if (!x86_pmu.lbr_nr)
216 return;
217
218 x86_pmu.lbr_reset();
219
220 cpuc->last_task_ctx = NULL;
221 cpuc->last_log_id = 0;
222 }
223
224 /*
225 * TOS = most recently recorded branch
226 */
227 static inline u64 intel_pmu_lbr_tos(void)
228 {
229 u64 tos;
230
231 rdmsrl(x86_pmu.lbr_tos, tos);
232 return tos;
233 }
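/*
 * Worked example of the TOS-relative indexing used by the read/save paths
 * below: with x86_pmu.lbr_nr == 16 (mask == 0xf) and tos == 3, iteration i
 * visits lbr_idx = (3 - i) & 0xf, i.e. 3, 2, 1, 0, 15, 14, ..., so entry 0
 * always holds the most recent branch.
 */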
234
235 enum {
236 LBR_NONE,
237 LBR_VALID,
238 };
239
240 /*
241 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
242 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
243 * TSX is not supported they have no consistent behavior:
244 *
245 * - For wrmsr(), bits 61:62 are considered part of the sign extension.
246 * - For HW updates (branch captures) bits 61:62 are always OFF and are not
247 * part of the sign extension.
248 *
249 * Therefore, if:
250 *
251 * 1) LBR has TSX format
252 * 2) CPU has no TSX support enabled
253 *
254  * ... then any value passed to wrmsr() must be sign-extended to 63 bits and any
255  * value from rdmsr() must be converted to have a 61-bit sign extension,
256 * ignoring the TSX flags.
257 */
258 static inline bool lbr_from_signext_quirk_needed(void)
259 {
260 int lbr_format = x86_pmu.intel_cap.lbr_format;
261 bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
262 boot_cpu_has(X86_FEATURE_RTM);
263
264 return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
265 }
266
267 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
268
269 /* If quirk is enabled, ensure sign extension is 63 bits: */
270 inline u64 lbr_from_signext_quirk_wr(u64 val)
271 {
272 if (static_branch_unlikely(&lbr_from_quirk_key)) {
273 /*
274 * Sign extend into bits 61:62 while preserving bit 63.
275 *
276 * Quirk is enabled when TSX is disabled. Therefore TSX bits
277 * in val are always OFF and must be changed to be sign
278 * extension bits. Since bits 59:60 are guaranteed to be
279 * part of the sign extension bits, we can just copy them
280 * to 61:62.
281 */
282 val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
283 }
284 return val;
285 }
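/*
 * Worked example of the copy above: when bits 60:59 of val are set (a
 * negative, i.e. kernel-space, address), (LBR_FROM_SIGNEXT_2MSB & val) << 2
 * sets bits 62:61 as well, turning the 61-bit sign extension into the
 * 63-bit one the MSR write expects.
 */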
286
287 /*
288 * If quirk is needed, ensure sign extension is 61 bits:
289 */
290 static u64 lbr_from_signext_quirk_rd(u64 val)
291 {
292 if (static_branch_unlikely(&lbr_from_quirk_key)) {
293 /*
294 * Quirk is on when TSX is not enabled. Therefore TSX
295 * flags must be read as OFF.
296 */
297 val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
298 }
299 return val;
300 }
301
302 static __always_inline void wrlbr_from(unsigned int idx, u64 val)
303 {
304 val = lbr_from_signext_quirk_wr(val);
305 wrmsrl(x86_pmu.lbr_from + idx, val);
306 }
307
308 static __always_inline void wrlbr_to(unsigned int idx, u64 val)
309 {
310 wrmsrl(x86_pmu.lbr_to + idx, val);
311 }
312
313 static __always_inline void wrlbr_info(unsigned int idx, u64 val)
314 {
315 wrmsrl(x86_pmu.lbr_info + idx, val);
316 }
317
318 static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
319 {
320 u64 val;
321
322 if (lbr)
323 return lbr->from;
324
325 rdmsrl(x86_pmu.lbr_from + idx, val);
326
327 return lbr_from_signext_quirk_rd(val);
328 }
329
330 static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
331 {
332 u64 val;
333
334 if (lbr)
335 return lbr->to;
336
337 rdmsrl(x86_pmu.lbr_to + idx, val);
338
339 return val;
340 }
341
342 static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
343 {
344 u64 val;
345
346 if (lbr)
347 return lbr->info;
348
349 rdmsrl(x86_pmu.lbr_info + idx, val);
350
351 return val;
352 }
353
354 static inline void
355 wrlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
356 {
357 wrlbr_from(idx, lbr->from);
358 wrlbr_to(idx, lbr->to);
359 if (need_info)
360 wrlbr_info(idx, lbr->info);
361 }
362
363 static inline bool
364 rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
365 {
366 u64 from = rdlbr_from(idx, NULL);
367
368 /* Don't read invalid entry */
369 if (!from)
370 return false;
371
372 lbr->from = from;
373 lbr->to = rdlbr_to(idx, NULL);
374 if (need_info)
375 lbr->info = rdlbr_info(idx, NULL);
376
377 return true;
378 }
379
380 void intel_pmu_lbr_restore(void *ctx)
381 {
382 bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
383 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
384 struct x86_perf_task_context *task_ctx = ctx;
385 int i;
386 unsigned lbr_idx, mask;
387 u64 tos = task_ctx->tos;
388
389 mask = x86_pmu.lbr_nr - 1;
390 for (i = 0; i < task_ctx->valid_lbrs; i++) {
391 lbr_idx = (tos - i) & mask;
392 wrlbr_all(&task_ctx->lbr[i], lbr_idx, need_info);
393 }
394
395 for (; i < x86_pmu.lbr_nr; i++) {
396 lbr_idx = (tos - i) & mask;
397 wrlbr_from(lbr_idx, 0);
398 wrlbr_to(lbr_idx, 0);
399 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
400 wrlbr_info(lbr_idx, 0);
401 }
402
403 wrmsrl(x86_pmu.lbr_tos, tos);
404
405 if (cpuc->lbr_select)
406 wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
407 }
408
409 static void intel_pmu_arch_lbr_restore(void *ctx)
410 {
411 struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
412 struct lbr_entry *entries = task_ctx->entries;
413 int i;
414
415 /* Fast reset the LBRs before restore if the call stack is not full. */
416 if (!entries[x86_pmu.lbr_nr - 1].from)
417 intel_pmu_arch_lbr_reset();
418
419 for (i = 0; i < x86_pmu.lbr_nr; i++) {
420 if (!entries[i].from)
421 break;
422 wrlbr_all(&entries[i], i, true);
423 }
424 }
425
426 /*
427 * Restore the Architecture LBR state from the xsave area in the perf
428 * context data for the task via the XRSTORS instruction.
429 */
430 static void intel_pmu_arch_lbr_xrstors(void *ctx)
431 {
432 struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
433
434 xrstors(&task_ctx->xsave, XFEATURE_MASK_LBR);
435 }
436
437 static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
438 {
439 if (static_cpu_has(X86_FEATURE_ARCH_LBR))
440 return x86_pmu.lbr_deep_c_reset && !rdlbr_from(0, NULL);
441
442 return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL);
443 }
444
445 static void __intel_pmu_lbr_restore(void *ctx)
446 {
447 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
448
449 if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
450 task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
451 intel_pmu_lbr_reset();
452 return;
453 }
454
455 /*
456 	 * Do not restore the LBR registers if:
457 	 * - no one else touched them, and
458 	 * - they were not cleared in a C-state.
459 */
460 if ((ctx == cpuc->last_task_ctx) &&
461 (task_context_opt(ctx)->log_id == cpuc->last_log_id) &&
462 !lbr_is_reset_in_cstate(ctx)) {
463 task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
464 return;
465 }
466
467 x86_pmu.lbr_restore(ctx);
468
469 task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
470 }
471
472 void intel_pmu_lbr_save(void *ctx)
473 {
474 bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
475 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
476 struct x86_perf_task_context *task_ctx = ctx;
477 unsigned lbr_idx, mask;
478 u64 tos;
479 int i;
480
481 mask = x86_pmu.lbr_nr - 1;
482 tos = intel_pmu_lbr_tos();
483 for (i = 0; i < x86_pmu.lbr_nr; i++) {
484 lbr_idx = (tos - i) & mask;
485 if (!rdlbr_all(&task_ctx->lbr[i], lbr_idx, need_info))
486 break;
487 }
488 task_ctx->valid_lbrs = i;
489 task_ctx->tos = tos;
490
491 if (cpuc->lbr_select)
492 rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
493 }
494
495 static void intel_pmu_arch_lbr_save(void *ctx)
496 {
497 struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
498 struct lbr_entry *entries = task_ctx->entries;
499 int i;
500
501 for (i = 0; i < x86_pmu.lbr_nr; i++) {
502 if (!rdlbr_all(&entries[i], i, true))
503 break;
504 }
505
506 /* LBR call stack is not full. Reset is required in restore. */
507 if (i < x86_pmu.lbr_nr)
508 entries[x86_pmu.lbr_nr - 1].from = 0;
509 }
510
511 /*
512 * Save the Architecture LBR state to the xsave area in the perf
513 * context data for the task via the XSAVES instruction.
514 */
515 static void intel_pmu_arch_lbr_xsaves(void *ctx)
516 {
517 struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
518
519 xsaves(&task_ctx->xsave, XFEATURE_MASK_LBR);
520 }
521
522 static void __intel_pmu_lbr_save(void *ctx)
523 {
524 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
525
526 if (task_context_opt(ctx)->lbr_callstack_users == 0) {
527 task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
528 return;
529 }
530
531 x86_pmu.lbr_save(ctx);
532
533 task_context_opt(ctx)->lbr_stack_state = LBR_VALID;
534
535 cpuc->last_task_ctx = ctx;
536 cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
537 }
538
539 void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
540 struct perf_event_context *next)
541 {
542 void *prev_ctx_data, *next_ctx_data;
543
544 swap(prev->task_ctx_data, next->task_ctx_data);
545
546 /*
547 	 * Architecture-specific synchronization only makes sense
548 	 * when both the prev->task_ctx_data and next->task_ctx_data
549 	 * pointers are allocated.
550 */
551
552 prev_ctx_data = next->task_ctx_data;
553 next_ctx_data = prev->task_ctx_data;
554
555 if (!prev_ctx_data || !next_ctx_data)
556 return;
557
558 swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
559 task_context_opt(next_ctx_data)->lbr_callstack_users);
560 }
561
562 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
563 {
564 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
565 void *task_ctx;
566
567 if (!cpuc->lbr_users)
568 return;
569
570 /*
571 * If LBR callstack feature is enabled and the stack was saved when
572 * the task was scheduled out, restore the stack. Otherwise flush
573 * the LBR stack.
574 */
575 task_ctx = ctx ? ctx->task_ctx_data : NULL;
576 if (task_ctx) {
577 if (sched_in)
578 __intel_pmu_lbr_restore(task_ctx);
579 else
580 __intel_pmu_lbr_save(task_ctx);
581 return;
582 }
583
584 /*
585 * Since a context switch can flip the address space and LBR entries
586 * are not tagged with an identifier, we need to wipe the LBR, even for
587 * per-cpu events. You simply cannot resolve the branches from the old
588 * address space.
589 */
590 if (sched_in)
591 intel_pmu_lbr_reset();
592 }
593
594 static inline bool branch_user_callstack(unsigned br_sel)
595 {
596 return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
597 }
598
599 void intel_pmu_lbr_add(struct perf_event *event)
600 {
601 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
602
603 if (!x86_pmu.lbr_nr)
604 return;
605
606 if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
607 cpuc->lbr_select = 1;
608
609 cpuc->br_sel = event->hw.branch_reg.reg;
610
611 if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
612 task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;
613
614 /*
615 * Request pmu::sched_task() callback, which will fire inside the
616 * regular perf event scheduling, so that call will:
617 *
618 * - restore or wipe; when LBR-callstack,
619 * - wipe; otherwise,
620 *
621 * when this is from __perf_event_task_sched_in().
622 *
623 * However, if this is from perf_install_in_context(), no such callback
624 * will follow and we'll need to reset the LBR here if this is the
625 * first LBR event.
626 *
627 * The problem is, we cannot tell these cases apart... but we can
628 * exclude the biggest chunk of cases by looking at
629 * event->total_time_running. An event that has accrued runtime cannot
630 * be 'new'. Conversely, a new event can get installed through the
631 * context switch path for the first time.
632 */
633 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
634 cpuc->lbr_pebs_users++;
635 perf_sched_cb_inc(event->ctx->pmu);
636 if (!cpuc->lbr_users++ && !event->total_time_running)
637 intel_pmu_lbr_reset();
638 }
639
640 void release_lbr_buffers(void)
641 {
642 struct kmem_cache *kmem_cache;
643 struct cpu_hw_events *cpuc;
644 int cpu;
645
646 if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
647 return;
648
649 for_each_possible_cpu(cpu) {
650 cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
651 kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
652 if (kmem_cache && cpuc->lbr_xsave) {
653 kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
654 cpuc->lbr_xsave = NULL;
655 }
656 }
657 }
658
659 void reserve_lbr_buffers(void)
660 {
661 struct kmem_cache *kmem_cache;
662 struct cpu_hw_events *cpuc;
663 int cpu;
664
665 if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
666 return;
667
668 for_each_possible_cpu(cpu) {
669 cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
670 kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
671 if (!kmem_cache || cpuc->lbr_xsave)
672 continue;
673
674 cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
675 GFP_KERNEL | __GFP_ZERO,
676 cpu_to_node(cpu));
677 }
678 }
679
680 void intel_pmu_lbr_del(struct perf_event *event)
681 {
682 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
683
684 if (!x86_pmu.lbr_nr)
685 return;
686
687 if (branch_user_callstack(cpuc->br_sel) &&
688 event->ctx->task_ctx_data)
689 task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;
690
691 if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
692 cpuc->lbr_select = 0;
693
694 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
695 cpuc->lbr_pebs_users--;
696 cpuc->lbr_users--;
697 WARN_ON_ONCE(cpuc->lbr_users < 0);
698 WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
699 perf_sched_cb_dec(event->ctx->pmu);
700 }
701
702 static inline bool vlbr_exclude_host(void)
703 {
704 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
705
706 return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
707 (unsigned long *)&cpuc->intel_ctrl_guest_mask);
708 }
709
710 void intel_pmu_lbr_enable_all(bool pmi)
711 {
712 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
713
714 if (cpuc->lbr_users && !vlbr_exclude_host())
715 __intel_pmu_lbr_enable(pmi);
716 }
717
718 void intel_pmu_lbr_disable_all(void)
719 {
720 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
721
722 if (cpuc->lbr_users && !vlbr_exclude_host())
723 __intel_pmu_lbr_disable();
724 }
725
726 void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
727 {
728 unsigned long mask = x86_pmu.lbr_nr - 1;
729 u64 tos = intel_pmu_lbr_tos();
730 int i;
731
732 for (i = 0; i < x86_pmu.lbr_nr; i++) {
733 unsigned long lbr_idx = (tos - i) & mask;
734 union {
735 struct {
736 u32 from;
737 u32 to;
738 };
739 u64 lbr;
740 } msr_lastbranch;
741
742 rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
743
744 cpuc->lbr_entries[i].from = msr_lastbranch.from;
745 cpuc->lbr_entries[i].to = msr_lastbranch.to;
746 cpuc->lbr_entries[i].mispred = 0;
747 cpuc->lbr_entries[i].predicted = 0;
748 cpuc->lbr_entries[i].in_tx = 0;
749 cpuc->lbr_entries[i].abort = 0;
750 cpuc->lbr_entries[i].cycles = 0;
751 cpuc->lbr_entries[i].type = 0;
752 cpuc->lbr_entries[i].reserved = 0;
753 }
754 cpuc->lbr_stack.nr = i;
755 cpuc->lbr_stack.hw_idx = tos;
756 }
757
758 /*
759 * Due to lack of segmentation in Linux the effective address (offset)
760 * is the same as the linear address, allowing us to merge the LIP and EIP
761 * LBR formats.
762 */
763 void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
764 {
765 bool need_info = false, call_stack = false;
766 unsigned long mask = x86_pmu.lbr_nr - 1;
767 int lbr_format = x86_pmu.intel_cap.lbr_format;
768 u64 tos = intel_pmu_lbr_tos();
769 int i;
770 int out = 0;
771 int num = x86_pmu.lbr_nr;
772
773 if (cpuc->lbr_sel) {
774 need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
775 if (cpuc->lbr_sel->config & LBR_CALL_STACK)
776 call_stack = true;
777 }
778
779 for (i = 0; i < num; i++) {
780 unsigned long lbr_idx = (tos - i) & mask;
781 u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
782 int skip = 0;
783 u16 cycles = 0;
784 int lbr_flags = lbr_desc[lbr_format];
785
786 from = rdlbr_from(lbr_idx, NULL);
787 to = rdlbr_to(lbr_idx, NULL);
788
789 /*
790 * Read LBR call stack entries
791 * until invalid entry (0s) is detected.
792 */
793 if (call_stack && !from)
794 break;
795
796 if (lbr_format == LBR_FORMAT_INFO && need_info) {
797 u64 info;
798
799 info = rdlbr_info(lbr_idx, NULL);
800 mis = !!(info & LBR_INFO_MISPRED);
801 pred = !mis;
802 in_tx = !!(info & LBR_INFO_IN_TX);
803 abort = !!(info & LBR_INFO_ABORT);
804 cycles = (info & LBR_INFO_CYCLES);
805 }
806
807 if (lbr_format == LBR_FORMAT_TIME) {
808 mis = !!(from & LBR_FROM_FLAG_MISPRED);
809 pred = !mis;
810 skip = 1;
811 cycles = ((to >> 48) & LBR_INFO_CYCLES);
812
813 to = (u64)((((s64)to) << 16) >> 16);
814 }
815
816 if (lbr_flags & LBR_EIP_FLAGS) {
817 mis = !!(from & LBR_FROM_FLAG_MISPRED);
818 pred = !mis;
819 skip = 1;
820 }
821 if (lbr_flags & LBR_TSX) {
822 in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
823 abort = !!(from & LBR_FROM_FLAG_ABORT);
824 skip = 3;
825 }
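/*
 * Illustrative arithmetic for the sign extension below: with skip == 3,
 * shifting the signed 64-bit value left then right by 3 drops bits 63:61
 * (the flag bits) and replicates bit 60 into them, restoring a canonical
 * address.
 */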
826 from = (u64)((((s64)from) << skip) >> skip);
827
828 /*
829 * Some CPUs report duplicated abort records,
830 * with the second entry not having an abort bit set.
831 * Skip them here. This loop runs backwards,
832 * so we need to undo the previous record.
833 		 * If the abort just happened outside the window,
834 * the extra entry cannot be removed.
835 */
836 if (abort && x86_pmu.lbr_double_abort && out > 0)
837 out--;
838
839 cpuc->lbr_entries[out].from = from;
840 cpuc->lbr_entries[out].to = to;
841 cpuc->lbr_entries[out].mispred = mis;
842 cpuc->lbr_entries[out].predicted = pred;
843 cpuc->lbr_entries[out].in_tx = in_tx;
844 cpuc->lbr_entries[out].abort = abort;
845 cpuc->lbr_entries[out].cycles = cycles;
846 cpuc->lbr_entries[out].type = 0;
847 cpuc->lbr_entries[out].reserved = 0;
848 out++;
849 }
850 cpuc->lbr_stack.nr = out;
851 cpuc->lbr_stack.hw_idx = tos;
852 }
853
854 static __always_inline int get_lbr_br_type(u64 info)
855 {
856 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
857 return 0;
858
859 return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
860 }
861
862 static __always_inline bool get_lbr_mispred(u64 info)
863 {
864 if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
865 return 0;
866
867 return !!(info & LBR_INFO_MISPRED);
868 }
869
870 static __always_inline bool get_lbr_predicted(u64 info)
871 {
872 if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
873 return 0;
874
875 return !(info & LBR_INFO_MISPRED);
876 }
877
878 static __always_inline u16 get_lbr_cycles(u64 info)
879 {
880 if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
881 !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
882 return 0;
883
884 return info & LBR_INFO_CYCLES;
885 }
886
887 static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
888 struct lbr_entry *entries)
889 {
890 struct perf_branch_entry *e;
891 struct lbr_entry *lbr;
892 u64 from, to, info;
893 int i;
894
895 for (i = 0; i < x86_pmu.lbr_nr; i++) {
896 lbr = entries ? &entries[i] : NULL;
897 e = &cpuc->lbr_entries[i];
898
899 from = rdlbr_from(i, lbr);
900 /*
901 * Read LBR entries until invalid entry (0s) is detected.
902 */
903 if (!from)
904 break;
905
906 to = rdlbr_to(i, lbr);
907 info = rdlbr_info(i, lbr);
908
909 e->from = from;
910 e->to = to;
911 e->mispred = get_lbr_mispred(info);
912 e->predicted = get_lbr_predicted(info);
913 e->in_tx = !!(info & LBR_INFO_IN_TX);
914 e->abort = !!(info & LBR_INFO_ABORT);
915 e->cycles = get_lbr_cycles(info);
916 e->type = get_lbr_br_type(info);
917 e->reserved = 0;
918 }
919
920 cpuc->lbr_stack.nr = i;
921 }
922
923 static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
924 {
925 intel_pmu_store_lbr(cpuc, NULL);
926 }
927
928 static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
929 {
930 struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave;
931
932 if (!xsave) {
933 intel_pmu_store_lbr(cpuc, NULL);
934 return;
935 }
936 xsaves(&xsave->xsave, XFEATURE_MASK_LBR);
937
938 intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
939 }
940
941 void intel_pmu_lbr_read(void)
942 {
943 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
944
945 /*
946 	 * Don't read when all LBR users are using adaptive PEBS.
947 *
948 * This could be smarter and actually check the event,
949 * but this simple approach seems to work for now.
950 */
951 if (!cpuc->lbr_users || vlbr_exclude_host() ||
952 cpuc->lbr_users == cpuc->lbr_pebs_users)
953 return;
954
955 x86_pmu.lbr_read(cpuc);
956
957 intel_pmu_lbr_filter(cpuc);
958 }
959
960 /*
961 * SW filter is used:
962 * - in case there is no HW filter
963 * - in case the HW filter has errata or limitations
964 */
965 static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
966 {
967 u64 br_type = event->attr.branch_sample_type;
968 int mask = 0;
969
970 if (br_type & PERF_SAMPLE_BRANCH_USER)
971 mask |= X86_BR_USER;
972
973 if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
974 mask |= X86_BR_KERNEL;
975
976 /* we ignore BRANCH_HV here */
977
978 if (br_type & PERF_SAMPLE_BRANCH_ANY)
979 mask |= X86_BR_ANY;
980
981 if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
982 mask |= X86_BR_ANY_CALL;
983
984 if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
985 mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
986
987 if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
988 mask |= X86_BR_IND_CALL;
989
990 if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
991 mask |= X86_BR_ABORT;
992
993 if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
994 mask |= X86_BR_IN_TX;
995
996 if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
997 mask |= X86_BR_NO_TX;
998
999 if (br_type & PERF_SAMPLE_BRANCH_COND)
1000 mask |= X86_BR_JCC;
1001
1002 if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
1003 if (!x86_pmu_has_lbr_callstack())
1004 return -EOPNOTSUPP;
1005 if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
1006 return -EINVAL;
1007 mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
1008 X86_BR_CALL_STACK;
1009 }
1010
1011 if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
1012 mask |= X86_BR_IND_JMP;
1013
1014 if (br_type & PERF_SAMPLE_BRANCH_CALL)
1015 mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
1016
1017 if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
1018 mask |= X86_BR_TYPE_SAVE;
1019
1020 /*
1021 	 * Stash the actual user request into reg; it may
1022 	 * be used by fixup code for some CPUs.
1023 */
1024 event->hw.branch_reg.reg = mask;
1025 return 0;
1026 }
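/*
 * Illustrative only (not logic from this file): a user asking for
 * conditional user-space branches would set, roughly,
 *
 *	struct perf_event_attr attr = {
 *		.sample_type		= PERF_SAMPLE_BRANCH_STACK,
 *		.branch_sample_type	= PERF_SAMPLE_BRANCH_USER |
 *					  PERF_SAMPLE_BRANCH_COND,
 *	};
 *
 * which the function above folds into X86_BR_USER | X86_BR_JCC and stashes
 * in event->hw.branch_reg.reg for intel_pmu_lbr_filter().
 */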
1027
1028 /*
1029  * Set up the HW LBR filter.
1030  * Used only when available; it may not be enough to disambiguate
1031  * all branches and may need the help of the SW filter.
1032 */
1033 static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
1034 {
1035 struct hw_perf_event_extra *reg;
1036 u64 br_type = event->attr.branch_sample_type;
1037 u64 mask = 0, v;
1038 int i;
1039
1040 for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
1041 if (!(br_type & (1ULL << i)))
1042 continue;
1043
1044 v = x86_pmu.lbr_sel_map[i];
1045 if (v == LBR_NOT_SUPP)
1046 return -EOPNOTSUPP;
1047
1048 if (v != LBR_IGN)
1049 mask |= v;
1050 }
1051
1052 reg = &event->hw.branch_reg;
1053 reg->idx = EXTRA_REG_LBR;
1054
1055 if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
1056 reg->config = mask;
1057
1058 /*
1059 		 * The Arch LBR HW can retrieve the common branch types
1060 		 * from LBR_INFO, so the high-overhead SW disassembly is
1061 		 * not required.
1062 		 * Enable branch type reporting by default for Arch LBR.
1063 */
1064 reg->reg |= X86_BR_TYPE_SAVE;
1065 return 0;
1066 }
1067
1068 /*
1069 	 * The first 9 bits of LBR_SEL_MASK in LBR_SELECT operate
1070 	 * in suppress mode, so LBR_SELECT should be set to
1071 	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK).
1072 	 * But the 10th bit, LBR_CALL_STACK, does not operate
1073 	 * in suppress mode.
1074 */
1075 reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
1076
1077 if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
1078 (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
1079 (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
1080 reg->config |= LBR_NO_INFO;
1081
1082 return 0;
1083 }
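/*
 * Worked example of the suppress-mode encoding above (legacy LBR_SELECT
 * path): a request for PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY
 * gives mask == LBR_USER | LBR_ANY == 0x1fe, and
 * mask ^ (LBR_SEL_MASK & ~LBR_CALL_STACK) == 0x1fe ^ 0x1ff == 0x001,
 * i.e. only LBR_KERNEL ("do not capture at ring0") ends up set.
 */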
1084
1085 int intel_pmu_setup_lbr_filter(struct perf_event *event)
1086 {
1087 int ret = 0;
1088
1089 /*
1090 * no LBR on this PMU
1091 */
1092 if (!x86_pmu.lbr_nr)
1093 return -EOPNOTSUPP;
1094
1095 /*
1096 * setup SW LBR filter
1097 */
1098 ret = intel_pmu_setup_sw_lbr_filter(event);
1099 if (ret)
1100 return ret;
1101
1102 /*
1103 * setup HW LBR filter, if any
1104 */
1105 if (x86_pmu.lbr_sel_map)
1106 ret = intel_pmu_setup_hw_lbr_filter(event);
1107
1108 return ret;
1109 }
1110
1111 enum {
1112 ARCH_LBR_BR_TYPE_JCC = 0,
1113 ARCH_LBR_BR_TYPE_NEAR_IND_JMP = 1,
1114 ARCH_LBR_BR_TYPE_NEAR_REL_JMP = 2,
1115 ARCH_LBR_BR_TYPE_NEAR_IND_CALL = 3,
1116 ARCH_LBR_BR_TYPE_NEAR_REL_CALL = 4,
1117 ARCH_LBR_BR_TYPE_NEAR_RET = 5,
1118 ARCH_LBR_BR_TYPE_KNOWN_MAX = ARCH_LBR_BR_TYPE_NEAR_RET,
1119
1120 ARCH_LBR_BR_TYPE_MAP_MAX = 16,
1121 };
1122
1123 static const int arch_lbr_br_type_map[ARCH_LBR_BR_TYPE_MAP_MAX] = {
1124 [ARCH_LBR_BR_TYPE_JCC] = X86_BR_JCC,
1125 [ARCH_LBR_BR_TYPE_NEAR_IND_JMP] = X86_BR_IND_JMP,
1126 [ARCH_LBR_BR_TYPE_NEAR_REL_JMP] = X86_BR_JMP,
1127 [ARCH_LBR_BR_TYPE_NEAR_IND_CALL] = X86_BR_IND_CALL,
1128 [ARCH_LBR_BR_TYPE_NEAR_REL_CALL] = X86_BR_CALL,
1129 [ARCH_LBR_BR_TYPE_NEAR_RET] = X86_BR_RET,
1130 };
1131
1132 /*
1133  * Implement the actual branch filter based on user demand.
1134  * Hardware may not exactly satisfy that request, thus
1135  * we need to inspect opcodes. Mismatched branches are
1136  * discarded. Therefore, the number of branches returned
1137  * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
1138 */
1139 static void
1140 intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
1141 {
1142 u64 from, to;
1143 int br_sel = cpuc->br_sel;
1144 int i, j, type, to_plm;
1145 bool compress = false;
1146
1147 /* if sampling all branches, then nothing to filter */
1148 if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
1149 ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
1150 return;
1151
1152 for (i = 0; i < cpuc->lbr_stack.nr; i++) {
1153
1154 from = cpuc->lbr_entries[i].from;
1155 to = cpuc->lbr_entries[i].to;
1156 type = cpuc->lbr_entries[i].type;
1157
1158 /*
1159 		 * Parse the branch type recorded in the LBR_x_INFO MSR.
1160 		 * OTHER_BRANCH decoding is not supported for now;
1161 		 * the OTHER_BRANCH branch type still relies on software decoding.
1162 */
1163 if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
1164 type <= ARCH_LBR_BR_TYPE_KNOWN_MAX) {
1165 to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
1166 type = arch_lbr_br_type_map[type] | to_plm;
1167 } else
1168 type = branch_type(from, to, cpuc->lbr_entries[i].abort);
1169 if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
1170 if (cpuc->lbr_entries[i].in_tx)
1171 type |= X86_BR_IN_TX;
1172 else
1173 type |= X86_BR_NO_TX;
1174 }
1175
1176 /* if type does not correspond, then discard */
1177 if (type == X86_BR_NONE || (br_sel & type) != type) {
1178 cpuc->lbr_entries[i].from = 0;
1179 compress = true;
1180 }
1181
1182 if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
1183 cpuc->lbr_entries[i].type = common_branch_type(type);
1184 }
1185
1186 if (!compress)
1187 return;
1188
1189 /* remove all entries with from=0 */
1190 for (i = 0; i < cpuc->lbr_stack.nr; ) {
1191 if (!cpuc->lbr_entries[i].from) {
1192 j = i;
1193 while (++j < cpuc->lbr_stack.nr)
1194 cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
1195 cpuc->lbr_stack.nr--;
1196 if (!cpuc->lbr_entries[i].from)
1197 continue;
1198 }
1199 i++;
1200 }
1201 }
1202
1203 void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
1204 {
1205 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1206
1207 /* Cannot get TOS for large PEBS and Arch LBR */
1208 if (static_cpu_has(X86_FEATURE_ARCH_LBR) ||
1209 (cpuc->n_pebs == cpuc->n_large_pebs))
1210 cpuc->lbr_stack.hw_idx = -1ULL;
1211 else
1212 cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
1213
1214 intel_pmu_store_lbr(cpuc, lbr);
1215 intel_pmu_lbr_filter(cpuc);
1216 }
1217
1218 /*
1219 * Map interface branch filters onto LBR filters
1220 */
1221 static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1222 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1223 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1224 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1225 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1226 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_REL_JMP
1227 | LBR_IND_JMP | LBR_FAR,
1228 /*
1229 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
1230 */
1231 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
1232 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
1233 /*
1234 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
1235 */
1236 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
1237 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1238 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1239 };
1240
1241 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1242 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1243 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1244 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1245 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1246 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
1247 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1248 | LBR_FAR,
1249 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
1250 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1251 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1252 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
1253 };
1254
1255 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1256 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1257 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1258 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1259 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1260 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
1261 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1262 | LBR_FAR,
1263 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
1264 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1265 [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1266 | LBR_RETURN | LBR_CALL_STACK,
1267 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1268 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
1269 };
1270
1271 static int arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1272 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = ARCH_LBR_ANY,
1273 [PERF_SAMPLE_BRANCH_USER_SHIFT] = ARCH_LBR_USER,
1274 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = ARCH_LBR_KERNEL,
1275 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1276 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = ARCH_LBR_RETURN |
1277 ARCH_LBR_OTHER_BRANCH,
1278 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = ARCH_LBR_REL_CALL |
1279 ARCH_LBR_IND_CALL |
1280 ARCH_LBR_OTHER_BRANCH,
1281 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = ARCH_LBR_IND_CALL,
1282 [PERF_SAMPLE_BRANCH_COND_SHIFT] = ARCH_LBR_JCC,
1283 [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = ARCH_LBR_REL_CALL |
1284 ARCH_LBR_IND_CALL |
1285 ARCH_LBR_RETURN |
1286 ARCH_LBR_CALL_STACK,
1287 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = ARCH_LBR_IND_JMP,
1288 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = ARCH_LBR_REL_CALL,
1289 };
1290
1291 /* core */
1292 void __init intel_pmu_lbr_init_core(void)
1293 {
1294 x86_pmu.lbr_nr = 4;
1295 x86_pmu.lbr_tos = MSR_LBR_TOS;
1296 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1297 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1298
1299 /*
1300 * SW branch filter usage:
1301 * - compensate for lack of HW filter
1302 */
1303 }
1304
1305 /* nehalem/westmere */
1306 void __init intel_pmu_lbr_init_nhm(void)
1307 {
1308 x86_pmu.lbr_nr = 16;
1309 x86_pmu.lbr_tos = MSR_LBR_TOS;
1310 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1311 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1312
1313 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1314 x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
1315
1316 /*
1317 * SW branch filter usage:
1318 * - workaround LBR_SEL errata (see above)
1319 * - support syscall, sysret capture.
1320 	 *   That requires LBR_FAR, but that means far
1321 	 *   jmps need to be filtered out.
1322 */
1323 }
1324
1325 /* sandy bridge */
1326 void __init intel_pmu_lbr_init_snb(void)
1327 {
1328 x86_pmu.lbr_nr = 16;
1329 x86_pmu.lbr_tos = MSR_LBR_TOS;
1330 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1331 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1332
1333 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1334 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
1335
1336 /*
1337 * SW branch filter usage:
1338 * - support syscall, sysret capture.
1339 	 *   That requires LBR_FAR, but that means far
1340 	 *   jmps need to be filtered out.
1341 */
1342 }
1343
1344 static inline struct kmem_cache *
1345 create_lbr_kmem_cache(size_t size, size_t align)
1346 {
1347 return kmem_cache_create("x86_lbr", size, align, 0, NULL);
1348 }
1349
1350 /* haswell */
1351 void intel_pmu_lbr_init_hsw(void)
1352 {
1353 size_t size = sizeof(struct x86_perf_task_context);
1354
1355 x86_pmu.lbr_nr = 16;
1356 x86_pmu.lbr_tos = MSR_LBR_TOS;
1357 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1358 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1359
1360 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1361 x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
1362
1363 x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1364
1365 if (lbr_from_signext_quirk_needed())
1366 static_branch_enable(&lbr_from_quirk_key);
1367 }
1368
1369 /* skylake */
1370 __init void intel_pmu_lbr_init_skl(void)
1371 {
1372 size_t size = sizeof(struct x86_perf_task_context);
1373
1374 x86_pmu.lbr_nr = 32;
1375 x86_pmu.lbr_tos = MSR_LBR_TOS;
1376 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1377 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1378 x86_pmu.lbr_info = MSR_LBR_INFO_0;
1379
1380 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1381 x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
1382
1383 x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1384
1385 /*
1386 * SW branch filter usage:
1387 * - support syscall, sysret capture.
1388 	 *   That requires LBR_FAR, but that means far
1389 	 *   jmps need to be filtered out.
1390 */
1391 }
1392
1393 /* atom */
1394 void __init intel_pmu_lbr_init_atom(void)
1395 {
1396 /*
1397 	 * Only models starting at stepping 10 seem
1398 	 * to have an operational LBR which can freeze
1399 	 * on PMU interrupt.
1400 */
1401 if (boot_cpu_data.x86_model == 28
1402 && boot_cpu_data.x86_stepping < 10) {
1403 pr_cont("LBR disabled due to erratum");
1404 return;
1405 }
1406
1407 x86_pmu.lbr_nr = 8;
1408 x86_pmu.lbr_tos = MSR_LBR_TOS;
1409 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1410 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1411
1412 /*
1413 * SW branch filter usage:
1414 * - compensate for lack of HW filter
1415 */
1416 }
1417
1418 /* slm */
1419 void __init intel_pmu_lbr_init_slm(void)
1420 {
1421 x86_pmu.lbr_nr = 8;
1422 x86_pmu.lbr_tos = MSR_LBR_TOS;
1423 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1424 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1425
1426 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1427 x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
1428
1429 /*
1430 * SW branch filter usage:
1431 * - compensate for lack of HW filter
1432 */
1433 pr_cont("8-deep LBR, ");
1434 }
1435
1436 /* Knights Landing */
1437 void intel_pmu_lbr_init_knl(void)
1438 {
1439 x86_pmu.lbr_nr = 8;
1440 x86_pmu.lbr_tos = MSR_LBR_TOS;
1441 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1442 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1443
1444 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1445 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
1446
1447 /* Knights Landing does have MISPREDICT bit */
1448 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
1449 x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
1450 }
1451
1452 /*
1453 * LBR state size is variable based on the max number of registers.
1454 * This calculates the expected state size, which should match
1455 * what the hardware enumerates for the size of XFEATURE_LBR.
1456 */
1457 static inline unsigned int get_lbr_state_size(void)
1458 {
1459 return sizeof(struct arch_lbr_state) +
1460 x86_pmu.lbr_nr * sizeof(struct lbr_entry);
1461 }
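/*
 * Example (a sketch, assuming the natural 3 x u64 == 24-byte layout of
 * struct lbr_entry used above): with x86_pmu.lbr_nr == 32 this evaluates
 * to sizeof(struct arch_lbr_state) + 768 bytes, which must match
 * xfeature_size(XFEATURE_LBR) for XSAVES support to stay enabled.
 */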
1462
1463 static bool is_arch_lbr_xsave_available(void)
1464 {
1465 if (!boot_cpu_has(X86_FEATURE_XSAVES))
1466 return false;
1467
1468 /*
1469 * Check the LBR state with the corresponding software structure.
1470 * Disable LBR XSAVES support if the size doesn't match.
1471 */
1472 if (xfeature_size(XFEATURE_LBR) == 0)
1473 return false;
1474
1475 if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
1476 return false;
1477
1478 return true;
1479 }
1480
1481 void __init intel_pmu_arch_lbr_init(void)
1482 {
1483 struct pmu *pmu = x86_get_pmu(smp_processor_id());
1484 union cpuid28_eax eax;
1485 union cpuid28_ebx ebx;
1486 union cpuid28_ecx ecx;
1487 unsigned int unused_edx;
1488 bool arch_lbr_xsave;
1489 size_t size;
1490 u64 lbr_nr;
1491
1492 /* Arch LBR Capabilities */
1493 cpuid(28, &eax.full, &ebx.full, &ecx.full, &unused_edx);
1494
1495 lbr_nr = fls(eax.split.lbr_depth_mask) * 8;
1496 if (!lbr_nr)
1497 goto clear_arch_lbr;
1498
1499 /* Apply the max depth of Arch LBR */
1500 if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
1501 goto clear_arch_lbr;
1502
1503 x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
1504 x86_pmu.lbr_deep_c_reset = eax.split.lbr_deep_c_reset;
1505 x86_pmu.lbr_lip = eax.split.lbr_lip;
1506 x86_pmu.lbr_cpl = ebx.split.lbr_cpl;
1507 x86_pmu.lbr_filter = ebx.split.lbr_filter;
1508 x86_pmu.lbr_call_stack = ebx.split.lbr_call_stack;
1509 x86_pmu.lbr_mispred = ecx.split.lbr_mispred;
1510 x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;
1511 x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
1512 x86_pmu.lbr_nr = lbr_nr;
1513
1514
1515 arch_lbr_xsave = is_arch_lbr_xsave_available();
1516 if (arch_lbr_xsave) {
1517 size = sizeof(struct x86_perf_task_context_arch_lbr_xsave) +
1518 get_lbr_state_size();
1519 pmu->task_ctx_cache = create_lbr_kmem_cache(size,
1520 XSAVE_ALIGNMENT);
1521 }
1522
1523 if (!pmu->task_ctx_cache) {
1524 arch_lbr_xsave = false;
1525
1526 size = sizeof(struct x86_perf_task_context_arch_lbr) +
1527 lbr_nr * sizeof(struct lbr_entry);
1528 pmu->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1529 }
1530
1531 x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
1532 x86_pmu.lbr_to = MSR_ARCH_LBR_TO_0;
1533 x86_pmu.lbr_info = MSR_ARCH_LBR_INFO_0;
1534
1535 /* LBR callstack requires both CPL and Branch Filtering support */
1536 if (!x86_pmu.lbr_cpl ||
1537 !x86_pmu.lbr_filter ||
1538 !x86_pmu.lbr_call_stack)
1539 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_NOT_SUPP;
1540
1541 if (!x86_pmu.lbr_cpl) {
1542 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_NOT_SUPP;
1543 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_NOT_SUPP;
1544 } else if (!x86_pmu.lbr_filter) {
1545 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_NOT_SUPP;
1546 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_NOT_SUPP;
1547 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_NOT_SUPP;
1548 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_NOT_SUPP;
1549 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_NOT_SUPP;
1550 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_NOT_SUPP;
1551 arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_NOT_SUPP;
1552 }
1553
1554 x86_pmu.lbr_ctl_mask = ARCH_LBR_CTL_MASK;
1555 x86_pmu.lbr_ctl_map = arch_lbr_ctl_map;
1556
1557 if (!x86_pmu.lbr_cpl && !x86_pmu.lbr_filter)
1558 x86_pmu.lbr_ctl_map = NULL;
1559
1560 x86_pmu.lbr_reset = intel_pmu_arch_lbr_reset;
1561 if (arch_lbr_xsave) {
1562 x86_pmu.lbr_save = intel_pmu_arch_lbr_xsaves;
1563 x86_pmu.lbr_restore = intel_pmu_arch_lbr_xrstors;
1564 x86_pmu.lbr_read = intel_pmu_arch_lbr_read_xsave;
1565 pr_cont("XSAVE ");
1566 } else {
1567 x86_pmu.lbr_save = intel_pmu_arch_lbr_save;
1568 x86_pmu.lbr_restore = intel_pmu_arch_lbr_restore;
1569 x86_pmu.lbr_read = intel_pmu_arch_lbr_read;
1570 }
1571
1572 pr_cont("Architectural LBR, ");
1573
1574 return;
1575
1576 clear_arch_lbr:
1577 setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR);
1578 }
1579
1580 /**
1581 * x86_perf_get_lbr - get the LBR records information
1582 *
1583 * @lbr: the caller's memory to store the LBR records information
1584 *
1585 * Returns: 0 indicates the LBR info has been successfully obtained
1586 */
1587 int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
1588 {
1589 int lbr_fmt = x86_pmu.intel_cap.lbr_format;
1590
1591 lbr->nr = x86_pmu.lbr_nr;
1592 lbr->from = x86_pmu.lbr_from;
1593 lbr->to = x86_pmu.lbr_to;
1594 lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;
1595
1596 return 0;
1597 }
1598 EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
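/*
 * Usage sketch for the export above (illustrative caller only, e.g. a
 * hypervisor sizing its LBR virtualization; setup_guest_lbr() is a
 * hypothetical helper, not an API from this file):
 *
 *	struct x86_pmu_lbr lbr;
 *
 *	x86_perf_get_lbr(&lbr);
 *	if (lbr.nr)
 *		setup_guest_lbr(lbr.nr, lbr.from, lbr.to, lbr.info);
 */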
1599
1600 struct event_constraint vlbr_constraint =
1601 __EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
1602 FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);
1603