/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slots in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		4

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t		lock;	/* per-core: protect structure */
	u64                 config;	/* extra MSR config */
	u64                 reg;	/* extra MSR number */
	atomic_t            ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account       regs[EXTRA_REG_MAX];
	int                     refcnt;		/* per-core: #HT threads */
	unsigned                core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			n_txn;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int		group_flag;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online;
};

#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it does not yet know which events will be scheduled
 * subsequently, and may then fail to schedule the events. So we set
 * the overlap flag for such constraints to give the scheduler a hint
 * about which events to pick for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!), which
 * dramatically increases the scheduling cycles on an over-committed
 * system. The number of such EVENT_CONSTRAINT_OVERLAP() macros and
 * their counter masks must therefore be kept to a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
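
/*
 * Reading the masks in the example above: 0x09 selects counters {0, 3},
 * 0x07 selects counters {0, 1, 2} and 0x38 selects counters {3, 4, 5}.
 * 0x09 is not a subset of either of the other two masks, yet it overlaps
 * both of them, so c_overlaps needs the overlap flag. A constraint table
 * built from these entries (illustrative only) would look like:
 *
 *	static struct event_constraint overlap_example[] = {
 *		EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0),	-- counters 0 and 3
 *		EVENT_CONSTRAINT(0, 0x07, 0),		-- counters 0-2
 *		EVENT_CONSTRAINT(0, 0x38, 0),		-- counters 3-5
 *		EVENT_CONSTRAINT_END
 *	};
 */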

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * Filter mask used to validate fixed counter events.
 * The following filters disqualify an event from the fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
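
/*
 * FIXED_EVENT_CONSTRAINT(c, n) pins event code c to fixed counter n: the
 * index mask has only bit 32+n set, i.e. the generic index of fixed
 * counter n, while X86_RAW_EVENT_MASK makes the constraint match on the
 * full event code + umask plus the filter bits listed above. A sketch
 * (the event code shown is illustrative):
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)	-- allowed only on fixed counter 0
 */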

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight; (e)++)
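
/*
 * Constraint tables are terminated by EVENT_CONSTRAINT_END, whose weight
 * is 0 and therefore stops for_each_event_constraint(). A typical lookup
 * loop (a sketch; the table name is illustrative) looks like:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, some_event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */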

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between the PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in struct cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),		\
	.msr = (ms),		\
	.config_mask = (m),	\
	.valid_mask = (vm),	\
	.idx = EXTRA_REG_##i	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
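
/*
 * An extra-reg table entry (a sketch; the MSR name and valid_mask value
 * follow the usual offcore-response setup and are illustrative here) ties
 * event code 0xb7 (OFFCORE_RESPONSE_0) to the MSR_OFFCORE_RSP_0 register
 * and to slot EXTRA_REG_RSP_0:
 *
 *	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0)
 *
 * Tables of struct extra_reg are terminated by EVENT_EXTRA_END.
 */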

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
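
/*
 * X86_CONFIG() builds a raw counter config value from designated
 * initializers for the bit-fields above, e.g. (field values illustrative):
 *
 *	u64 cfg = X86_CONFIG(.event=0x3c, .umask=0x01, .inv=0, .cmask=0);
 *
 * which encodes event 0x3c with umask 0x01 in the layout of union
 * x86_pmu_config.
 */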

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc;
	struct attribute **format_attrs;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);
	void		(*flush_branch_stack)(void);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int		bts, pebs;
	int		bts_active, pebs_active;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int er_flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
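
/*
 * x86_add_quirk() prepends a quirk to the x86_pmu.quirks list; the quirk
 * functions are then run once while the PMU is initialized. A sketch of
 * its use from a model-specific init path (the function name is
 * hypothetical):
 *
 *	static __init void my_model_quirk(void)
 *	{
 *		... fix up event maps, constraints, etc. ...
 *	}
 *
 *	x86_add_quirk(my_model_quirk);
 */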

#define ERF_NO_HT_SHARING	1
#define ERF_HAS_RSP_1		2

extern struct x86_pmu x86_pmu __read_mostly;

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
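
/*
 * The C() macro shortens indexing into these tables; e.g. the entry for
 * last-level-cache read misses (a lookup sketch) is
 *
 *	hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_MISS)]
 *
 * which expands to
 * hw_cache_event_ids[PERF_COUNT_HW_CACHE_LL]
 *		     [PERF_COUNT_HW_CACHE_OP_READ]
 *		     [PERF_COUNT_HW_CACHE_RESULT_MISS].
 */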

u64 x86_perf_event_update(struct perf_event *event);

static inline int x86_pmu_addr_offset(int index)
{
	int offset;

	/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
	alternative_io(ASM_NOP2,
		       "shll $1, %%eax",
		       X86_FEATURE_PERFCTR_CORE,
		       "=a" (offset),
		       "a"  (index));

	return offset;
}

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}
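
/*
 * x86_pmu_config_addr()/x86_pmu_event_addr() translate a counter index
 * into the matching control/counter MSR. For example, on a PMU whose
 * eventsel base is MSR_ARCH_PERFMON_EVENTSEL0 and which does not have
 * X86_FEATURE_PERFCTR_CORE (an illustrative configuration):
 *
 *	x86_pmu_config_addr(2) == MSR_ARCH_PERFMON_EVENTSEL0 + 2
 *
 * With AMD's perfctr-core feature the offset is doubled, since there the
 * control and counter MSRs are interleaved.
 */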

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

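/*
 * kernel_ip(): on 32-bit, kernel addresses lie above PAGE_OFFSET; on
 * 64-bit the kernel occupies the upper half of the address space, so a
 * kernel address has the sign bit set when interpreted as a signed long.
 */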
static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_ds_init(void);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(void);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

int p4_pmu_init(void);

int p6_pmu_init(void);

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

#endif /* CONFIG_CPU_SUP_INTEL */