#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					3
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4
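
/*
 * Illustrative sketch, not part of the original header: composing a raw
 * EVNTSEL value from the bit-field macros above (Intel layout, where the
 * event code fits in bits 7:0). The helper name is hypothetical and only
 * meant to show how the fields combine.
 */
static inline u64 example_eventsel_config(u64 event, u64 umask)
{
	return (event & ARCH_PERFMON_EVENTSEL_EVENT)	      |
	       ((umask << 8) & ARCH_PERFMON_EVENTSEL_UMASK)   |
	       ARCH_PERFMON_EVENTSEL_USR    |	/* count in user mode */
	       ARCH_PERFMON_EVENTSEL_OS     |	/* count in kernel mode */
	       ARCH_PERFMON_EVENTSEL_INT    |	/* interrupt on overflow */
	       ARCH_PERFMON_EVENTSEL_ENABLE;
}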

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};
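
/*
 * Illustrative sketch, not part of the original header: how the CPUID
 * leaf 0xa enumeration data maps onto the unions and capability struct
 * above. The helper name is hypothetical; cpuid() is assumed to be
 * available from <asm/processor.h>.
 */
static inline void example_read_pmu_capability(struct x86_pmu_capability *cap)
{
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	union cpuid10_edx edx;
	unsigned int unused;

	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);

	cap->version		= eax.split.version_id;
	cap->num_counters_gp	= eax.split.num_counters;
	cap->bit_width_gp	= eax.split.bit_width;
	cap->events_mask	= ebx.full;	/* set bits mean "event not available" */
	cap->events_mask_len	= eax.split.mask_length;
	cap->num_counters_fixed	= edx.split.num_counters_fixed;
	cap->bit_width_fixed	= edx.split.bit_width_fixed;
}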

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
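
/*
 * Illustrative sketch, not part of the original header: each fixed-mode
 * counter owns a 4-bit control nibble in MSR_ARCH_PERFMON_FIXED_CTR_CTRL
 * (bit 0: count in ring 0, bit 1: count in rings 1-3, bit 3: PMI on
 * overflow, per the Intel SDM). The helper name is hypothetical.
 */
static inline u64 example_fixed_ctrl_bits(int idx)
{
	/* 0xb = kernel + user counting, PMI on overflow */
	return 0xbULL << (idx * 4);
}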

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
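
/*
 * Illustrative sketch, not part of the original header: deriving the IBS
 * capability word from CPUID Fn8000_001B_EAX, falling back to the baseline
 * set when the leaf is absent or its flags are invalid. The helper name is
 * hypothetical; cpuid_eax() is assumed to come from <asm/processor.h>.
 */
static inline u32 example_query_ibs_caps(void)
{
	unsigned int max_level = cpuid_eax(0x80000000);
	u32 caps;

	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		return IBS_CAPS_DEFAULT;	/* cpuid flags not valid */

	return caps;
}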

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
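
/*
 * Illustrative sketch, not part of the original header: programming an IBS
 * op sampling period. The 16-bit max-count field stores the period in units
 * of 16, so the value written is period >> 4; IBS_OP_CNT_CTL would select
 * dispatched-op counting instead of cycle counting. The helper name is
 * hypothetical.
 */
static inline u64 example_ibs_op_config(u64 period)
{
	return ((period >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE;
}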

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
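/*
 * The self-referencing define below tells the generic perf code (which
 * checks "#ifndef perf_misc_flags") that an arch-specific implementation
 * of perf_misc_flags() exists.
 */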
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
#else
 static inline void amd_pmu_enable_virt(void) { }
 static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */