/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
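
/*
 * A cache event is selected by packing the three ids above into
 * attr.config, one byte per level. For example, to count L1-D read
 * misses (a sketch of the encoding used with sys_perf_event_open()):
 *
 *   attr.type   = PERF_TYPE_HW_CACHE;
 *   attr.config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *                 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *                 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */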

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow profiling them
 * as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,

	PERF_SAMPLE_MAX = 1U << 12,		/* non-ABI */
};

/*
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */

	PERF_SAMPLE_BRANCH_MAX		= 1U << 7, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)
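
/*
 * Example: request sampling of all taken user-level branches (a sketch;
 * which branch types a given PMU actually honours is hardware dependent):
 *
 *   attr.sample_type	     |= PERF_SAMPLE_BRANCH_STACK;
 *   attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *				PERF_SAMPLE_BRANCH_ANY;
 */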

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};

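/*
 * Example: reading a group leader opened with PERF_FORMAT_GROUP |
 * PERF_FORMAT_ID (a sketch; MAX_EVENTS is illustrative and error
 * handling is omitted):
 *
 *   struct {
 *	   __u64 nr;
 *	   struct { __u64 value, id; } cnt[MAX_EVENTS];
 *   } buf;
 *
 *   read(group_fd, &buf, sizeof(buf));
 *   for (i = 0; i < buf.nr; i++)
 *	   printf("id %llu: %llu\n", buf.cnt[i].id, buf.cnt[i].value);
 */
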
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */

				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */

				__reserved_1   : 43;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
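
/*
 * Example: counting instructions for the calling thread (a sketch;
 * glibc provides no wrapper, so the raw syscall is used, and error
 * handling is omitted):
 *
 *   struct perf_event_attr attr = {
 *	   .type     = PERF_TYPE_HARDWARE,
 *	   .size     = sizeof(attr),
 *	   .config   = PERF_COUNT_HW_INSTRUCTIONS,
 *	   .disabled = 1,
 *   };
 *   int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *   __u64 count;
 *
 *   ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *   ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *   ... workload ...
 *   ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *   read(fd, &count, sizeof(count));
 */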

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, idx, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier();
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_usr_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     idx = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_usr_rdpmc && idx) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(idx - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */
	union {
		__u64	capabilities;
		__u64	cap_usr_time  : 1,
			cap_usr_rdpmc : 1,
			cap_____res   : 62;
	};

	/*
	 * If cap_usr_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_usr_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot = (cyc >> time_shift);
	 *   rem = cyc & ((1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *              ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if idx), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (idx)
	 *     running += delta;
	 *
	 *   quot = count / running;
	 *   rem  = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64	__reserved[120];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not overwrite unread data.
	 */
	__u64   data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
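
/*
 * Example: draining the mmap()ed data area from user-space (a sketch;
 * rmb()/mb() stand for the architecture's read/full barriers, "base"
 * points at the data pages and "mask" is the data area size minus one):
 *
 *   __u64 head = pc->data_head;
 *   rmb();			// order record reads after data_head
 *   while (tail < head) {
 *	   struct perf_event_header *hdr = base + (tail & mask);
 *	   ... consume hdr->size bytes ...
 *	   tail += hdr->size;
 *   }
 *   mb();			// finish reads before publishing the tail
 *   pc->data_tail = tail;
 */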

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID),
	 * as described in PERF_RECORD_SAMPLE below. They are stashed just
	 * after the perf_event_header and the fields already present for
	 * the event type, i.e. at the end of the payload. That way a newer
	 * perf.data file will be supported by older perf tools, with these
	 * new optional fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises w.r.t. the
	 *	# stability of its content, it may vary depending on event,
	 *	# hardware, kernel version and phase of the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 *
	 *	{ u64			nr;
	 *	  { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};
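
/*
 * Example: decoding a PERF_RECORD_SAMPLE when attr.sample_type was
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID (a sketch; the fields appear in
 * exactly the order listed above):
 *
 *   struct sample {
 *	   struct perf_event_header header;
 *	   __u64 ip;
 *	   __u32 pid, tid;
 *   } *s = (struct sample *)hdr;
 *
 *   if (hdr->type == PERF_RECORD_SAMPLE)
 *	   printf("ip %#llx pid %u\n", s->ip, s->pid);
 */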

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
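
/*
 * Within a PERF_SAMPLE_CALLCHAIN the context markers above are
 * interleaved with the real ips to delimit each region, e.g.
 * (a sketch):
 *
 *   PERF_CONTEXT_KERNEL, kip0, kip1, PERF_CONTEXT_USER, uip0, uip1, ...
 */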

#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
#define PERF_FLAG_FD_OUTPUT		(1U << 1)
#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional; when it is not
 * supported, mispred = predicted = 0.
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		reserved:62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};
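
/*
 * Example: walking a branch stack, most recent branch first (a sketch):
 *
 *   for (i = 0; i < bs->nr; i++)
 *	   printk("%llx -> %llx %s\n",
 *		  bs->entries[i].from, bs->entries[i].to,
 *		  bs->entries[i].mispred ? "mispredicted" : "predicted");
 */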

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
			int		last_cpu;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct device			*dev;
	const struct attribute_group	**attr_groups;
	char				*name;
	int				type;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try to initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/removes a counter to/from the PMU; can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group event scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this
	 * event; if no implementation is provided it will default to
	 * event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * flush branch stack on context-switches (needed in cpu-wide mode)
	 */
	void (*flush_branch_stack)	(void);
};
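
/*
 * How the core schedules an event group with the transaction interface
 * (a sketch; error handling and locking simplified):
 *
 *   pmu->start_txn(pmu);
 *
 *   list_for_each_entry(event, &leader->sibling_list, group_entry)
 *	   if (event->pmu->add(event, PERF_EF_START))
 *		   goto undo;	// ->del() the already-added events,
 *				// then pmu->cancel_txn(pmu)
 *
 *   if (!pmu->commit_txn(pmu))
 *	   return 0;		// group scheduled as a whole
 */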

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info		*info;	/* timing info, one per cpu */
};
#endif

struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	enum perf_event_context_type	type;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	 /* cgroup evts */
	int				nr_branch_stack; /* branch_stack evt */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);


struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
};

static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_key_false(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}
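
/*
 * Example: emitting a software event from kernel code, as the page
 * fault path does (a sketch):
 *
 *   perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */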

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
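
/*
 * An architecture's unwinder typically fills an entry like this
 * (a sketch; next_frame() is an illustrative helper, not a real API):
 *
 *   perf_callchain_store(entry, regs->ip);
 *   while (next_frame(&frame))
 *	   perf_callchain_store(entry, frame.ip);
 */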

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_any(void)
{
	return sysctl_perf_event_paranoid > 2;
}

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline void perf_event_task_tick(void)				{ }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)smp_processor_id());		\
	register_cpu_notifier(&fn##_nb);				\
} while (0)


#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
			       struct device_attribute *attr,		\
			       char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)

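/*
 * Typical use in a PMU driver (a sketch mirroring the x86 code; the
 * array name is illustrative):
 *
 *   PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *   static struct attribute *intel_arch_formats_attr[] = {
 *	   &format_attr_event.attr,
 *	   NULL,
 *   };
 */
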
#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */