/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
	void				(*handle_intel_pt_intr)(void);
};
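/*
 * Illustrative sketch (hypothetical module, e.g. a hypervisor): the guest
 * callbacks above are registered once at init time and torn down on exit.
 * The my_* implementations are assumptions, not part of this header.
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest		= my_is_in_guest,
 *		.is_user_mode		= my_is_user_mode,
 *		.get_guest_ip		= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 *	...
 *	perf_unregister_guest_info_callbacks(&my_guest_cbs);
 */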
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry *entry;
	u32			    max_stack;
	u32			    nr;
	short			    contexts;
	bool			    contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};
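/*
 * Illustrative sketch (hypothetical caller): attaching opaque bytes to a
 * sample as a single-fragment raw record, as the tracepoint path does.
 * The outer perf_raw_record::size is computed by perf_prepare_sample(),
 * so a caller only fills in the fragment chain.
 *
 *	struct perf_raw_record raw = {
 *		.frag = {
 *			.size = payload_len,
 *			.data = payload,
 *		},
 *	};
 *
 *	perf_sample_data_init(&data, 0, 0);
 *	data.raw = &raw;
 */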
/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *  hw_idx: The low level index of raw branch records
 *          for the most recent branch.
 *          -1ULL means invalid/unknown.
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 * The entries[] is an abstraction of raw branch records,
 * which may not be stored in age order in HW, e.g. Intel LBR.
 * The hw_idx exposes the low level index of the raw
 * branch record for the most recent branch, aka entries[0].
 * The hw_idx index is between -1 (unknown) and max depth,
 * which can be retrieved from /sys/devices/cpu/caps/branches.
 * For architectures whose raw branch records are
 * already stored in age order, the hw_idx should be 0.
 */
struct perf_branch_stack {
	__u64				nr;
	__u64				hw_idx;
	struct perf_branch_entry	entries[];
};
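/*
 * Illustrative sketch: consuming a delivered branch stack. entries[] is
 * ordered most recent first, so entries[0] is the youngest branch no
 * matter how the underlying hardware stores its records.
 *
 *	for (i = 0; i < br->nr; i++) {
 *		from = br->entries[i].from;
 *		to   = br->entries[i].to;
 *		...
 *	}
 */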
struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
		struct { /* amd_iommu */
			u8	iommu_bank;
			u8	iommu_cntr;
			u16	padding;
			u64	conf;
			u64	conf1;
		};
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * The PMU stores its hardware address filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	union {
		struct { /* Sampling */
			/*
			 * The period we started this sample with.
			 */
			u64				last_period;

			/*
			 * However much is left of the current period;
			 * note that this is a full 64bit value and
			 * allows for generation of periods longer
			 * than hardware might allow.
			 */
			local64_t			period_left;
		};
		struct { /* Topdown events counting for context switch */
			u64				saved_metric;
			u64				saved_slots;
		};
	};

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD  0x1		/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ 0x2		/* txn to read event group from PMU */
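/*
 * Illustrative sketch of the ADD transaction as the core applies it when
 * scheduling an event group (cf. group_sched_in() in kernel/events/core.c);
 * pseudocode, error paths compressed:
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *
 *	for each event in the group:
 *		if (pmu->add(event, PERF_EF_START))
 *			goto error;
 *
 *	if (!pmu->commit_txn(pmu))
 *		return 0;			// whole group scheduled
 * error:
 *	pmu->cancel_txn(pmu);			// roll the group back
 */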
/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01
#define PERF_PMU_CAP_NO_NMI			0x02
#define PERF_PMU_CAP_AUX_NO_SG			0x04
#define PERF_PMU_CAP_EXTENDED_REGS		0x08
#define PERF_PMU_CAP_EXCLUSIVE			0x10
#define PERF_PMU_CAP_ITRACE			0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40
#define PERF_PMU_CAP_NO_EXCLUDE			0x80
#define PERF_PMU_CAP_AUX_OUTPUT			0x100

struct perf_output_handle;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const struct attribute_group	**attr_update;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int __percpu			*pmu_disable_count;
	struct perf_cpu_context __percpu *pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but the PMU is not present
	 *  -EBUSY	-- @event is for this PMU but the PMU is temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event, this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
	 *  ->stop() that must deal with already being stopped without
	 *  PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this event;
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					bool sched_in);

	/*
	 * Kmem cache of PMU specific data
	 */
	struct kmem_cache		*task_ctx_cache;

	/*
	 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
	 * can be synchronized using this function. See Intel LBR callstack support
	 * implementation and Perf core context switch handling callbacks for usage
	 * examples.
	 */
	void (*swap_task_ctx)		(struct perf_event_context *prev,
					 struct perf_event_context *next);
					/* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(struct perf_event *event, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Take a snapshot of the AUX buffer without touching the event
	 * state, so that preempting ->start()/->stop() callbacks does
	 * not interfere with their logic. Called in PMI context.
	 *
	 * Returns the size of AUX data copied to the output handle.
	 *
	 * Optional.
	 */
	long (*snapshot_aux)		(struct perf_event *event,
					 struct perf_output_handle *handle,
					 unsigned long size);

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of the filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Check if event can be used for aux_output purposes for
	 * events of this PMU.
	 *
	 * Runs from perf_event_open(). Should return 0 for "no match"
	 * or non-zero for "match".
	 */
	int (*aux_output_match)		(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
};
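/*
 * Illustrative sketch (hypothetical driver, bodies and error handling
 * elided): the minimal callback set a counting-only PMU provides before
 * registering itself. Everything else in struct pmu is optional.
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,	// CPU-wide only
 *		.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 *		.event_init	= my_event_init,	// -ENOENT if not ours
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);	// -1: allocate a type
 */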
enum perf_addr_filter_action_t {
	PERF_ADDR_FILTER_ACTION_STOP = 0,
	PERF_ADDR_FILTER_ACTION_START,
	PERF_ADDR_FILTER_ACTION_FILTER,
};

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @path:	object file's path for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size (size==0 means single address trigger)
 * @action:	filter/start/stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct path		path;
	unsigned long		offset;
	unsigned long		size;
	enum perf_addr_filter_action_t	action;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	unsigned int		nr_file_filters;
};

struct perf_addr_filter_range {
	unsigned long		start;
	unsigned long		size;
};

/**
 * enum perf_event_state - the states of an event:
 */
enum perf_event_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and group caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
 * cannot be a group leader. If an event with this flag is detached from the
 * group it is scheduled out and moved into an unrecoverable ERROR state.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
#define PERF_EV_CAP_SIBLING		BIT(2)

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08
#define PERF_ATTACH_ITRACE	0x10
#define PERF_ATTACH_SCHED_CB	0x20

struct perf_cgroup;
struct perf_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

#define for_each_sibling_event(sibling, event)			\
	if ((event)->group_leader == (event))			\
		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
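/*
 * Illustrative sketch: iterating a group's siblings from its leader; the
 * guard in the macro means the loop body only runs when @event really is
 * the group leader.
 *
 *	struct perf_event *sibling;
 *
 *	for_each_sibling_event(sibling, leader) {
 *		if (sibling->pmu != leader->pmu)
 *			return -EINVAL;
 *	}
 */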
/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		sibling_list;
	struct list_head		active_list;
	/*
	 * Node on the pinned or flexible tree located at the event context;
	 */
	struct rb_node			group_node;
	u64				group_index;
	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int				event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_state		state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 */
	u64				total_time_enabled;
	u64				total_time_running;
	u64				tstamp;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct perf_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	struct perf_addr_filter_range	*addr_filter_ranges;
	unsigned long			addr_filters_gen;

	/* for aux_output events */
	struct perf_event		*aux_event;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t		orig_overflow_handler;
	struct bpf_prog			*prog;
#endif

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* the cgroup the event is attached to */
#endif

#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};
struct perf_event_groups {
	struct rb_root	tree;
	u64		index;
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct perf_event_groups	pinned_groups;
	struct perf_event_groups	flexible_groups;
	struct list_head		event_list;

	struct list_head		pinned_active;
	struct list_head		flexible_active;

	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	/*
	 * Set when nr_events != nr_active, but tolerant of events that need
	 * not be active due to scheduling constraints, such as cgroups.
	 */
	int				rotate_necessary;
	refcount_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;
	u64				timeoffset;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	 /* cgroup evts */
#endif
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
	struct list_head		cgrp_cpuctx_entry;
#endif

	struct list_head		sched_cb_entry;
	int				sched_cb_usage;

	int				online;
	/*
	 * Per-CPU storage for iterators used in visit_groups_merge. The default
	 * storage is of size 2 to hold the CPU and any CPU event iterators.
	 */
	int				heap_size;
	struct perf_event		**heap;
	struct perf_event		*heap_default[2];
};

struct perf_output_handle {
	struct perf_event		*event;
	struct perf_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	u64				aux_flags;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

struct bpf_perf_event_data_kern {
	bpf_user_pt_regs_t *regs;
	struct perf_sample_data *data;
	struct perf_event *event;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
	u64				timeoffset;
	int				active;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);

extern void perf_pmu_resched(struct pmu *pmu);

extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				int src_cpu, int dst_cpu);
int perf_event_read_local(struct perf_event *event, u64 *value,
			  u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
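/*
 * Illustrative sketch (in-kernel user, error handling elided): create a
 * CPU-bound cycle counter, read it, then release it. The my_overflow
 * callback is an assumption and may be NULL for pure counting.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *event;
 *	u64 enabled, running, count;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *
 *	count = perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */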
struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), grouped so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union  perf_mem_data_src	data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;
	u64				aux_size;

	struct perf_regs		regs_user;
	struct perf_regs		regs_intr;
	u64				stack_user_size;

	u64				phys_addr;
	u64				cgroup;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		    PERF_MEM_S(LVL, NA)   |\
		    PERF_MEM_S(SNOOP, NA) |\
		    PERF_MEM_S(LOCK, NA)  |\
		    PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}
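/*
 * Illustrative sketch: a PMI handler feeding an overflow into the core,
 * modelled on the x86 handler; cpuc->lbr_stack and my_pmu_stop() stand in
 * for driver-private names. A non-zero return from perf_event_overflow()
 * means the event is throttled and must be stopped.
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *
 *	if (has_branch_stack(event))
 *		data.br_stack = &cpuc->lbr_stack;
 *
 *	if (perf_event_overflow(event, &data, regs))
 *		my_pmu_stop(event, 0);
 */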
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				     struct perf_sample_data *data,
				     struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern int perf_event_output(struct perf_event *event,
			     struct perf_sample_data *data,
			     struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool event_has_any_exclude_flag(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;

	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv ||
	       attr->exclude_guest || attr->exclude_host;
}

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

/*
 * Return 1 for event in sw context, 0 for event in hw context
 */
static inline int in_software_context(struct perf_event *event)
{
	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
}

static inline int is_exclusive_pmu(struct pmu *pmu)
{
	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * When generating a perf sample in-line, instead of from an interrupt /
 * exception, we lack a pt_regs. This is typically used from software events
 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
 *
 * We typically don't need a full set, but (for x86) do require:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - sp for PERF_SAMPLE_CALLCHAIN
 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
 *
 * NOTE: assumes @regs is otherwise already 0 filled; this is important for
 * things like PERF_SAMPLE_REGS_INTR.
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}
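/*
 * Illustrative usage: the page-fault path counts a software event with a
 * single call; the static key above keeps this free when no such event
 * exists (cf. mm/memory.c):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */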
DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
		return true;
	return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (perf_sw_migrate_enabled())
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (perf_sw_migrate_enabled() && task->sched_migrated) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
			       bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
				 enum perf_bpf_event_type type,
				 u16 flags);

extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
{
	/*
	 * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
	 * the callbacks between a !NULL check and dereferences, to ensure
	 * pending stores/changes to the callback pointers are visible before a
	 * non-NULL perf_guest_cbs is visible to readers, and to prevent a
	 * module from unloading callbacks while readers are active.
	 */
	return rcu_dereference(perf_guest_cbs);
}
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
extern void perf_event_text_poke(const void *addr,
				 const void *old_bytes, size_t old_len,
				 const void *new_bytes, size_t new_len);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
extern void put_callchain_entry(int rctx);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}
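/*
 * Illustrative sketch (x86-flavoured pseudocode): an arch unwinder fills
 * the callchain entry and stops as soon as perf_callchain_store() reports
 * that the entry is full:
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		if (perf_callchain_store(entry, regs->ip))
 *			return;
 *		for each frame found by the unwinder:
 *			if (perf_callchain_store(entry, frame->ip))
 *				return;
 *	}
 */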
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);

/* Access to perf_event_open(2) syscall. */
#define PERF_SECURITY_OPEN		0

/* Finer grained perf_event_open(2) access control. */
#define PERF_SECURITY_CPU		1
#define PERF_SECURITY_KERNEL		2
#define PERF_SECURITY_TRACEPOINT	3

static inline int perf_is_paranoid(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline int perf_allow_kernel(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
		return -EACCES;

	return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
}

static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
		return -EACCES;

	return security_perf_event_open(attr, PERF_SECURITY_CPU);
}

static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
		return -EPERM;

	return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = &event->addr_filters;

	if (event->parent)
		ifh = &event->parent->addr_filters;

	return ifh;
}

extern void perf_event_addr_filters_sync(struct perf_event *event);

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_sample_data *data,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				     struct perf_sample_data *data,
				     struct perf_event *event,
				     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_sample_data *data,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
				 struct perf_output_handle *handle,
				 unsigned long from, unsigned long to);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
									{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
static inline const struct perf_event *perf_get_event(struct file *file)
{
	return ERR_PTR(-EINVAL);
}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline int perf_event_read_local(struct perf_event *event, u64 *value,
					u64 *enabled, u64 *running)
{
	return -EINVAL;
}
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }

typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
				      bool unregister, const char *sym)	{ }
static inline void perf_event_bpf_event(struct bpf_prog *prog,
					enum perf_bpf_event_type type,
					u16 flags)			{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_text_poke(const void *addr,
					const void *old_bytes,
					size_t old_len,
					const void *new_bytes,
					size_t new_len)			{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
static inline int perf_event_period(struct perf_event *event, u64 value)
{
	return -EINVAL;
}
static inline u64 perf_event_pause(struct perf_event *event, bool reset)
{
	return 0;
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
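/*
 * Illustrative sketch: the canonical emission sequence around
 * perf_output_put(), as used by the side-band record writers in
 * kernel/events/core.c. "rec" stands in for a record struct whose first
 * member is a struct perf_event_header; error handling elided.
 *
 *	struct perf_output_handle handle;
 *	struct perf_sample_data sample;
 *
 *	perf_event_header__init_id(&rec.header, &sample, event);
 *
 *	if (perf_output_begin(&handle, &sample, event, rec.header.size))
 *		return;
 *
 *	perf_output_put(&handle, rec);
 *	perf_event__output_id_sample(event, &handle, &sample);
 *	perf_output_end(&handle);
 */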
struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

struct perf_pmu_events_ht_attr {
	struct device_attribute			attr;
	u64					id;
	const char				*event_str_ht;
	const char				*event_str_noht;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			    \
static struct perf_pmu_events_attr _var = {				    \
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						    \
	.event_str	= _str,						    \
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
			       struct device_attribute *attr,		\
			       char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
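/*
 * Illustrative usage: expose a config bit-field layout in sysfs, e.g. as
 * /sys/bus/event_source/devices/<pmu>/format/event:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 */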
/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#endif

extern void __weak arch_perf_update_userpage(struct perf_event *event,
					     struct perf_event_mmap_page *userpg,
					     u64 now);

#endif /* _LINUX_PERF_EVENT_H */