/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
#include "evsel.h"
#include "util.h"
#include "auxtrace.h"
#include <signal.h>
#include <unistd.h>

struct pollfd;
struct thread_map;
struct cpu_map;
struct record_opts;

#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)

/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 */
struct perf_mmap {
	void		 *base;
	int		 mask;
	int		 fd;
	refcount_t	 refcnt;
	u64		 prev;
	struct auxtrace_mmap auxtrace_mmap;
	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};

static inline size_t
perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}
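
/*
 * Illustrative sketch (not part of the original header): the mmap length
 * is the data area (mask + 1 bytes, a power of two) plus one extra page
 * for the control header (struct perf_event_mmap_page).  A caller tearing
 * down a ring buffer might therefore do something like:
 *
 *	if (map->base != NULL) {
 *		munmap(map->base, perf_mmap__mmap_len(map));
 *		map->base = NULL;
 *	}
 */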

/*
 * State machine of bkw_mmap_state:
 *
 *                     .________________(forbid)_____________.
 *                     |                                     V
 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *                     ^  ^              |   ^               |
 *                     |  |__(forbid)____/   |___(forbid)___/|
 *                     |                                     |
 *                      \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,
	BKW_MMAP_RUNNING,
	BKW_MMAP_DATA_PENDING,
	BKW_MMAP_EMPTY,
};
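
/*
 * Illustrative sketch (an assumption about typical use, not taken from
 * the sources): a reader of backward ring buffers walks the transitions
 * above through perf_evlist__toggle_bkw_mmap(), roughly:
 *
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
 *	... read with perf_evlist__mmap_read_backward() and consume ...
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 *
 * i.e. transition (1) to pause, read (2), then (3) to resume recording.
 */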

struct perf_evlist {
	struct list_head entries;
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
	int		 nr_entries;
	int		 nr_groups;
	int		 nr_mmaps;
	bool		 overwrite;
	bool		 enabled;
	bool		 has_user_cpus;
	size_t		 mmap_len;
	int		 id_pos;
	int		 is_pos;
	u64		 combined_sample_type;
	enum bkw_mmap_state bkw_mmap_state;
	struct {
		int	cork_fd;
		pid_t	pid;
	} workload;
	struct fdarray	 pollfd;
	struct perf_mmap *mmap;
	struct perf_mmap *backward_mmap;
	struct thread_map *threads;
	struct cpu_map	  *cpus;
	struct perf_evsel *selected;
	struct events_stats stats;
	struct perf_env	*env;
};

struct perf_evsel_str_handler {
	const char *name;
	void	   *handler;
};

struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
void perf_evlist__delete(struct perf_evlist *evlist);

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);

int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise);

static inline int perf_evlist__add_default(struct perf_evlist *evlist)
{
	return __perf_evlist__add_default(evlist, true);
}
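
/*
 * Illustrative sketch (not in the original header) of the basic life
 * cycle, with error handling simplified: allocate a list, add the
 * default (cycles) event, use it, then drop it.
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__add_default(evlist) < 0) {
 *		perf_evlist__delete(evlist);
 *		return -ENOMEM;
 *	}
 *	...
 *	perf_evlist__delete(evlist);
 */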

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs);

#define perf_evlist__add_default_attrs(evlist, array) \
	__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))

int perf_evlist__add_dummy(struct perf_evlist *evlist);

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler);

void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit);

#define perf_evlist__set_sample_bit(evlist, bit) \
	__perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)

#define perf_evlist__reset_sample_bit(evlist, bit) \
	__perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)
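
/*
 * Illustrative note (not in the original header): the two macros above
 * paste the PERF_SAMPLE_ prefix onto the bit name, so e.g.
 *
 *	perf_evlist__set_sample_bit(evlist, CPU);
 *
 * sets PERF_SAMPLE_CPU in the attr of every evsel on the list, and
 *
 *	perf_evlist__reset_sample_bit(evlist, TIME);
 *
 * clears PERF_SAMPLE_TIME again.
 */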

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name);

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd);

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);

int perf_evlist__poll(struct perf_evlist *evlist, int timeout);

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id);

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);

void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);

union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);

void perf_mmap__read_catchup(struct perf_mmap *md);
void perf_mmap__consume(struct perf_mmap *md, bool overwrite);

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
						 int idx);
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
						  int idx);
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
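
/*
 * Illustrative sketch (assumptions, not taken from the sources) of a
 * forward-reading consumer loop over an already opened and mmapped
 * evlist; each record is consumed after processing so the kernel can
 * reuse the ring buffer space:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		while ((event = perf_evlist__mmap_read_forward(evlist, i))) {
 *			... process the event ...
 *			perf_evlist__mmap_consume(evlist, i);
 *		}
 *	}
 */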

int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);

struct callchain_param;

void perf_evlist__set_id_pos(struct perf_evlist *evlist);
bool perf_can_sample_identifier(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_cpu_wide(void);
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info,
						     void *ucontext));
int perf_evlist__start_workload(struct perf_evlist *evlist);
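
/*
 * Illustrative sketch (not in the original header): prepare_workload()
 * forks the workload but keeps the child blocked on the "cork" pipe (see
 * workload.cork_fd in struct perf_evlist), so the counters can be set up
 * first; start_workload() then releases it.  Roughly, with error handling
 * omitted and a struct target named target assumed:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__start_workload(evlist);
 */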

struct option;

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int perf_evlist__parse_mmap_pages(const struct option *opt,
				  const char *str,
				  int unset);

unsigned long perf_event_mlock_kb_in_pages(void);

int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);

size_t perf_evlist__mmap_size(unsigned long pages);

void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
void perf_evlist__toggle_enable(struct perf_evlist *evlist);

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx);

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel);

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);

void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);

u64 perf_evlist__read_format(struct perf_evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample);

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list);

static inline bool perf_evlist__empty(struct perf_evlist *evlist)
{
	return list_empty(&evlist->entries);
}

static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
	return list_entry(evlist->entries.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
{
	return list_entry(evlist->entries.prev, struct perf_evsel, node);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);

int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);

static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	u64 head = ACCESS_ONCE(pc->data_head);
	rmb();
	return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	mb();
	pc->data_tail = tail;
}
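
/*
 * Illustrative sketch (assumptions, not taken from the sources) of how a
 * reader pairs the two helpers above: records live in the data area at
 * md->base + page_size, indexed modulo the buffer size via md->mask.
 *
 *	u64 head = perf_mmap__read_head(md);
 *	u64 old = md->prev;
 *
 *	while (old != head) {
 *		... copy the record at offset (old & md->mask) ...
 *		old += event->header.size;
 *	}
 *	md->prev = old;
 *	perf_mmap__write_tail(md, old);
 *
 * The rmb() in perf_mmap__read_head() keeps the record reads from being
 * reordered before the data_head load; the mb() in perf_mmap__write_tail()
 * makes sure those reads complete before data_tail tells the kernel the
 * space may be reused.
 */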

bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel);

/**
 * __evlist__for_each_entry - iterate through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
	list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each_entry - iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
	__evlist__for_each_entry(&(evlist)->entries, evsel)
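
/*
 * Illustrative use (not in the original header), e.g. printing the name
 * of every evsel on a list:
 *
 *	struct perf_evsel *evsel;
 *
 *	evlist__for_each_entry(evlist, evsel)
 *		printf("%s\n", perf_evsel__name(evsel));
 */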

/**
 * __evlist__for_each_entry_continue - continue iteration through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_continue(list, evsel) \
	list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_entry_continue - continue iteration through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
	__evlist__for_each_entry_continue(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
	list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
	__evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate through all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct perf_evsel temp iterator
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
	list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_entry_safe - safely iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 * @tmp: struct perf_evsel temp iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
	__evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel);

void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event);
#endif /* __PERF_EVLIST_H */