// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "record.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
#include <internal/lib.h>

#include <linux/ctype.h>

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void perf_evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct evsel *evsel);
	void	(*fini)(struct evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct evsel *evsel),
			      void (*fini)(struct evsel *evsel))
{

	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
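
/*
 * Worked example: for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME, three bits of PERF_SAMPLE_MASK are set, so the
 * fixed-size part of each sample is 3 * sizeof(u64) = 24 bytes.
 */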

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * perf_record_sample.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
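
/*
 * Worked example: for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_ID, PERF_SAMPLE_IDENTIFIER is absent and PERF_SAMPLE_ID
 * is present, so after counting IP and TID the result is id_pos = 2,
 * i.e. the id is the third u64 in the sample array.
 */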

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
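
/*
 * Worked example: for sample_type = PERF_SAMPLE_ID | PERF_SAMPLE_CPU |
 * PERF_SAMPLE_STREAM_ID, is_pos = 3, i.e. the id is the third u64
 * counting backwards from the end of a non-sample event.
 */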

void perf_evsel__calc_id_pos(struct evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}

void __perf_evsel__set_sample_bit(struct evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->core.attr.sample_type & bit)) {
		evsel->core.attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->core.attr.sample_type & bit) {
		evsel->core.attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->core.attr.read_format |= PERF_FORMAT_ID;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void evsel__init(struct evsel *evsel,
		 struct perf_event_attr *attr, int idx)
{
	perf_evsel__init(&evsel->core, attr);
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->max_events  = ULONG_MAX;
	evsel->evlist	   = NULL;
	evsel->bpf_obj	   = NULL;
	evsel->bpf_fd	   = -1;
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr   = NULL;
	evsel->metric_name   = NULL;
	evsel->metric_events = NULL;
	evsel->collect_stat  = false;
	evsel->pmu_name      = NULL;
}

struct evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->core.attr.sample_period = 1;
	}

	if (perf_evsel__is_clock(evsel)) {
		/*
		 * The evsel->unit points to the static alias->unit,
		 * so it's ok to use a static string here.
		 */
		static const char *unit = "msec";

		evsel->unit = unit;
		evsel->scale = 1e-6;
	}

	return evsel;
}
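
/*
 * Usage sketch (assuming the evsel__new() wrapper from util/evsel.h,
 * which calls perf_evsel__new_idx() with idx == 0):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct evsel *evsel = evsel__new(&attr);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */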

static bool perf_event_can_profile_kernel(void)
{
	return perf_event_paranoid_check(1);
}

struct evsel *perf_evsel__new_cycles(bool precise)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_kernel	= !perf_event_can_profile_kernel(),
	};
	struct evsel *evsel;

	event_attr_init(&attr);

	if (!precise)
		goto new_event;

	/*
	 * Now let the usual logic that sets up the perf_event_attr defaults
	 * kick in when we return and before perf_evsel__open() is called.
	 */
new_event:
	evsel = evsel__new(&attr);
	if (evsel == NULL)
		goto out;

	evsel->precise_max = true;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%s%s%.*s",
		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
		     attr.exclude_kernel ? "u" : "",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
		goto error_free;
out:
	return evsel;
error_free:
	evsel__delete(evsel);
	evsel = NULL;
	goto out;
}

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
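
/*
 * Usage sketch: unlike perf_evsel__new_idx(), the return value here is
 * ERR_PTR()-encoded, so callers check it with IS_ERR(), e.g. (assuming
 * the perf_evsel__newtp() wrapper that passes idx == 0):
 *
 *	struct evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */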

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->core.attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
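
/*
 * Worked example: with exclude_user = 1 and exclude_hv = 1 but
 * exclude_kernel = 0, only MOD_PRINT(kernel, 'k') emits anything, so a
 * hardware cycles event is rendered as "cycles:k".
 */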

static int perf_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->core.attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->core.attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
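
/*
 * Worked example: config 0x10000 decodes as type 0 (L1D), op 0 (READ),
 * result 1 (MISS), which the tables above render as
 * "L1-dcache-load-misses".
 */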

static int perf_evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->core.attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__tool_name(char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "duration_time");
	return ret;
}

const char *perf_evsel__name(struct evsel *evsel)
{
	char bf[128];

	if (!evsel)
		goto out_unknown;

	if (evsel->name)
		return evsel->name;

	switch (evsel->core.attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		if (evsel->tool_event)
			perf_evsel__tool_name(bf, sizeof(bf));
		else
			perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->core.attr.type);
		break;
	}

	evsel->name = strdup(bf);

	if (evsel->name)
		return evsel->name;
out_unknown:
	return "unknown";
}

const char *perf_evsel__group_name(struct evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with the following rules.
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int perf_evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	struct evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	ret += scnprintf(buf + ret, size - ret, "%s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void __perf_evsel__config_callchain(struct evsel *evsel,
					   struct record_opts *opts,
					   struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (opts->kernel_callchains)
		attr->exclude_callchain_user = 1;
	if (opts->user_callchains)
		attr->exclude_callchain_kernel = 1;
	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS;
			}
		} else
			 pr_warning("Cannot use LBR callstack with branch stack. "
				    "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
				attr->sample_regs_user |= DWARF_MINIMAL_REGS;
				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
			} else {
				attr->sample_regs_user |= PERF_REGS_MASK;
			}
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void perf_evsel__config_callchain(struct evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	if (param->enabled)
		return __perf_evsel__config_callchain(evsel, opts, param);
}

static void
perf_evsel__reset_callgraph(struct evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->core.attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}

static void apply_config_terms(struct evsel *evsel,
			       struct record_opts *opts, bool track)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->core.attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				perf_evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				perf_evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.callgraph;
			break;
		case PERF_EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.branch && strcmp(term->val.branch, "no")) {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.branch,
						 &attr->branch_sample_type);
			} else
				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS:
			evsel->max_events = term->val.max_events;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If the user explicitly set
			 * inherit using config terms, override the global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		case PERF_EVSEL__CONFIG_TERM_PERCORE:
			break;
		case PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT:
			attr->aux_output = term->val.aux_output ? 1 : 0;
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				perf_evsel__set_sample_bit(evsel, ADDR);
				perf_evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->core.attr.mmap_data = track;
			}
			perf_evsel__config_callchain(evsel, opts, &param);
		}
	}
}
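
/*
 * The terms handled above come from per-event syntax on the command
 * line, e.g. (a sketch) "-e cycles/period=100000,call-graph=dwarf/",
 * which is why they take precedence over the global record options.
 */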

static bool is_dummy_event(struct evsel *evsel)
{
	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *     - all independent events and group leaders have
 *       enable_on_exec set
 *     - we don't specifically enable or disable any event during
 *       the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *     - we specifically enable or disable all events during
 *       the record command
 *
 *     When attaching events to an already running traced program we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->core.attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of a single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply the group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->core.nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than the leader in case the leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->freq           = 0;
		attr->sample_freq    = 0;
		attr->sample_period  = 0;
		attr->write_backward = 0;

		/*
		 * We don't get a sample for slave events, we make them
		 * when delivering the group leader sample. Set the slave
		 * event to follow the master sample_type to ease up
		 * reporting.
		 */
		attr->sample_type = leader->core.attr.sample_type;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING |
			PERF_FORMAT_ID;
		attr->inherit_stat = 1;
	}

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for the function trace
	 * event, due to issues with page faults while tracing the page
	 * fault handler and its overall trickiness.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->core.attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		perf_evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs && !evsel->no_aux_samples) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (opts->sample_user_regs && !evsel->no_aux_samples) {
		attr->sample_regs_user |= opts->sample_user_regs;
		perf_evsel__set_sample_bit(evsel, REGS_USER);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		perf_evsel__set_sample_bit(evsel, CPU);

	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->sample_phys_addr)
		perf_evsel__set_sample_bit(evsel, PHYS_ADDR);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;
	attr->ksymbol = track && !perf_missing_features.ksymbol;
	attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;

	if (opts->record_namespaces)
		attr->namespaces  = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
		!opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		attr->precise_ip = 3;

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	if (evsel->core.own_cpus || evsel->unit)
		evsel->core.attr.read_format |= PERF_FORMAT_ID;

	/*
	 * Apply event-specific term settings; they override any global
	 * configuration.
	 */
	apply_config_terms(evsel, opts, track);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;

	/* The --period option takes precedence. */
	if (opts->period_set) {
		if (opts->period)
			perf_evsel__set_sample_bit(evsel, PERIOD);
		else
			perf_evsel__reset_sample_bit(evsel, PERIOD);
	}

	/*
	 * For initial_delay, a dummy event is added implicitly.
	 * The software event will error out with -EOPNOTSUPP
	 * if the BRANCH_STACK bit is set.
	 */
	if (opts->initial_delay && is_dummy_event(evsel))
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
}

int perf_evsel__set_filter(struct evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

static int perf_evsel__append_filter(struct evsel *evsel,
				     const char *fmt, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int perf_evsel__append_tp_filter(struct evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
}

int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "%s,%s", filter);
}

int evsel__enable(struct evsel *evsel)
{
	int err = perf_evsel__enable(&evsel->core);

	if (!err)
		evsel->disabled = false;

	return err;
}

int evsel__disable(struct evsel *evsel)
{
	int err = perf_evsel__disable(&evsel->core);
	/*
	 * We mark it disabled here so that tools that disable an event can
	 * ignore events after they disable it. I.e. the ring buffer may
	 * already have a few more events queued up before the kernel got
	 * the stop request.
	 */
	if (!err)
		evsel->disabled = true;

	return err;
}

static void perf_evsel__free_config_terms(struct evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del_init(&term->list);
		free(term);
	}
}

void perf_evsel__exit(struct evsel *evsel)
{
	assert(list_empty(&evsel->core.node));
	assert(evsel->evlist == NULL);
	perf_evsel__free_counts(evsel);
	perf_evsel__free_fd(&evsel->core);
	perf_evsel__free_id(&evsel->core);
	perf_evsel__free_config_terms(evsel);
	cgroup__put(evsel->cgrp);
	perf_cpu_map__put(evsel->core.cpus);
	perf_cpu_map__put(evsel->core.own_cpus);
	perf_thread_map__put(evsel->core.threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	zfree(&evsel->pmu_name);
	zfree(&evsel->per_pkg_mask);
	zfree(&evsel->metric_events);
	perf_evsel__object.fini(evsel);
}

void evsel__delete(struct evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

void perf_evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run);
		}
	}

	if (pscaled)
		*pscaled = scaled;
}
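
/*
 * Worked example: if a multiplexed counter ran for only half of its
 * enabled time (ena = 2000000, run = 1000000), a raw val of 500 is
 * scaled up to val * ena / run = 1000 and *pscaled is set to 1.
 */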

static int
perf_evsel__read_one(struct evsel *evsel, int cpu, int thread)
{
	struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);

	return perf_evsel__read(&evsel->core, cpu, thread, count);
}

static void
perf_evsel__set_count(struct evsel *counter, int cpu, int thread,
		      u64 val, u64 ena, u64 run)
{
	struct perf_counts_values *count;

	count = perf_counts(counter->counts, cpu, thread);

	count->val    = val;
	count->ena    = ena;
	count->run    = run;

	perf_counts__set_loaded(counter->counts, cpu, thread, true);
}

static int
perf_evsel__process_group_data(struct evsel *leader,
			       int cpu, int thread, u64 *data)
{
	u64 read_format = leader->core.attr.read_format;
	struct sample_read_value *v;
	u64 nr, ena = 0, run = 0, i;

	nr = *data++;

	if (nr != (u64) leader->core.nr_members)
		return -EINVAL;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		ena = *data++;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		run = *data++;

	v = (struct sample_read_value *) data;

	perf_evsel__set_count(leader, cpu, thread,
			      v[0].value, ena, run);

	for (i = 1; i < nr; i++) {
		struct evsel *counter;

		counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
		if (!counter)
			return -EINVAL;

		perf_evsel__set_count(counter, cpu, thread,
				      v[i].value, ena, run);
	}

	return 0;
}
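
/*
 * The layout consumed above is the PERF_FORMAT_GROUP read format
 * documented in perf_event_open(2):
 *
 *	u64 nr;
 *	u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	struct { u64 value, id; } cntr[nr];	// id needs PERF_FORMAT_ID
 */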

static int
perf_evsel__read_group(struct evsel *leader, int cpu, int thread)
{
	struct perf_stat_evsel *ps = leader->stats;
	u64 read_format = leader->core.attr.read_format;
	int size = perf_evsel__read_size(&leader->core);
	u64 *data = ps->group_data;

	if (!(read_format & PERF_FORMAT_ID))
		return -EINVAL;

	if (!perf_evsel__is_group_leader(leader))
		return -EINVAL;

	if (!data) {
		data = zalloc(size);
		if (!data)
			return -ENOMEM;

		ps->group_data = data;
	}

	if (FD(leader, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(leader, cpu, thread), data, size) <= 0)
		return -errno;

	return perf_evsel__process_group_data(leader, cpu, thread, data);
}

int perf_evsel__read_counter(struct evsel *evsel, int cpu, int thread)
{
	u64 read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_GROUP)
		return perf_evsel__read_group(evsel, cpu, thread);
	else
		return perf_evsel__read_one(evsel, cpu, thread);
}

int __perf_evsel__read_on_cpu(struct evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}
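
/*
 * Note: nv == 3 above assumes the non-group read layout
 * { value, time_enabled, time_running }, which is what
 * struct perf_counts_values holds when scaling is requested.
 */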

static int get_group_fd(struct evsel *evsel, int cpu, int thread)
{
	struct evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->core.fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

static void perf_evsel__remove_fd(struct evsel *pos,
				  int nr_cpus, int nr_threads,
				  int thread_idx)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}

static int update_fds(struct evsel *evsel,
		      int nr_cpus, int cpu_idx,
		      int nr_threads, int thread_idx)
{
	struct evsel *pos;

	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
		return -EINVAL;

	evlist__for_each_entry(evsel->evlist, pos) {
		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;

		perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);

		/*
		 * Since the fds for the next evsel have not been created,
		 * there is no need to iterate the whole event list.
		 */
		if (pos == evsel)
			break;
	}
	return 0;
}

static bool ignore_missing_thread(struct evsel *evsel,
				  int nr_cpus, int cpu,
				  struct perf_thread_map *threads,
				  int thread, int err)
{
	pid_t ignore_pid = perf_thread_map__pid(threads, thread);

	if (!evsel->ignore_missing_thread)
		return false;

	/* The system wide setup does not work with threads. */
	if (evsel->core.system_wide)
		return false;

	/* -ESRCH is the perf event syscall errno for pids that are not found. */
	if (err != -ESRCH)
		return false;

	/* If there's only one thread, let it fail. */
	if (threads->nr == 1)
		return false;

	/*
	 * We should remove the fd for the missing thread first,
	 * because thread_map__remove() will decrease threads->nr.
	 */
	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
		return false;

	if (thread_map__remove(threads, thread))
		return false;

	pr_warning("WARNING: Ignored open failure for pid %d\n",
		   ignore_pid);
	return true;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}

static void display_attr(struct perf_event_attr *attr)
{
	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}
}

static int perf_event_open(struct evsel *evsel,
			   pid_t pid, int cpu, int group_fd,
			   unsigned long flags)
{
	int precise_ip = evsel->core.attr.precise_ip;
	int fd;

	while (1) {
		pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
			  pid, cpu, group_fd, flags);

		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags);
		if (fd >= 0)
			break;

		/* Do not try less precise if not requested. */
		if (!evsel->precise_max)
			break;

		/*
		 * We tried all the precise_ip values, and it's
		 * still failing, so leave it to standard fallback.
		 */
		if (!evsel->core.attr.precise_ip) {
			evsel->core.attr.precise_ip = precise_ip;
			break;
		}

		pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
		evsel->core.attr.precise_ip--;
		pr_debug2("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
		display_attr(&evsel->core.attr);
	}

	return fd;
}

int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
		struct perf_thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
	    (perf_missing_features.aux_output     && evsel->core.attr.aux_output))
		return -EINVAL;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = thread_map__new_by_tid(-1);
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->core.system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->core.fd == NULL &&
	    perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->core.attr.use_clockid = 0;
		evsel->core.attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->core.attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
	if (perf_missing_features.lbr_flags)
		evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
				     PERF_SAMPLE_BRANCH_NO_CYCLES);
	if (perf_missing_features.group_read && evsel->core.attr.inherit)
		evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
	if (perf_missing_features.ksymbol)
		evsel->core.attr.ksymbol = 0;
	if (perf_missing_features.bpf)
		evsel->core.attr.bpf_event = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->core.attr.sample_id_all = 0;

	display_attr(&evsel->core.attr);

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int fd, group_fd;

			if (!evsel->cgrp && !evsel->core.system_wide)
				pid = perf_thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			test_attr__ready();

			fd = perf_event_open(evsel, pid, cpus->map[cpu],
					     group_fd, flags);

			FD(evsel, cpu, thread) = fd;

			if (fd < 0) {
				err = -errno;

				if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
					/*
					 * We just removed 1 thread, so take a step
					 * back on thread index and lower the upper
					 * nthreads limit.
					 */
					nthreads--;
					thread--;

1683 					err = 0;
1684 					continue;
1685 				}
1686 
1687 				pr_debug2("\nsys_perf_event_open failed, error %d\n",
1688 					  err);
1689 				goto try_fallback;
1690 			}
1691 
1692 			pr_debug2(" = %d\n", fd);
1693 
1694 			if (evsel->bpf_fd >= 0) {
1695 				int evt_fd = fd;
1696 				int bpf_fd = evsel->bpf_fd;
1697 
1698 				err = ioctl(evt_fd,
1699 					    PERF_EVENT_IOC_SET_BPF,
1700 					    bpf_fd);
1701 				if (err && errno != EEXIST) {
1702 					pr_err("failed to attach bpf fd %d: %s\n",
1703 					       bpf_fd, strerror(errno));
1704 					err = -EINVAL;
1705 					goto out_close;
1706 				}
1707 			}
1708 
1709 			set_rlimit = NO_CHANGE;
1710 
1711 			/*
1712 			 * If we succeeded but had to kill clockid, fail and
1713 			 * have perf_evsel__open_strerror() print us a nice
1714 			 * error.
1715 			 */
1716 			if (perf_missing_features.clockid ||
1717 			    perf_missing_features.clockid_wrong) {
1718 				err = -EINVAL;
1719 				goto out_close;
1720 			}
1721 		}
1722 	}
1723 
1724 	return 0;
1725 
1726 try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them, try to increase the limits.
	 */
1731 	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
1732 		struct rlimit l;
1733 		int old_errno = errno;
1734 
1735 		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
1736 			if (set_rlimit == NO_CHANGE)
1737 				l.rlim_cur = l.rlim_max;
1738 			else {
1739 				l.rlim_cur = l.rlim_max + 1000;
1740 				l.rlim_max = l.rlim_cur;
1741 			}
1742 			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
1743 				set_rlimit++;
1744 				errno = old_errno;
1745 				goto retry_open;
1746 			}
1747 		}
1748 		errno = old_errno;
1749 	}
1750 
1751 	if (err != -EINVAL || cpu > 0 || thread > 0)
1752 		goto out_close;
1753 
1754 	/*
1755 	 * Must probe features in the order they were added to the
1756 	 * perf_event_attr interface.
1757 	 */
1758 	if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
1759 		perf_missing_features.aux_output = true;
1760 		pr_debug2("Kernel has no attr.aux_output support, bailing out\n");
1761 		goto out_close;
1762 	} else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
1763 		perf_missing_features.bpf = true;
1764 		pr_debug2("switching off bpf_event\n");
1765 		goto fallback_missing_features;
1766 	} else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
1767 		perf_missing_features.ksymbol = true;
1768 		pr_debug2("switching off ksymbol\n");
1769 		goto fallback_missing_features;
1770 	} else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
1771 		perf_missing_features.write_backward = true;
1772 		pr_debug2("switching off write_backward\n");
1773 		goto out_close;
1774 	} else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
1775 		perf_missing_features.clockid_wrong = true;
1776 		pr_debug2("switching off clockid\n");
1777 		goto fallback_missing_features;
1778 	} else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
1779 		perf_missing_features.clockid = true;
1780 		pr_debug2("switching off use_clockid\n");
1781 		goto fallback_missing_features;
1782 	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
1783 		perf_missing_features.cloexec = true;
1784 		pr_debug2("switching off cloexec flag\n");
1785 		goto fallback_missing_features;
1786 	} else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
1787 		perf_missing_features.mmap2 = true;
1788 		pr_debug2("switching off mmap2\n");
1789 		goto fallback_missing_features;
1790 	} else if (!perf_missing_features.exclude_guest &&
1791 		   (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host)) {
1792 		perf_missing_features.exclude_guest = true;
1793 		pr_debug2("switching off exclude_guest, exclude_host\n");
1794 		goto fallback_missing_features;
1795 	} else if (!perf_missing_features.sample_id_all) {
1796 		perf_missing_features.sample_id_all = true;
1797 		pr_debug2("switching off sample_id_all\n");
1798 		goto retry_sample_id;
1799 	} else if (!perf_missing_features.lbr_flags &&
1800 			(evsel->core.attr.branch_sample_type &
1801 			 (PERF_SAMPLE_BRANCH_NO_CYCLES |
1802 			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
1803 		perf_missing_features.lbr_flags = true;
1804 		pr_debug2("switching off branch sample type no-cycles/no-flags\n");
1805 		goto fallback_missing_features;
1806 	} else if (!perf_missing_features.group_read &&
1807 		    evsel->core.attr.inherit &&
1808 		   (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
1809 		   perf_evsel__is_group_leader(evsel)) {
1810 		perf_missing_features.group_read = true;
1811 		pr_debug2("switching off group read\n");
1812 		goto fallback_missing_features;
1813 	}
1814 out_close:
1815 	if (err)
1816 		threads->err_thread = thread;
1817 
1818 	do {
1819 		while (--thread >= 0) {
1820 			close(FD(evsel, cpu, thread));
1821 			FD(evsel, cpu, thread) = -1;
1822 		}
1823 		thread = nthreads;
1824 	} while (--cpu >= 0);
1825 	return err;
1826 }
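
/*
 * Editor's note: a minimal sketch of the EMFILE fallback above, pulled
 * out for clarity.  The helper name (bump_nofile_once) and its "stage"
 * parameter are hypothetical; the logic mirrors the NO_CHANGE ->
 * INCREASED_MAX progression: first raise the soft limit to the hard
 * limit, then try to push both past it (which needs CAP_SYS_RESOURCE).
 */
#if 0
static int bump_nofile_once(int *stage)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l))
		return -errno;

	if (*stage == 0) {
		l.rlim_cur = l.rlim_max;		/* soft -> hard */
	} else {
		l.rlim_cur = l.rlim_max + 1000;		/* beyond hard */
		l.rlim_max = l.rlim_cur;
	}

	if (setrlimit(RLIMIT_NOFILE, &l))
		return -errno;

	(*stage)++;
	return 0;
}
#endif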
1827 
1828 void evsel__close(struct evsel *evsel)
1829 {
1830 	perf_evsel__close(&evsel->core);
1831 	perf_evsel__free_id(&evsel->core);
1832 }
1833 
1834 int perf_evsel__open_per_cpu(struct evsel *evsel,
1835 			     struct perf_cpu_map *cpus)
1836 {
1837 	return evsel__open(evsel, cpus, NULL);
1838 }
1839 
1840 int perf_evsel__open_per_thread(struct evsel *evsel,
1841 				struct perf_thread_map *threads)
1842 {
1843 	return evsel__open(evsel, NULL, threads);
1844 }
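
/*
 * Editor's note: a hypothetical caller-side sketch.  Opening per-cpu
 * passes a cpu map and NULL threads, per-thread the inverse; the
 * missing map defaults to "any" inside evsel__open().
 */
#if 0
static int probe_event_on_cpus(struct evsel *evsel, struct perf_cpu_map *cpus)
{
	int err = perf_evsel__open_per_cpu(evsel, cpus);

	if (!err)
		evsel__close(evsel);
	return err;
}
#endif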
1845 
1846 static int perf_evsel__parse_id_sample(const struct evsel *evsel,
1847 				       const union perf_event *event,
1848 				       struct perf_sample *sample)
1849 {
1850 	u64 type = evsel->core.attr.sample_type;
1851 	const __u64 *array = event->sample.array;
1852 	bool swapped = evsel->needs_swap;
1853 	union u64_swap u;
1854 
1855 	array += ((event->header.size -
1856 		   sizeof(event->header)) / sizeof(u64)) - 1;
1857 
1858 	if (type & PERF_SAMPLE_IDENTIFIER) {
1859 		sample->id = *array;
1860 		array--;
1861 	}
1862 
1863 	if (type & PERF_SAMPLE_CPU) {
1864 		u.val64 = *array;
1865 		if (swapped) {
1866 			/* undo swap of u64, then swap on individual u32s */
1867 			u.val64 = bswap_64(u.val64);
1868 			u.val32[0] = bswap_32(u.val32[0]);
1869 		}
1870 
1871 		sample->cpu = u.val32[0];
1872 		array--;
1873 	}
1874 
1875 	if (type & PERF_SAMPLE_STREAM_ID) {
1876 		sample->stream_id = *array;
1877 		array--;
1878 	}
1879 
1880 	if (type & PERF_SAMPLE_ID) {
1881 		sample->id = *array;
1882 		array--;
1883 	}
1884 
1885 	if (type & PERF_SAMPLE_TIME) {
1886 		sample->time = *array;
1887 		array--;
1888 	}
1889 
1890 	if (type & PERF_SAMPLE_TID) {
1891 		u.val64 = *array;
1892 		if (swapped) {
1893 			/* undo swap of u64, then swap on individual u32s */
1894 			u.val64 = bswap_64(u.val64);
1895 			u.val32[0] = bswap_32(u.val32[0]);
1896 			u.val32[1] = bswap_32(u.val32[1]);
1897 		}
1898 
1899 		sample->pid = u.val32[0];
1900 		sample->tid = u.val32[1];
1901 		array--;
1902 	}
1903 
1904 	return 0;
1905 }
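
/*
 * Editor's note: perf_evsel__parse_id_sample() walks backwards because
 * sample_id_all appends the id fields to the *end* of non-sample
 * records, in the reverse of sample order.  A sketch (hypothetical
 * helper name) of the trailer size implied by the bits tested above:
 */
#if 0
static u64 id_sample_size(u64 sample_type)
{
	u64 sz = 0;

	if (sample_type & PERF_SAMPLE_TID)
		sz += sizeof(u64);
	if (sample_type & PERF_SAMPLE_TIME)
		sz += sizeof(u64);
	if (sample_type & PERF_SAMPLE_ID)
		sz += sizeof(u64);
	if (sample_type & PERF_SAMPLE_STREAM_ID)
		sz += sizeof(u64);
	if (sample_type & PERF_SAMPLE_CPU)
		sz += sizeof(u64);
	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		sz += sizeof(u64);

	return sz;
}
#endif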
1906 
1907 static inline bool overflow(const void *endp, u16 max_size, const void *offset,
1908 			    u64 size)
1909 {
1910 	return size > max_size || offset + size > endp;
1911 }
1912 
1913 #define OVERFLOW_CHECK(offset, size, max_size)				\
1914 	do {								\
1915 		if (overflow(endp, (max_size), (offset), (size)))	\
1916 			return -EFAULT;					\
1917 	} while (0)
1918 
1919 #define OVERFLOW_CHECK_u64(offset) \
1920 	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
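
/*
 * Editor's note: the macros above expect an int-returning scope with a
 * variable literally named "endp" in view, since OVERFLOW_CHECK expands
 * to a bare "return -EFAULT".  A hypothetical sketch of the intended
 * validate-then-dereference pattern:
 */
#if 0
static int read_u64_field(const void *endp, u16 max_size,
			  const __u64 **array, u64 *val)
{
	OVERFLOW_CHECK(*array, sizeof(u64), max_size);
	*val = **array;
	(*array)++;
	return 0;
}
#endif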
1921 
1922 static int
1923 perf_event__check_size(union perf_event *event, unsigned int sample_size)
1924 {
1925 	/*
1926 	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
1927 	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
1928 	 * check the format does not go past the end of the event.
1929 	 */
1930 	if (sample_size + sizeof(event->header) > event->header.size)
1931 		return -EFAULT;
1932 
1933 	return 0;
1934 }
1935 
1936 int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
1937 			     struct perf_sample *data)
1938 {
1939 	u64 type = evsel->core.attr.sample_type;
1940 	bool swapped = evsel->needs_swap;
1941 	const __u64 *array;
1942 	u16 max_size = event->header.size;
1943 	const void *endp = (void *)event + max_size;
1944 	u64 sz;
1945 
1946 	/*
1947 	 * used for cross-endian analysis. See git commit 65014ab3
1948 	 * for why this goofiness is needed.
1949 	 */
1950 	union u64_swap u;
1951 
1952 	memset(data, 0, sizeof(*data));
1953 	data->cpu = data->pid = data->tid = -1;
1954 	data->stream_id = data->id = data->time = -1ULL;
1955 	data->period = evsel->core.attr.sample_period;
1956 	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1957 	data->misc    = event->header.misc;
1958 	data->data_src = PERF_MEM_DATA_SRC_NONE;
1959 
1960 	if (event->header.type != PERF_RECORD_SAMPLE) {
1961 		if (!evsel->core.attr.sample_id_all)
1962 			return 0;
1963 		return perf_evsel__parse_id_sample(evsel, event, data);
1964 	}
1965 
1966 	array = event->sample.array;
1967 
1968 	if (perf_event__check_size(event, evsel->sample_size))
1969 		return -EFAULT;
1970 
1971 	if (type & PERF_SAMPLE_IDENTIFIER) {
1972 		data->id = *array;
1973 		array++;
1974 	}
1975 
1976 	if (type & PERF_SAMPLE_IP) {
1977 		data->ip = *array;
1978 		array++;
1979 	}
1980 
1981 	if (type & PERF_SAMPLE_TID) {
1982 		u.val64 = *array;
1983 		if (swapped) {
1984 			/* undo swap of u64, then swap on individual u32s */
1985 			u.val64 = bswap_64(u.val64);
1986 			u.val32[0] = bswap_32(u.val32[0]);
1987 			u.val32[1] = bswap_32(u.val32[1]);
1988 		}
1989 
1990 		data->pid = u.val32[0];
1991 		data->tid = u.val32[1];
1992 		array++;
1993 	}
1994 
1995 	if (type & PERF_SAMPLE_TIME) {
1996 		data->time = *array;
1997 		array++;
1998 	}
1999 
2000 	if (type & PERF_SAMPLE_ADDR) {
2001 		data->addr = *array;
2002 		array++;
2003 	}
2004 
2005 	if (type & PERF_SAMPLE_ID) {
2006 		data->id = *array;
2007 		array++;
2008 	}
2009 
2010 	if (type & PERF_SAMPLE_STREAM_ID) {
2011 		data->stream_id = *array;
2012 		array++;
2013 	}
2014 
2015 	if (type & PERF_SAMPLE_CPU) {
2016 
2017 		u.val64 = *array;
2018 		if (swapped) {
2019 			/* undo swap of u64, then swap on individual u32s */
2020 			u.val64 = bswap_64(u.val64);
2021 			u.val32[0] = bswap_32(u.val32[0]);
2022 		}
2023 
2024 		data->cpu = u.val32[0];
2025 		array++;
2026 	}
2027 
2028 	if (type & PERF_SAMPLE_PERIOD) {
2029 		data->period = *array;
2030 		array++;
2031 	}
2032 
2033 	if (type & PERF_SAMPLE_READ) {
2034 		u64 read_format = evsel->core.attr.read_format;
2035 
2036 		OVERFLOW_CHECK_u64(array);
2037 		if (read_format & PERF_FORMAT_GROUP)
2038 			data->read.group.nr = *array;
2039 		else
2040 			data->read.one.value = *array;
2041 
2042 		array++;
2043 
2044 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2045 			OVERFLOW_CHECK_u64(array);
2046 			data->read.time_enabled = *array;
2047 			array++;
2048 		}
2049 
2050 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2051 			OVERFLOW_CHECK_u64(array);
2052 			data->read.time_running = *array;
2053 			array++;
2054 		}
2055 
2056 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2057 		if (read_format & PERF_FORMAT_GROUP) {
2058 			const u64 max_group_nr = UINT64_MAX /
2059 					sizeof(struct sample_read_value);
2060 
2061 			if (data->read.group.nr > max_group_nr)
2062 				return -EFAULT;
2063 			sz = data->read.group.nr *
2064 			     sizeof(struct sample_read_value);
2065 			OVERFLOW_CHECK(array, sz, max_size);
2066 			data->read.group.values =
2067 					(struct sample_read_value *)array;
2068 			array = (void *)array + sz;
2069 		} else {
2070 			OVERFLOW_CHECK_u64(array);
2071 			data->read.one.id = *array;
2072 			array++;
2073 		}
2074 	}
2075 
2076 	if (evsel__has_callchain(evsel)) {
2077 		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
2078 
2079 		OVERFLOW_CHECK_u64(array);
2080 		data->callchain = (struct ip_callchain *)array++;
2081 		if (data->callchain->nr > max_callchain_nr)
2082 			return -EFAULT;
2083 		sz = data->callchain->nr * sizeof(u64);
2084 		OVERFLOW_CHECK(array, sz, max_size);
2085 		array = (void *)array + sz;
2086 	}
2087 
2088 	if (type & PERF_SAMPLE_RAW) {
2089 		OVERFLOW_CHECK_u64(array);
2090 		u.val64 = *array;
2091 
2092 		/*
2093 		 * Undo swap of u64, then swap on individual u32s,
2094 		 * get the size of the raw area and undo all of the
2095 		 * swap. The pevent interface handles endianness by
2096 		 * itself.
2097 		 */
2098 		if (swapped) {
2099 			u.val64 = bswap_64(u.val64);
2100 			u.val32[0] = bswap_32(u.val32[0]);
2101 			u.val32[1] = bswap_32(u.val32[1]);
2102 		}
2103 		data->raw_size = u.val32[0];
2104 
2105 		/*
2106 		 * The raw data is aligned on 64 bits including the
2107 		 * u32 size, so it's safe to use mem_bswap_64.
2108 		 */
2109 		if (swapped)
2110 			mem_bswap_64((void *) array, data->raw_size);
2111 
2112 		array = (void *)array + sizeof(u32);
2113 
2114 		OVERFLOW_CHECK(array, data->raw_size, max_size);
2115 		data->raw_data = (void *)array;
2116 		array = (void *)array + data->raw_size;
2117 	}
2118 
2119 	if (type & PERF_SAMPLE_BRANCH_STACK) {
2120 		const u64 max_branch_nr = UINT64_MAX /
2121 					  sizeof(struct branch_entry);
2122 
2123 		OVERFLOW_CHECK_u64(array);
2124 		data->branch_stack = (struct branch_stack *)array++;
2125 
2126 		if (data->branch_stack->nr > max_branch_nr)
2127 			return -EFAULT;
2128 		sz = data->branch_stack->nr * sizeof(struct branch_entry);
2129 		OVERFLOW_CHECK(array, sz, max_size);
2130 		array = (void *)array + sz;
2131 	}
2132 
2133 	if (type & PERF_SAMPLE_REGS_USER) {
2134 		OVERFLOW_CHECK_u64(array);
2135 		data->user_regs.abi = *array;
2136 		array++;
2137 
2138 		if (data->user_regs.abi) {
2139 			u64 mask = evsel->core.attr.sample_regs_user;
2140 
2141 			sz = hweight64(mask) * sizeof(u64);
2142 			OVERFLOW_CHECK(array, sz, max_size);
2143 			data->user_regs.mask = mask;
2144 			data->user_regs.regs = (u64 *)array;
2145 			array = (void *)array + sz;
2146 		}
2147 	}
2148 
2149 	if (type & PERF_SAMPLE_STACK_USER) {
2150 		OVERFLOW_CHECK_u64(array);
2151 		sz = *array++;
2152 
2153 		data->user_stack.offset = ((char *)(array - 1)
2154 					  - (char *) event);
2155 
2156 		if (!sz) {
2157 			data->user_stack.size = 0;
2158 		} else {
2159 			OVERFLOW_CHECK(array, sz, max_size);
2160 			data->user_stack.data = (char *)array;
2161 			array = (void *)array + sz;
2162 			OVERFLOW_CHECK_u64(array);
2163 			data->user_stack.size = *array++;
2164 			if (WARN_ONCE(data->user_stack.size > sz,
2165 				      "user stack dump failure\n"))
2166 				return -EFAULT;
2167 		}
2168 	}
2169 
2170 	if (type & PERF_SAMPLE_WEIGHT) {
2171 		OVERFLOW_CHECK_u64(array);
2172 		data->weight = *array;
2173 		array++;
2174 	}
2175 
2176 	if (type & PERF_SAMPLE_DATA_SRC) {
2177 		OVERFLOW_CHECK_u64(array);
2178 		data->data_src = *array;
2179 		array++;
2180 	}
2181 
2182 	if (type & PERF_SAMPLE_TRANSACTION) {
2183 		OVERFLOW_CHECK_u64(array);
2184 		data->transaction = *array;
2185 		array++;
2186 	}
2187 
2188 	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
2189 	if (type & PERF_SAMPLE_REGS_INTR) {
2190 		OVERFLOW_CHECK_u64(array);
2191 		data->intr_regs.abi = *array;
2192 		array++;
2193 
2194 		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
2195 			u64 mask = evsel->core.attr.sample_regs_intr;
2196 
2197 			sz = hweight64(mask) * sizeof(u64);
2198 			OVERFLOW_CHECK(array, sz, max_size);
2199 			data->intr_regs.mask = mask;
2200 			data->intr_regs.regs = (u64 *)array;
2201 			array = (void *)array + sz;
2202 		}
2203 	}
2204 
2205 	data->phys_addr = 0;
2206 	if (type & PERF_SAMPLE_PHYS_ADDR) {
2207 		data->phys_addr = *array;
2208 		array++;
2209 	}
2210 
2211 	return 0;
2212 }
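
/*
 * Editor's note: a hypothetical caller sketch showing how the fields
 * decoded above are consumed once parsing succeeds.
 */
#if 0
static void dump_sample(struct evsel *evsel, union perf_event *event)
{
	struct perf_sample sample;

	if (perf_evsel__parse_sample(evsel, event, &sample))
		return;

	pr_debug2("pid %d tid %d cpu %d time %" PRIu64 " period %" PRIu64 "\n",
		  sample.pid, sample.tid, sample.cpu,
		  sample.time, sample.period);
}
#endif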
2213 
2214 int perf_evsel__parse_sample_timestamp(struct evsel *evsel,
2215 				       union perf_event *event,
2216 				       u64 *timestamp)
2217 {
2218 	u64 type = evsel->core.attr.sample_type;
2219 	const __u64 *array;
2220 
2221 	if (!(type & PERF_SAMPLE_TIME))
2222 		return -1;
2223 
2224 	if (event->header.type != PERF_RECORD_SAMPLE) {
2225 		struct perf_sample data = {
2226 			.time = -1ULL,
2227 		};
2228 
2229 		if (!evsel->core.attr.sample_id_all)
2230 			return -1;
2231 		if (perf_evsel__parse_id_sample(evsel, event, &data))
2232 			return -1;
2233 
2234 		*timestamp = data.time;
2235 		return 0;
2236 	}
2237 
2238 	array = event->sample.array;
2239 
2240 	if (perf_event__check_size(event, evsel->sample_size))
2241 		return -EFAULT;
2242 
2243 	if (type & PERF_SAMPLE_IDENTIFIER)
2244 		array++;
2245 
2246 	if (type & PERF_SAMPLE_IP)
2247 		array++;
2248 
2249 	if (type & PERF_SAMPLE_TID)
2250 		array++;
2251 
2252 	if (type & PERF_SAMPLE_TIME)
2253 		*timestamp = *array;
2254 
2255 	return 0;
2256 }
2257 
2258 struct tep_format_field *perf_evsel__field(struct evsel *evsel, const char *name)
2259 {
2260 	return tep_find_field(evsel->tp_format, name);
2261 }
2262 
2263 void *perf_evsel__rawptr(struct evsel *evsel, struct perf_sample *sample,
2264 			 const char *name)
2265 {
2266 	struct tep_format_field *field = perf_evsel__field(evsel, name);
2267 	int offset;
2268 
2269 	if (!field)
2270 		return NULL;
2271 
2272 	offset = field->offset;
2273 
2274 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2275 		offset = *(int *)(sample->raw_data + field->offset);
2276 		offset &= 0xffff;
2277 	}
2278 
2279 	return sample->raw_data + offset;
2280 }
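
/*
 * Editor's note: a TEP_FIELD_IS_DYNAMIC ("__data_loc") field stores a
 * u32 descriptor in the fixed part of the record -- payload length in
 * the high 16 bits, payload offset in the low 16 bits -- which is why
 * the code above masks with 0xffff.  A hypothetical sketch that also
 * recovers the length:
 */
#if 0
static void *rawptr_dynamic(struct perf_sample *sample,
			    struct tep_format_field *field, int *lenp)
{
	u32 desc = *(u32 *)(sample->raw_data + field->offset);

	*lenp = desc >> 16;				/* payload length */
	return sample->raw_data + (desc & 0xffff);	/* payload offset */
}
#endif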
2281 
2282 u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
2283 			 bool needs_swap)
2284 {
2285 	u64 value;
2286 	void *ptr = sample->raw_data + field->offset;
2287 
2288 	switch (field->size) {
2289 	case 1:
2290 		return *(u8 *)ptr;
2291 	case 2:
2292 		value = *(u16 *)ptr;
2293 		break;
2294 	case 4:
2295 		value = *(u32 *)ptr;
2296 		break;
2297 	case 8:
2298 		memcpy(&value, ptr, sizeof(u64));
2299 		break;
2300 	default:
2301 		return 0;
2302 	}
2303 
2304 	if (!needs_swap)
2305 		return value;
2306 
2307 	switch (field->size) {
2308 	case 2:
2309 		return bswap_16(value);
2310 	case 4:
2311 		return bswap_32(value);
2312 	case 8:
2313 		return bswap_64(value);
2314 	default:
2315 		return 0;
2316 	}
2317 
2318 	return 0;
2319 }
2320 
2321 u64 perf_evsel__intval(struct evsel *evsel, struct perf_sample *sample,
2322 		       const char *name)
2323 {
2324 	struct tep_format_field *field = perf_evsel__field(evsel, name);
2325 
2326 	if (!field)
2327 		return 0;
2328 
2329 	return format_field__intval(field, sample, evsel->needs_swap);
2330 }
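
/*
 * Editor's note: hypothetical usage sketch -- pulling an integer field
 * out of a tracepoint sample, e.g. "prev_pid" from sched:sched_switch.
 * Note the return value conflates "field missing" with a genuine zero.
 */
#if 0
static u64 sample_prev_pid(struct evsel *evsel, struct perf_sample *sample)
{
	return perf_evsel__intval(evsel, sample, "prev_pid");
}
#endif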
2331 
2332 bool perf_evsel__fallback(struct evsel *evsel, int err,
2333 			  char *msg, size_t msgsize)
2334 {
2335 	int paranoid;
2336 
2337 	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
2338 	    evsel->core.attr.type   == PERF_TYPE_HARDWARE &&
2339 	    evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
2340 		/*
2341 		 * If it's cycles then fall back to hrtimer based
2342 		 * cpu-clock-tick sw counter, which is always available even if
2343 		 * no PMU support.
2344 		 *
2345 		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
2346 		 * b0a873e).
2347 		 */
2348 		scnprintf(msg, msgsize, "%s",
2349 "The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2350 
2351 		evsel->core.attr.type   = PERF_TYPE_SOFTWARE;
2352 		evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
2353 
2354 		zfree(&evsel->name);
2355 		return true;
2356 	} else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
2357 		   (paranoid = perf_event_paranoid()) > 1) {
2358 		const char *name = perf_evsel__name(evsel);
2359 		char *new_name;
2360 		const char *sep = ":";
2361 
2362 		/* If the event already excludes user, don't exclude kernel too. */
2363 		if (evsel->core.attr.exclude_user)
2364 			return false;
2365 
2366 		/* Is the separator already in the name? */
2367 		if (strchr(name, '/') ||
2368 		    strchr(name, ':'))
2369 			sep = "";
2370 
2371 		if (asprintf(&new_name, "%s%su", name, sep) < 0)
2372 			return false;
2373 
2374 		if (evsel->name)
2375 			free(evsel->name);
2376 		evsel->name = new_name;
2377 		scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
2378 			  "to fall back to excluding kernel and hypervisor "
2379 			  "samples", paranoid);
2380 		evsel->core.attr.exclude_kernel = 1;
2381 		evsel->core.attr.exclude_hv     = 1;
2382 
2383 		return true;
2384 	}
2385 
2386 	return false;
2387 }
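
/*
 * Editor's note: a hypothetical caller sketch, modeled on the open
 * loops in the perf tools -- retry as long as perf_evsel__fallback()
 * manages to rewrite the attr into something the kernel accepts.  It
 * takes a positive errno, hence the negation of the open return value.
 */
#if 0
static int open_with_fallback(struct evsel *evsel, struct perf_cpu_map *cpus)
{
	char msg[512];
	int err;

	while ((err = perf_evsel__open_per_cpu(evsel, cpus)) < 0) {
		if (!perf_evsel__fallback(evsel, -err, msg, sizeof(msg)))
			break;
		pr_debug2("%s\n", msg);
	}
	return err;
}
#endif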
2388 
2389 static bool find_process(const char *name)
2390 {
2391 	size_t len = strlen(name);
2392 	DIR *dir;
2393 	struct dirent *d;
2394 	int ret = -1;
2395 
2396 	dir = opendir(procfs__mountpoint());
2397 	if (!dir)
2398 		return false;
2399 
2400 	/* Walk through the directory. */
2401 	while (ret && (d = readdir(dir)) != NULL) {
2402 		char path[PATH_MAX];
2403 		char *data;
2404 		size_t size;
2405 
2406 		if ((d->d_type != DT_DIR) ||
2407 		     !strcmp(".", d->d_name) ||
2408 		     !strcmp("..", d->d_name))
2409 			continue;
2410 
2411 		scnprintf(path, sizeof(path), "%s/%s/comm",
2412 			  procfs__mountpoint(), d->d_name);
2413 
2414 		if (filename__read_str(path, &data, &size))
2415 			continue;
2416 
2417 		ret = strncmp(name, data, len);
2418 		free(data);
2419 	}
2420 
2421 	closedir(dir);
2422 	return ret ? false : true;
2423 }
2424 
2425 int perf_evsel__open_strerror(struct evsel *evsel, struct target *target,
2426 			      int err, char *msg, size_t size)
2427 {
2428 	char sbuf[STRERR_BUFSIZE];
2429 	int printed = 0;
2430 
2431 	switch (err) {
2432 	case EPERM:
2433 	case EACCES:
2434 		if (err == EPERM)
2435 			printed = scnprintf(msg, size,
2436 				"No permission to enable %s event.\n\n",
2437 				perf_evsel__name(evsel));
2438 
2439 		return scnprintf(msg + printed, size - printed,
2440 		 "You may not have permission to collect %sstats.\n\n"
2441 		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
2442 		 "which controls use of the performance events system by\n"
2443 		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
2444 		 "The current value is %d:\n\n"
2445 		 "  -1: Allow use of (almost) all events by all users\n"
2446 		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
2447 		 ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
2448 		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
2449 		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
2450 		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
2451 		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
2452 		 "	kernel.perf_event_paranoid = -1\n",
2453 				 target->system_wide ? "system-wide " : "",
2454 				 perf_event_paranoid());
2455 	case ENOENT:
2456 		return scnprintf(msg, size, "The %s event is not supported.",
2457 				 perf_evsel__name(evsel));
2458 	case EMFILE:
2459 		return scnprintf(msg, size, "%s",
2460 			 "Too many events are opened.\n"
2461 			 "Probably the maximum number of open file descriptors has been reached.\n"
2462 			 "Hint: Try again after reducing the number of events.\n"
2463 			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
2464 	case ENOMEM:
2465 		if (evsel__has_callchain(evsel) &&
2466 		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
2467 			return scnprintf(msg, size,
2468 					 "Not enough memory to setup event with callchain.\n"
2469 					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
2470 					 "Hint: Current value: %d", sysctl__max_stack());
2471 		break;
2472 	case ENODEV:
2473 		if (target->cpu_list)
2474 			return scnprintf(msg, size, "%s",
2475 	 "No such device - did you specify an out-of-range profile CPU?");
2476 		break;
2477 	case EOPNOTSUPP:
2478 		if (evsel->core.attr.sample_period != 0)
2479 			return scnprintf(msg, size,
2480 	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
2481 					 perf_evsel__name(evsel));
2482 		if (evsel->core.attr.precise_ip)
2483 			return scnprintf(msg, size, "%s",
2484 	"\'precise\' request may not be supported. Try removing 'p' modifier.");
2485 #if defined(__i386__) || defined(__x86_64__)
2486 		if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
2487 			return scnprintf(msg, size, "%s",
2488 	"No hardware sampling interrupt available.\n");
2489 #endif
2490 		break;
2491 	case EBUSY:
2492 		if (find_process("oprofiled"))
2493 			return scnprintf(msg, size,
2494 	"The PMU counters are busy/taken by another profiler.\n"
2495 	"We found oprofile daemon running, please stop it and try again.");
2496 		break;
2497 	case EINVAL:
2498 		if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
2499 			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
2500 		if (perf_missing_features.clockid)
2501 			return scnprintf(msg, size, "clockid feature not supported.");
2502 		if (perf_missing_features.clockid_wrong)
2503 			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
2504 		if (perf_missing_features.aux_output)
2505 			return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
2506 		break;
2507 	default:
2508 		break;
2509 	}
2510 
2511 	return scnprintf(msg, size,
2512 	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
2513 	"/bin/dmesg | grep -i perf may provide additional information.\n",
2514 			 err, str_error_r(err, sbuf, sizeof(sbuf)),
2515 			 perf_evsel__name(evsel));
2516 }
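
/*
 * Editor's note: hypothetical sketch of the usual pairing -- when an
 * open fails and no fallback applies, format the error for the user.
 * The switch above expects a positive errno, so negate the (negative)
 * return of evsel__open().
 */
#if 0
static void report_open_error(struct evsel *evsel, struct target *target,
			      int err)
{
	char msg[512];

	perf_evsel__open_strerror(evsel, target, -err, msg, sizeof(msg));
	pr_err("%s\n", msg);
}
#endif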
2517 
2518 struct perf_env *perf_evsel__env(struct evsel *evsel)
2519 {
2520 	if (evsel && evsel->evlist)
2521 		return evsel->evlist->env;
2522 	return &perf_env;
2523 }
2524 
2525 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
2526 {
2527 	int cpu, thread;
2528 
2529 	for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) {
2530 		for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
2531 		     thread++) {
2532 			int fd = FD(evsel, cpu, thread);
2533 
2534 			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
2535 						   cpu, thread, fd) < 0)
2536 				return -1;
2537 		}
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
2544 {
2545 	struct perf_cpu_map *cpus = evsel->core.cpus;
2546 	struct perf_thread_map *threads = evsel->core.threads;
2547 
2548 	if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
2549 		return -ENOMEM;
2550 
2551 	return store_evsel_ids(evsel, evlist);
2552 }
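
/*
 * Editor's note: hypothetical ordering sketch -- ids can only be stored
 * once the fds exist, so open first, then record the ids for later
 * sample -> evsel lookup.
 */
#if 0
static int open_and_store(struct evsel *evsel, struct evlist *evlist,
			  struct perf_cpu_map *cpus)
{
	int err = perf_evsel__open_per_cpu(evsel, cpus);

	return err ?: perf_evsel__store_ids(evsel, evlist);
}
#endif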
2553