/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system-wide, etc.  Default format is loosely strace-like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

#define RAW_SYSCALL_ARGS_NUM	6

/*
 * strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
 */
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	bool	   show_zero;
};
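
/*
 * Illustrative sketch (not part of the original source): a trivial
 * ->scnprintf formatter just renders the raw value, and a matching
 * ->strtoul would do the reverse mapping when parsing filter expressions:
 *
 *	static size_t scnprintf_example(char *bf, size_t size, struct syscall_arg *arg)
 *	{
 *		return scnprintf(bf, size, "%ld", (long)arg->val);
 *	}
 */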

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	}	   bpf_prog_name;
	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
};

struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall  *table;
		struct bpf_map  *map;
		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
			struct bpf_map  *sys_enter,
					*sys_exit;
		}		prog_array;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *augmented;
		}		events;
		struct bpf_program *unaugmented_prog;
	} syscalls;
	struct {
		struct bpf_map *map;
	} dump;
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct bpf_object	*bpf_obj;
	struct cgroup		*cgroup;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct evswitch		evswitch;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	struct {
		size_t		nr;
		pid_t		*entries;
		struct bpf_map  *map;
	}			filter_pids;
	double			duration_filter;
	double			runtime_ms;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			sort_events;
	bool			not_ev_qualifier;
	bool			live;
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			failure_only;
	bool			show_comm;
	bool			print_sample;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
	bool			show_duration;
	bool			show_zeros;
	bool			show_arg_names;
	bool			show_string_prefix;
	bool			force;
	bool			vfs_getname;
	int			trace_pgfaults;
	struct {
		struct ordered_events	data;
		u64			last;
	} oe;
};

struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
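
/*
 * For reference (illustrative, not in the original source): TP_UINT_FIELD(32)
 * expands to a tp_field__u32() accessor that memcpy()s a u32 out of
 * sample->raw_data at field->offset, avoiding unaligned loads; the __SWAPPED
 * variants additionally bswap_32() the value for cross-endian perf.data files.
 */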

static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}

static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}

struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};

static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
					  struct tp_field *field,
					  const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int perf_evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			goto out_delete;
		return 0;
	}

	return -ENOMEM;
out_delete:
	zfree(&evsel->priv);
	return -ENOENT;
}

static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = perf_evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL)
			goto out_delete;
		if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			goto out_delete;

		return 0;
	}

	return -ENOMEM;
out_delete:
	zfree(&evsel->priv);
	return -EINVAL;
}

static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}

static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	evsel->priv = malloc(sizeof(struct syscall_tp));
	if (evsel->priv != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			goto out_delete;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;

out_delete:
	zfree(&evsel->priv);
	return -ENOENT;
}

static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = perf_evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (perf_evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })
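
/*
 * Usage sketch (illustrative): with a raw_syscalls:sys_enter evsel set up by
 * perf_evsel__init_raw_syscall_tp(), a sample's syscall id and packed
 * argument array can be read as:
 *
 *	u64 id     = perf_evsel__sc_tp_uint(evsel, id, sample);
 *	void *args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 */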

size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
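
/*
 * Example (illustrative): with strarray__itimers (offset 0, prefix "ITIMER_",
 * entries "REAL", "VIRTUAL", "PROF"), val=0 prints "ITIMER_REAL" when
 * prefixes are shown and "REAL" otherwise; an out-of-range val falls back to
 * intfmt plus an "ITIMER_???" hint.
 */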

static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}

size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}

size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}

#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd

size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] =	"CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");

static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename

static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags

#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						   struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags

#define STRARRAY(name, array) \
	  { .scnprintf	= SCA_STRARRAY, \
	    .parm	= &strarray__##array, }

#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
	    .parm	= &strarray__##array, }

#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"

static struct syscall_fmt syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
	{ .name	    = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name	    = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name	    = "brk",	    .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name     = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name	    = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name	    = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name	    = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name	    = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
			   .parm      = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name	    = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name     = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name     = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name     = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	  /* path */ },
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name	    = "fstat", .alias = "newfstat", },
	{ .name	    = "fstatat", .alias = "newfstatat", },
	{ .name	    = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name	    = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name	    = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "gettid",	    .errpid = true, },
	{ .name	    = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name	    = "kcmp",	    .nr_args = 5,
	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name	    = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name	    = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name	    = "lstat", .alias = "newlstat", },
	{ .name     = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name	    = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mmap",	    .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */ },
		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
	{ .name	    = "mount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ }, }, },
	{ .name	    = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name	    = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "newfstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "perf_event_open",
	  .arg = { [2] = { .scnprintf = SCA_INT,	/* cpu */ },
		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name	    = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name	    = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
	{ .name	    = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
	{ .name	    = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
	{ .name	    = "poll", .timeout = true, },
	{ .name	    = "ppoll", .timeout = true, },
	{ .name	    = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name	    = "pread", .alias = "pread64", },
	{ .name	    = "preadv", .alias = "pread", },
	{ .name	    = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "pwrite", .alias = "pwrite64", },
	{ .name	    = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name	    = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name	    = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name	    = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name	    = "select", .timeout = true, },
	{ .name	    = "sendfile", .alias = "sendfile64", },
	{ .name	    = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "stat", .alias = "newstat", },
	{ .name	    = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ },
		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
	{ .name	    = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name	    = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name     = "umount2", .alias = "umount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
	{ .name	    = "uname", .alias = "newuname", },
	{ .name	    = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};
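
/*
 * Note (not in the original source): syscall_fmt__find() below looks this
 * table up with bsearch(), so entries must be kept sorted alphabetically
 * by ->name.
 */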
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	int i, nmemb = ARRAY_SIZE(syscall_fmts);

	for (i = 0; i < nmemb; ++i) {
		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
			return &syscall_fmts[i];
	}

	return NULL;
}

/*
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	struct tep_format_field *args;
	const char	    *name;
	struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * Must match what is in the BPF program:
 *
 * tools/perf/examples/bpf/augmented_raw_syscalls.c
 */
struct bpf_map_syscall_entry {
	bool	enabled;
	u16	string_args_len[RAW_SYSCALL_ARGS_NUM];
};
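
/*
 * Illustrative note (an assumption, not stated in this file): for each of
 * the six raw syscall args, string_args_len[i] tells the BPF program how
 * many bytes to copy when that arg is a string (e.g. the pathname in
 * openat()); 0 means the arg is not a string and only its integer value is
 * recorded.
 */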

/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know the duration of a syscall, for instance, when we start a
 * session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? )" for the duration and for
 * the start timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}
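
/*
 * Example output (illustrative): a 1.5 ms syscall prints "( 1.500 ms): " in
 * red, a 50 us one prints "( 0.050 ms): " in yellow, and an unknown duration
 * prints just "(         ): ".
 */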

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct intlist *syscall_stats;
};
static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;

static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}
struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}

static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}

static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}
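
/*
 * Illustrative example: for a main thread (pid_ == tid) and fd 3 this
 * readlink()s "/proc/<pid>/fd/3", which may yield e.g. "/etc/passwd";
 * fds of other threads are resolved via "/proc/<pid>/task/<tid>/fd/<fd>".
 */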

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}

size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}
static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}
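
/*
 * Illustrative payload layout (an assumption based on the comment above, not
 * spelled out in this file): for renameat(olddirfd, oldpath, newdirfd,
 * newpath) the augmented area carries two size-prefixed strings back to back:
 *
 *	[header][oldpath bytes...][header][newpath bytes...]
 *
 * so each consumer advances arg->augmented.args by
 * sizeof(struct augmented_arg) + size.
 */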

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#lx", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before the tracing
 * session starts, or a sys_enter lost to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}

static bool done = false;
static bool interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}

static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}

static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}

static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process, false,
					    1);
out:
	if (err)
		symbol__exit();

	return err;
}

static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}
static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
	int idx;

	if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
		nr_args = sc->fmt->nr_args;

	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
	if (sc->arg_fmt == NULL)
		return -1;

	for (idx = 0; idx < nr_args; ++idx) {
		if (sc->fmt)
			sc->arg_fmt[idx] = sc->fmt->arg[idx];
	}

	sc->nr_args = nr_args;
	return 0;
}

static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
};
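
/*
 * Note (not in the original source): like syscall_fmts[], this table is
 * looked up with bsearch() below, so any entries added here must be kept
 * sorted alphabetically by ->name.
 */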

static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_arg_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_arg_fmt *
__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
}

static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
	return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
}

static struct tep_format_field *
syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
{
	struct tep_format_field *last_field = NULL;
	int len;

	for (; field; field = field->next, ++arg) {
		last_field = field;

		if (arg->scnprintf)
			continue;

		len = strlen(field->name);

		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL))
			arg->scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			arg->scnprintf = SCA_MODE_T;
		else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			arg->scnprintf = SCA_FD;
		} else {
			struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul   = fmt->strtoul;
			}
		}
	}

	return last_field;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}
1543 
1544 static int trace__read_syscall_info(struct trace *trace, int id)
1545 {
1546 	char tp_name[128];
1547 	struct syscall *sc;
1548 	const char *name = syscalltbl__name(trace->sctbl, id);
1549 
1550 	if (trace->syscalls.table == NULL) {
1551 		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1552 		if (trace->syscalls.table == NULL)
1553 			return -ENOMEM;
1554 	}
1555 
1556 	sc = trace->syscalls.table + id;
1557 	if (sc->nonexistent)
1558 		return -EEXIST;
1559 
1560 	if (name == NULL) {
1561 		sc->nonexistent = true;
1562 		return -EEXIST;
1563 	}
1564 
1565 	sc->name = name;
1566 	sc->fmt  = syscall_fmt__find(sc->name);
1567 
1568 	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1569 	sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1570 
1571 	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1572 		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1573 		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1574 	}
1575 
1576 	/*
1577 	 * If reading the tracepoint format via the tracefs node fails, the
1578 	 * tracepoint doesn't exist.  Set the 'nonexistent' flag to true.
1579 	 */
1580 	if (IS_ERR(sc->tp_format)) {
1581 		sc->nonexistent = true;
1582 		return PTR_ERR(sc->tp_format);
1583 	}
1584 
1585 	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
1586 					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
1587 		return -ENOMEM;
1588 
1589 	sc->args = sc->tp_format->format.fields;
1590 	/*
1591 	 * Check for and discard the first field, '__syscall_nr' ('nr' on
1592 	 * older kernels), which holds the syscall number and is not
1593 	 * needed here.
1594 	 */
1595 	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1596 		sc->args = sc->args->next;
1597 		--sc->nr_args;
1598 	}
1599 
1600 	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1601 	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
1602 
1603 	return syscall__set_arg_fmts(sc);
1604 }
1605 
1606 static int intcmp(const void *a, const void *b)
1607 {
1608 	const int *one = a, *another = b;
1609 
1610 	return *one - *another;
1611 }
1612 
1613 static int trace__validate_ev_qualifier(struct trace *trace)
1614 {
1615 	int err = 0;
1616 	bool printed_invalid_prefix = false;
1617 	struct str_node *pos;
1618 	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1619 
1620 	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1621 						 sizeof(trace->ev_qualifier_ids.entries[0]));
1622 
1623 	if (trace->ev_qualifier_ids.entries == NULL) {
1624 		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1625 		       trace->output);
1626 		err = -EINVAL;
1627 		goto out;
1628 	}
1629 
1630 	strlist__for_each_entry(pos, trace->ev_qualifier) {
1631 		const char *sc = pos->s;
1632 		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1633 
1634 		if (id < 0) {
1635 			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1636 			if (id >= 0)
1637 				goto matches;
1638 
1639 			if (!printed_invalid_prefix) {
1640 				pr_debug("Skipping unknown syscalls: ");
1641 				printed_invalid_prefix = true;
1642 			} else {
1643 				pr_debug(", ");
1644 			}
1645 
1646 			pr_debug("%s", sc);
1647 			continue;
1648 		}
1649 matches:
1650 		trace->ev_qualifier_ids.entries[nr_used++] = id;
1651 		if (match_next == -1)
1652 			continue;
1653 
1654 		while (1) {
1655 			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1656 			if (id < 0)
1657 				break;
1658 			if (nr_allocated == nr_used) {
1659 				void *entries;
1660 
1661 				nr_allocated += 8;
1662 				entries = realloc(trace->ev_qualifier_ids.entries,
1663 						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1664 				if (entries == NULL) {
1665 					err = -ENOMEM;
1666 					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1667 					goto out_free;
1668 				}
1669 				trace->ev_qualifier_ids.entries = entries;
1670 			}
1671 			trace->ev_qualifier_ids.entries[nr_used++] = id;
1672 		}
1673 	}
1674 
1675 	trace->ev_qualifier_ids.nr = nr_used;
1676 	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1677 out:
1678 	if (printed_invalid_prefix)
1679 		pr_debug("\n");
1680 	return err;
1681 out_free:
1682 	zfree(&trace->ev_qualifier_ids.entries);
1683 	trace->ev_qualifier_ids.nr = 0;
1684 	goto out;
1685 }
1686 
1687 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1688 {
1689 	bool in_ev_qualifier;
1690 
1691 	if (trace->ev_qualifier_ids.nr == 0)
1692 		return true;
1693 
1694 	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1695 				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1696 
1697 	if (in_ev_qualifier)
1698 	       return !trace->not_ev_qualifier;
1699 
1700 	return trace->not_ev_qualifier;
1701 }
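
/*
 * Usage sketch, not part of the original listing: with 'perf trace -e
 * openat,close' the ids for those syscalls end up in ev_qualifier_ids and
 * not_ev_qualifier is false, so only they pass the check above; with
 * 'perf trace -e \!openat,close' the same ids are stored but
 * not_ev_qualifier is true, inverting the result.
 */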
1702 
1703 /*
1704  * args is to be interpreted as a series of longs, but we need to handle
1705  * 8-byte unaligned accesses: args points to raw_data within the event,
1706  * and raw_data is guaranteed to be 8-byte misaligned because it is
1707  * preceded by raw_size, which is a u32. So we copy args to a temp
1708  * variable to read it. Most notably this avoids extended load
1709  * instructions on unaligned addresses.
1710  */
1711 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1712 {
1713 	unsigned long val;
1714 	unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1715 
1716 	memcpy(&val, p, sizeof(val));
1717 	return val;
1718 }
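
/*
 * Sketch, not part of the original listing: the memcpy() above stands in for
 * the naive read below, which may trap or take slow unaligned-access paths
 * on some architectures, given that raw_data starts on a 4-byte boundary:
 */
#if 0
	val = ((unsigned long *)arg->args)[idx];	/* potentially unaligned load */
#endif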
1719 
1720 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
1721 				      struct syscall_arg *arg)
1722 {
1723 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
1724 		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
1725 
1726 	return scnprintf(bf, size, "arg%d: ", arg->idx);
1727 }
1728 
1729 /*
1730  * Check if the value is in fact zero, i.e. mask whatever needs masking, such
1731  * as the mount 'flags' argument, which needs some magic flag ignored; see the
1732  * comment in tools/perf/trace/beauty/mount_flags.c.
1733  */
1734 static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
1735 {
1736 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
1737 		return sc->arg_fmt[arg->idx].mask_val(arg, val);
1738 
1739 	return val;
1740 }
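
/*
 * Sketch, not part of the original listing: the mount 'flags' masker
 * referenced in the comment above essentially drops the pre-2.4 MS_MGC_VAL
 * magic number so that an otherwise empty flags argument can be suppressed
 * as zero:
 */
#if 0
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;
	return flags;
#endif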
1741 
1742 static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
1743 				     struct syscall_arg *arg, unsigned long val)
1744 {
1745 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
1746 		arg->val = val;
1747 		if (sc->arg_fmt[arg->idx].parm)
1748 			arg->parm = sc->arg_fmt[arg->idx].parm;
1749 		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
1750 	}
1751 	return scnprintf(bf, size, "%ld", val);
1752 }
1753 
1754 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1755 				      unsigned char *args, void *augmented_args, int augmented_args_size,
1756 				      struct trace *trace, struct thread *thread)
1757 {
1758 	size_t printed = 0;
1759 	unsigned long val;
1760 	u8 bit = 1;
1761 	struct syscall_arg arg = {
1762 		.args	= args,
1763 		.augmented = {
1764 			.size = augmented_args_size,
1765 			.args = augmented_args,
1766 		},
1767 		.idx	= 0,
1768 		.mask	= 0,
1769 		.trace  = trace,
1770 		.thread = thread,
1771 		.show_string_prefix = trace->show_string_prefix,
1772 	};
1773 	struct thread_trace *ttrace = thread__priv(thread);
1774 
1775 	/*
1776 	 * Things like fcntl will set this in its 'cmd' formatter to pick the
1777 	 * right formatter for the return value (an fd? file flags?), which is
1778 	 * not needed for syscalls that always return a given type, say an fd.
1779 	 */
1780 	ttrace->ret_scnprintf = NULL;
1781 
1782 	if (sc->args != NULL) {
1783 		struct tep_format_field *field;
1784 
1785 		for (field = sc->args; field;
1786 		     field = field->next, ++arg.idx, bit <<= 1) {
1787 			if (arg.mask & bit)
1788 				continue;
1789 
1790 			arg.fmt = &sc->arg_fmt[arg.idx];
1791 			val = syscall_arg__val(&arg, arg.idx);
1792 			/*
1793 			 * Some syscall args need some mask, most don't and
1794 			 * return val untouched.
1795 			 */
1796 			val = syscall__mask_val(sc, &arg, val);
1797 
1798 			/*
1799 			 * Suppress this argument if its value is zero and
1800 			 * we don't have a string associated with it in a
1801 			 * strarray.
1802 			 */
1803 			if (val == 0 &&
1804 			    !trace->show_zeros &&
1805 			    !(sc->arg_fmt &&
1806 			      (sc->arg_fmt[arg.idx].show_zero ||
1807 			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
1808 			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
1809 			      sc->arg_fmt[arg.idx].parm))
1810 				continue;
1811 
1812 			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
1813 
1814 			if (trace->show_arg_names)
1815 				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
1816 
1817 			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1818 		}
1819 	} else if (IS_ERR(sc->tp_format)) {
1820 		/*
1821 		 * If we managed to read the tracepoint /format file, then we
1822 		 * may end up not having any args, like with gettid(), so only
1823 		 * print the raw args when we didn't manage to read it.
1824 		 */
1825 		while (arg.idx < sc->nr_args) {
1826 			if (arg.mask & bit)
1827 				goto next_arg;
1828 			val = syscall_arg__val(&arg, arg.idx);
1829 			if (printed)
1830 				printed += scnprintf(bf + printed, size - printed, ", ");
1831 			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
1832 			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1833 next_arg:
1834 			++arg.idx;
1835 			bit <<= 1;
1836 		}
1837 	}
1838 
1839 	return printed;
1840 }
1841 
1842 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
1843 				  union perf_event *event,
1844 				  struct perf_sample *sample);
1845 
1846 static struct syscall *trace__syscall_info(struct trace *trace,
1847 					   struct evsel *evsel, int id)
1848 {
1849 	int err = 0;
1850 
1851 	if (id < 0) {
1852 
1853 		/*
1854 		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
1855 		 * before that, leaving at a higher verbosity level till that is
1856 		 * explained. Reproduced with plain ftrace with:
1857 		 *
1858 		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
1859 		 * grep "NR -1 " /t/trace_pipe
1860 		 *
1861 		 * After generating some load on the machine.
1862  		 */
1863 		if (verbose > 1) {
1864 			static u64 n;
1865 			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
1866 				id, perf_evsel__name(evsel), ++n);
1867 		}
1868 		return NULL;
1869 	}
1870 
1871 	err = -EINVAL;
1872 
1873 	if (id > trace->sctbl->syscalls.max_id)
1874 		goto out_cant_read;
1875 
1876 	if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
1877 	    (err = trace__read_syscall_info(trace, id)) != 0)
1878 		goto out_cant_read;
1879 
1880 	if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
1881 		goto out_cant_read;
1882 
1883 	return &trace->syscalls.table[id];
1884 
1885 out_cant_read:
1886 	if (verbose > 0) {
1887 		char sbuf[STRERR_BUFSIZE];
1888 		fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
1889 		if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
1890 			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
1891 		fputs(" information\n", trace->output);
1892 	}
1893 	return NULL;
1894 }
1895 
1896 static void thread__update_stats(struct thread_trace *ttrace,
1897 				 int id, struct perf_sample *sample)
1898 {
1899 	struct int_node *inode;
1900 	struct stats *stats;
1901 	u64 duration = 0;
1902 
1903 	inode = intlist__findnew(ttrace->syscall_stats, id);
1904 	if (inode == NULL)
1905 		return;
1906 
1907 	stats = inode->priv;
1908 	if (stats == NULL) {
1909 		stats = malloc(sizeof(struct stats));
1910 		if (stats == NULL)
1911 			return;
1912 		init_stats(stats);
1913 		inode->priv = stats;
1914 	}
1915 
1916 	if (ttrace->entry_time && sample->time > ttrace->entry_time)
1917 		duration = sample->time - ttrace->entry_time;
1918 
1919 	update_stats(stats, duration);
1920 }
1921 
1922 static int trace__printf_interrupted_entry(struct trace *trace)
1923 {
1924 	struct thread_trace *ttrace;
1925 	size_t printed;
1926 	int len;
1927 
1928 	if (trace->failure_only || trace->current == NULL)
1929 		return 0;
1930 
1931 	ttrace = thread__priv(trace->current);
1932 
1933 	if (!ttrace->entry_pending)
1934 		return 0;
1935 
1936 	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
1937 	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
1938 
1939 	if (len < trace->args_alignment - 4)
1940 		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
1941 
1942 	printed += fprintf(trace->output, " ...\n");
1943 
1944 	ttrace->entry_pending = false;
1945 	++trace->nr_events_printed;
1946 
1947 	return printed;
1948 }
1949 
1950 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
1951 				 struct perf_sample *sample, struct thread *thread)
1952 {
1953 	int printed = 0;
1954 
1955 	if (trace->print_sample) {
1956 		double ts = (double)sample->time / NSEC_PER_MSEC;
1957 
1958 		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
1959 				   perf_evsel__name(evsel), ts,
1960 				   thread__comm_str(thread),
1961 				   sample->pid, sample->tid, sample->cpu);
1962 	}
1963 
1964 	return printed;
1965 }
1966 
1967 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
1968 {
1969 	void *augmented_args = NULL;
1970 	/*
1971 	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
1972 	 * and there we get all 6 syscall args plus the tracepoint common fields
1973 	 * that get calculated at the start and the syscall_nr (another long).
1974 	 * So we check if that is the case and, if so, don't look just past
1975 	 * sc->args_size but past the full raw_syscalls:sys_enter payload,
1976 	 * which is fixed.
1977 	 *
1978 	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
1979 	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c) so that it
1980 	 * copies only what we need for each syscall, like what happens when we
1981 	 * use syscalls:sys_enter_NAME, reducing the kernel/userspace traffic
1982 	 * to just what is needed for each syscall.
1983 	 */
1984 	int args_size = raw_augmented_args_size ?: sc->args_size;
1985 
1986 	*augmented_args_size = sample->raw_size - args_size;
1987 	if (*augmented_args_size > 0)
1988 		augmented_args = sample->raw_data + args_size;
1989 
1990 	return augmented_args;
1991 }
1992 
1993 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
1994 			    union perf_event *event __maybe_unused,
1995 			    struct perf_sample *sample)
1996 {
1997 	char *msg;
1998 	void *args;
1999 	int printed = 0;
2000 	struct thread *thread;
2001 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2002 	int augmented_args_size = 0;
2003 	void *augmented_args = NULL;
2004 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2005 	struct thread_trace *ttrace;
2006 
2007 	if (sc == NULL)
2008 		return -1;
2009 
2010 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2011 	ttrace = thread__trace(thread, trace->output);
2012 	if (ttrace == NULL)
2013 		goto out_put;
2014 
2015 	trace__fprintf_sample(trace, evsel, sample, thread);
2016 
2017 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2018 
2019 	if (ttrace->entry_str == NULL) {
2020 		ttrace->entry_str = malloc(trace__entry_str_size);
2021 		if (!ttrace->entry_str)
2022 			goto out_put;
2023 	}
2024 
2025 	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2026 		trace__printf_interrupted_entry(trace);
2027 	/*
2028 	 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
2029 	 * arguments, even if the syscall being handled, say "openat", uses only 4.
2030 	 * This breaks syscall__augmented_args()'s check for augmented args, as we
2031 	 * calculate syscall->args_size from each syscalls:sys_enter_NAME tracefs format
2032 	 * file; so when handling, say, openat, we get 6 args for the raw_syscalls:sys_enter
2033 	 * event when we expected just 4 and would mistakenly take the extra 2 u64 args
2034 	 * for the augmented filename. So just check here and avoid using augmented
2035 	 * syscalls when the evsel is the raw_syscalls one.
2036 	 */
2037 	if (evsel != trace->syscalls.events.sys_enter)
2038 		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2039 	ttrace->entry_time = sample->time;
2040 	msg = ttrace->entry_str;
2041 	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
2042 
2043 	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
2044 					   args, augmented_args, augmented_args_size, trace, thread);
2045 
2046 	if (sc->is_exit) {
2047 		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2048 			int alignment = 0;
2049 
2050 			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2051 			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2052 			if (trace->args_alignment > printed)
2053 				alignment = trace->args_alignment - printed;
2054 			fprintf(trace->output, "%*s= ?\n", alignment, " ");
2055 		}
2056 	} else {
2057 		ttrace->entry_pending = true;
2058 		/* See trace__vfs_getname & trace__sys_exit */
2059 		ttrace->filename.pending_open = false;
2060 	}
2061 
2062 	if (trace->current != thread) {
2063 		thread__put(trace->current);
2064 		trace->current = thread__get(thread);
2065 	}
2066 	err = 0;
2067 out_put:
2068 	thread__put(thread);
2069 	return err;
2070 }
2071 
2072 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2073 				    struct perf_sample *sample)
2074 {
2075 	struct thread_trace *ttrace;
2076 	struct thread *thread;
2077 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2078 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2079 	char msg[1024];
2080 	void *args, *augmented_args = NULL;
2081 	int augmented_args_size;
2082 
2083 	if (sc == NULL)
2084 		return -1;
2085 
2086 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2087 	ttrace = thread__trace(thread, trace->output);
2088 	/*
2089 	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2090 	 * and the rest of the beautifiers that access it via struct syscall_arg touch it.
2091 	 */
2092 	if (ttrace == NULL)
2093 		goto out_put;
2094 
2095 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2096 	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2097 	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2098 	fprintf(trace->output, "%s", msg);
2099 	err = 0;
2100 out_put:
2101 	thread__put(thread);
2102 	return err;
2103 }
2104 
2105 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2106 				    struct perf_sample *sample,
2107 				    struct callchain_cursor *cursor)
2108 {
2109 	struct addr_location al;
2110 	int max_stack = evsel->core.attr.sample_max_stack ?
2111 			evsel->core.attr.sample_max_stack :
2112 			trace->max_stack;
2113 	int err;
2114 
2115 	if (machine__resolve(trace->host, &al, sample) < 0)
2116 		return -1;
2117 
2118 	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2119 	addr_location__put(&al);
2120 	return err;
2121 }
2122 
2123 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2124 {
2125 	/* TODO: user-configurable print_opts */
2126 	const unsigned int print_opts = EVSEL__PRINT_SYM |
2127 				        EVSEL__PRINT_DSO |
2128 				        EVSEL__PRINT_UNKNOWN_AS_ADDR;
2129 
2130 	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2131 }
2132 
2133 static const char *errno_to_name(struct evsel *evsel, int err)
2134 {
2135 	struct perf_env *env = perf_evsel__env(evsel);
2136 	const char *arch_name = perf_env__arch(env);
2137 
2138 	return arch_syscalls__strerrno(arch_name, err);
2139 }
2140 
2141 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2142 			   union perf_event *event __maybe_unused,
2143 			   struct perf_sample *sample)
2144 {
2145 	long ret;
2146 	u64 duration = 0;
2147 	bool duration_calculated = false;
2148 	struct thread *thread;
2149 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2150 	int alignment = trace->args_alignment;
2151 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2152 	struct thread_trace *ttrace;
2153 
2154 	if (sc == NULL)
2155 		return -1;
2156 
2157 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2158 	ttrace = thread__trace(thread, trace->output);
2159 	if (ttrace == NULL)
2160 		goto out_put;
2161 
2162 	trace__fprintf_sample(trace, evsel, sample, thread);
2163 
2164 	if (trace->summary)
2165 		thread__update_stats(ttrace, id, sample);
2166 
2167 	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
2168 
2169 	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2170 		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2171 		ttrace->filename.pending_open = false;
2172 		++trace->stats.vfs_getname;
2173 	}
2174 
2175 	if (ttrace->entry_time) {
2176 		duration = sample->time - ttrace->entry_time;
2177 		if (trace__filter_duration(trace, duration))
2178 			goto out;
2179 		duration_calculated = true;
2180 	} else if (trace->duration_filter)
2181 		goto out;
2182 
2183 	if (sample->callchain) {
2184 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2185 		if (callchain_ret == 0) {
2186 			if (callchain_cursor.nr < trace->min_stack)
2187 				goto out;
2188 			callchain_ret = 1;
2189 		}
2190 	}
2191 
2192 	if (trace->summary_only || (ret >= 0 && trace->failure_only))
2193 		goto out;
2194 
2195 	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2196 
2197 	if (ttrace->entry_pending) {
2198 		printed = fprintf(trace->output, "%s", ttrace->entry_str);
2199 	} else {
2200 		printed += fprintf(trace->output, " ... [");
2201 		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2202 		printed += 9;
2203 		printed += fprintf(trace->output, "]: %s()", sc->name);
2204 	}
2205 
2206 	printed++; /* the closing ')' */
2207 
2208 	if (alignment > printed)
2209 		alignment -= printed;
2210 	else
2211 		alignment = 0;
2212 
2213 	fprintf(trace->output, ")%*s= ", alignment, " ");
2214 
2215 	if (sc->fmt == NULL) {
2216 		if (ret < 0)
2217 			goto errno_print;
2218 signed_print:
2219 		fprintf(trace->output, "%ld", ret);
2220 	} else if (ret < 0) {
2221 errno_print: {
2222 		char bf[STRERR_BUFSIZE];
2223 		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2224 			   *e = errno_to_name(evsel, -ret);
2225 
2226 		fprintf(trace->output, "-1 %s (%s)", e, emsg);
2227 	}
2228 	} else if (ret == 0 && sc->fmt->timeout)
2229 		fprintf(trace->output, "0 (Timeout)");
2230 	else if (ttrace->ret_scnprintf) {
2231 		char bf[1024];
2232 		struct syscall_arg arg = {
2233 			.val	= ret,
2234 			.thread	= thread,
2235 			.trace	= trace,
2236 		};
2237 		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2238 		ttrace->ret_scnprintf = NULL;
2239 		fprintf(trace->output, "%s", bf);
2240 	} else if (sc->fmt->hexret)
2241 		fprintf(trace->output, "%#lx", ret);
2242 	else if (sc->fmt->errpid) {
2243 		struct thread *child = machine__find_thread(trace->host, ret, ret);
2244 
2245 		if (child != NULL) {
2246 			fprintf(trace->output, "%ld", ret);
2247 			if (child->comm_set)
2248 				fprintf(trace->output, " (%s)", thread__comm_str(child));
2249 			thread__put(child);
2250 		}
2251 	} else
2252 		goto signed_print;
2253 
2254 	fputc('\n', trace->output);
2255 
2256 	/*
2257 	 * We only consider an 'event' for the sake of --max-events a non-filtered
2258 	 * sys_enter + sys_exit and other tracepoint events.
2259 	 */
2260 	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2261 		interrupted = true;
2262 
2263 	if (callchain_ret > 0)
2264 		trace__fprintf_callchain(trace, sample);
2265 	else if (callchain_ret < 0)
2266 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2267 out:
2268 	ttrace->entry_pending = false;
2269 	err = 0;
2270 out_put:
2271 	thread__put(thread);
2272 	return err;
2273 }
2274 
2275 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2276 			      union perf_event *event __maybe_unused,
2277 			      struct perf_sample *sample)
2278 {
2279 	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2280 	struct thread_trace *ttrace;
2281 	size_t filename_len, entry_str_len, to_move;
2282 	ssize_t remaining_space;
2283 	char *pos;
2284 	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
2285 
2286 	if (!thread)
2287 		goto out;
2288 
2289 	ttrace = thread__priv(thread);
2290 	if (!ttrace)
2291 		goto out_put;
2292 
2293 	filename_len = strlen(filename);
2294 	if (filename_len == 0)
2295 		goto out_put;
2296 
2297 	if (ttrace->filename.namelen < filename_len) {
2298 		char *f = realloc(ttrace->filename.name, filename_len + 1);
2299 
2300 		if (f == NULL)
2301 			goto out_put;
2302 
2303 		ttrace->filename.namelen = filename_len;
2304 		ttrace->filename.name = f;
2305 	}
2306 
2307 	strcpy(ttrace->filename.name, filename);
2308 	ttrace->filename.pending_open = true;
2309 
2310 	if (!ttrace->filename.ptr)
2311 		goto out_put;
2312 
2313 	entry_str_len = strlen(ttrace->entry_str);
2314 	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2315 	if (remaining_space <= 0)
2316 		goto out_put;
2317 
2318 	if (filename_len > (size_t)remaining_space) {
2319 		filename += filename_len - remaining_space;
2320 		filename_len = remaining_space;
2321 	}
2322 
2323 	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2324 	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2325 	memmove(pos + filename_len, pos, to_move);
2326 	memcpy(pos, filename, filename_len);
2327 
2328 	ttrace->filename.ptr = 0;
2329 	ttrace->filename.entry_str_pos = 0;
2330 out_put:
2331 	thread__put(thread);
2332 out:
2333 	return 0;
2334 }
2335 
2336 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2337 				     union perf_event *event __maybe_unused,
2338 				     struct perf_sample *sample)
2339 {
2340 	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
2341 	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2342 	struct thread *thread = machine__findnew_thread(trace->host,
2343 							sample->pid,
2344 							sample->tid);
2345 	struct thread_trace *ttrace = thread__trace(thread, trace->output);
2346 
2347 	if (ttrace == NULL)
2348 		goto out_dump;
2349 
2350 	ttrace->runtime_ms += runtime_ms;
2351 	trace->runtime_ms += runtime_ms;
2352 out_put:
2353 	thread__put(thread);
2354 	return 0;
2355 
2356 out_dump:
2357 	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
2358 	       evsel->name,
2359 	       perf_evsel__strval(evsel, sample, "comm"),
2360 	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
2361 	       runtime,
2362 	       perf_evsel__intval(evsel, sample, "vruntime"));
2363 	goto out_put;
2364 }
2365 
2366 static int bpf_output__printer(enum binary_printer_ops op,
2367 			       unsigned int val, void *extra __maybe_unused, FILE *fp)
2368 {
2369 	unsigned char ch = (unsigned char)val;
2370 
2371 	switch (op) {
2372 	case BINARY_PRINT_CHAR_DATA:
2373 		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2374 	case BINARY_PRINT_DATA_BEGIN:
2375 	case BINARY_PRINT_LINE_BEGIN:
2376 	case BINARY_PRINT_ADDR:
2377 	case BINARY_PRINT_NUM_DATA:
2378 	case BINARY_PRINT_NUM_PAD:
2379 	case BINARY_PRINT_SEP:
2380 	case BINARY_PRINT_CHAR_PAD:
2381 	case BINARY_PRINT_LINE_END:
2382 	case BINARY_PRINT_DATA_END:
2383 	default:
2384 		break;
2385 	}
2386 
2387 	return 0;
2388 }
2389 
2390 static void bpf_output__fprintf(struct trace *trace,
2391 				struct perf_sample *sample)
2392 {
2393 	binary__fprintf(sample->raw_data, sample->raw_size, 8,
2394 			bpf_output__printer, NULL, trace->output);
2395 	++trace->nr_events_printed;
2396 }
2397 
2398 static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2399 				union perf_event *event __maybe_unused,
2400 				struct perf_sample *sample)
2401 {
2402 	struct thread *thread;
2403 	int callchain_ret = 0;
2404 	/*
2405 	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
2406 	 * this event's max_events having been hit and this is an entry coming
2407 	 * from the ring buffer that we should discard, since the max events
2408 	 * have already been considered/printed.
2409 	 */
2410 	if (evsel->disabled)
2411 		return 0;
2412 
2413 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2414 
2415 	if (sample->callchain) {
2416 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2417 		if (callchain_ret == 0) {
2418 			if (callchain_cursor.nr < trace->min_stack)
2419 				goto out;
2420 			callchain_ret = 1;
2421 		}
2422 	}
2423 
2424 	trace__printf_interrupted_entry(trace);
2425 	trace__fprintf_tstamp(trace, sample->time, trace->output);
2426 
2427 	if (trace->trace_syscalls && trace->show_duration)
2428 		fprintf(trace->output, "(         ): ");
2429 
2430 	if (thread)
2431 		trace__fprintf_comm_tid(trace, thread, trace->output);
2432 
2433 	if (evsel == trace->syscalls.events.augmented) {
2434 		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
2435 		struct syscall *sc = trace__syscall_info(trace, evsel, id);
2436 
2437 		if (sc) {
2438 			fprintf(trace->output, "%s(", sc->name);
2439 			trace__fprintf_sys_enter(trace, evsel, sample);
2440 			fputc(')', trace->output);
2441 			goto newline;
2442 		}
2443 
2444 		/*
2445 		 * XXX: Not having the associated syscall info or not finding/adding
2446 		 * 	the thread should never happen, but if it does...
2447 		 * 	fall thru and print it as a bpf_output event.
2448 		 */
2449 	}
2450 
2451 	fprintf(trace->output, "%s:", evsel->name);
2452 
2453 	if (perf_evsel__is_bpf_output(evsel)) {
2454 		bpf_output__fprintf(trace, sample);
2455 	} else if (evsel->tp_format) {
2456 		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
2457 		    trace__fprintf_sys_enter(trace, evsel, sample)) {
2458 			event_format__fprintf(evsel->tp_format, sample->cpu,
2459 					      sample->raw_data, sample->raw_size,
2460 					      trace->output);
2461 			++trace->nr_events_printed;
2462 
2463 			if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
2464 				evsel__disable(evsel);
2465 				evsel__close(evsel);
2466 			}
2467 		}
2468 	}
2469 
2470 newline:
2471 	fprintf(trace->output, "\n");
2472 
2473 	if (callchain_ret > 0)
2474 		trace__fprintf_callchain(trace, sample);
2475 	else if (callchain_ret < 0)
2476 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2477 out:
2478 	thread__put(thread);
2479 	return 0;
2480 }
2481 
2482 static void print_location(FILE *f, struct perf_sample *sample,
2483 			   struct addr_location *al,
2484 			   bool print_dso, bool print_sym)
2485 {
2486 
2487 	if ((verbose > 0 || print_dso) && al->map)
2488 		fprintf(f, "%s@", al->map->dso->long_name);
2489 
2490 	if ((verbose > 0 || print_sym) && al->sym)
2491 		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2492 			al->addr - al->sym->start);
2493 	else if (al->map)
2494 		fprintf(f, "0x%" PRIx64, al->addr);
2495 	else
2496 		fprintf(f, "0x%" PRIx64, sample->addr);
2497 }
2498 
2499 static int trace__pgfault(struct trace *trace,
2500 			  struct evsel *evsel,
2501 			  union perf_event *event __maybe_unused,
2502 			  struct perf_sample *sample)
2503 {
2504 	struct thread *thread;
2505 	struct addr_location al;
2506 	char map_type = 'd';
2507 	struct thread_trace *ttrace;
2508 	int err = -1;
2509 	int callchain_ret = 0;
2510 
2511 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2512 
2513 	if (sample->callchain) {
2514 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2515 		if (callchain_ret == 0) {
2516 			if (callchain_cursor.nr < trace->min_stack)
2517 				goto out_put;
2518 			callchain_ret = 1;
2519 		}
2520 	}
2521 
2522 	ttrace = thread__trace(thread, trace->output);
2523 	if (ttrace == NULL)
2524 		goto out_put;
2525 
2526 	if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2527 		ttrace->pfmaj++;
2528 	else
2529 		ttrace->pfmin++;
2530 
2531 	if (trace->summary_only)
2532 		goto out;
2533 
2534 	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2535 
2536 	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2537 
2538 	fprintf(trace->output, "%sfault [",
2539 		evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2540 		"maj" : "min");
2541 
2542 	print_location(trace->output, sample, &al, false, true);
2543 
2544 	fprintf(trace->output, "] => ");
2545 
2546 	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2547 
2548 	if (!al.map) {
2549 		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2550 
2551 		if (al.map)
2552 			map_type = 'x';
2553 		else
2554 			map_type = '?';
2555 	}
2556 
2557 	print_location(trace->output, sample, &al, true, false);
2558 
2559 	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2560 
2561 	if (callchain_ret > 0)
2562 		trace__fprintf_callchain(trace, sample);
2563 	else if (callchain_ret < 0)
2564 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2565 
2566 	++trace->nr_events_printed;
2567 out:
2568 	err = 0;
2569 out_put:
2570 	thread__put(thread);
2571 	return err;
2572 }
2573 
2574 static void trace__set_base_time(struct trace *trace,
2575 				 struct evsel *evsel,
2576 				 struct perf_sample *sample)
2577 {
2578 	/*
2579 	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
2580 	 * and don't use sample->time unconditionally; we may end up having
2581 	 * some other event in the future without PERF_SAMPLE_TIME for a good
2582 	 * reason, i.e. we may not be interested in its timestamps, just in
2583 	 * it taking place, picking some piece of information when it
2584 	 * appears in our event stream (vfs_getname comes to mind).
2585 	 */
2586 	if (trace->base_time == 0 && !trace->full_time &&
2587 	    (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2588 		trace->base_time = sample->time;
2589 }
2590 
2591 static int trace__process_sample(struct perf_tool *tool,
2592 				 union perf_event *event,
2593 				 struct perf_sample *sample,
2594 				 struct evsel *evsel,
2595 				 struct machine *machine __maybe_unused)
2596 {
2597 	struct trace *trace = container_of(tool, struct trace, tool);
2598 	struct thread *thread;
2599 	int err = 0;
2600 
2601 	tracepoint_handler handler = evsel->handler;
2602 
2603 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2604 	if (thread && thread__is_filtered(thread))
2605 		goto out;
2606 
2607 	trace__set_base_time(trace, evsel, sample);
2608 
2609 	if (handler) {
2610 		++trace->nr_events;
2611 		handler(trace, evsel, event, sample);
2612 	}
2613 out:
2614 	thread__put(thread);
2615 	return err;
2616 }
2617 
2618 static int trace__record(struct trace *trace, int argc, const char **argv)
2619 {
2620 	unsigned int rec_argc, i, j;
2621 	const char **rec_argv;
2622 	const char * const record_args[] = {
2623 		"record",
2624 		"-R",
2625 		"-m", "1024",
2626 		"-c", "1",
2627 	};
2628 
2629 	const char * const sc_args[] = { "-e", };
2630 	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2631 	const char * const majpf_args[] = { "-e", "major-faults" };
2632 	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2633 	const char * const minpf_args[] = { "-e", "minor-faults" };
2634 	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2635 
2636 	/* +1 is for the event string below */
2637 	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
2638 		majpf_args_nr + minpf_args_nr + argc;
2639 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
2640 
2641 	if (rec_argv == NULL)
2642 		return -ENOMEM;
2643 
2644 	j = 0;
2645 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
2646 		rec_argv[j++] = record_args[i];
2647 
2648 	if (trace->trace_syscalls) {
2649 		for (i = 0; i < sc_args_nr; i++)
2650 			rec_argv[j++] = sc_args[i];
2651 
2652 		/* event string may be different for older kernels - e.g., RHEL6 */
2653 		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2654 			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2655 		else if (is_valid_tracepoint("syscalls:sys_enter"))
2656 			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2657 		else {
2658 			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2659 			free(rec_argv);
2660 			return -1;
2661 		}
2662 	}
2663 
2664 	if (trace->trace_pgfaults & TRACE_PFMAJ)
2665 		for (i = 0; i < majpf_args_nr; i++)
2666 			rec_argv[j++] = majpf_args[i];
2667 
2668 	if (trace->trace_pgfaults & TRACE_PFMIN)
2669 		for (i = 0; i < minpf_args_nr; i++)
2670 			rec_argv[j++] = minpf_args[i];
2671 
2672 	for (i = 0; i < (unsigned int)argc; i++)
2673 		rec_argv[j++] = argv[i];
2674 
2675 	return cmd_record(j, rec_argv);
2676 }
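
/*
 * Usage sketch, not part of the original listing: 'perf trace record -- sleep 1'
 * builds an argv equivalent to:
 *
 *   perf record -R -m 1024 -c 1 -e raw_syscalls:sys_enter,raw_syscalls:sys_exit -- sleep 1
 *
 * with the major/minor fault events appended when --pf is in effect.
 */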
2677 
2678 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2679 
2680 static bool evlist__add_vfs_getname(struct evlist *evlist)
2681 {
2682 	bool found = false;
2683 	struct evsel *evsel, *tmp;
2684 	struct parse_events_error err = { .idx = 0, };
2685 	int ret = parse_events(evlist, "probe:vfs_getname*", &err);
2686 
2687 	if (ret)
2688 		return false;
2689 
2690 	evlist__for_each_entry_safe(evlist, evsel, tmp) {
2691 		if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
2692 			continue;
2693 
2694 		if (perf_evsel__field(evsel, "pathname")) {
2695 			evsel->handler = trace__vfs_getname;
2696 			found = true;
2697 			continue;
2698 		}
2699 
2700 		list_del_init(&evsel->core.node);
2701 		evsel->evlist = NULL;
2702 		evsel__delete(evsel);
2703 	}
2704 
2705 	return found;
2706 }
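
/*
 * Usage sketch, not part of the original listing: the probe:vfs_getname*
 * event parsed above must have been set up beforehand, along the lines of
 * (the line number inside getname_flags() is illustrative, it is kernel
 * dependent):
 *
 *   perf probe 'vfs_getname=getname_flags:72 pathname=result->name:string'
 */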
2707 
2708 static struct evsel *perf_evsel__new_pgfault(u64 config)
2709 {
2710 	struct evsel *evsel;
2711 	struct perf_event_attr attr = {
2712 		.type = PERF_TYPE_SOFTWARE,
2713 		.mmap_data = 1,
2714 	};
2715 
2716 	attr.config = config;
2717 	attr.sample_period = 1;
2718 
2719 	event_attr_init(&attr);
2720 
2721 	evsel = evsel__new(&attr);
2722 	if (evsel)
2723 		evsel->handler = trace__pgfault;
2724 
2725 	return evsel;
2726 }
2727 
2728 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2729 {
2730 	const u32 type = event->header.type;
2731 	struct evsel *evsel;
2732 
2733 	if (type != PERF_RECORD_SAMPLE) {
2734 		trace__process_event(trace, trace->host, event, sample);
2735 		return;
2736 	}
2737 
2738 	evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2739 	if (evsel == NULL) {
2740 		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2741 		return;
2742 	}
2743 
2744 	if (evswitch__discard(&trace->evswitch, evsel))
2745 		return;
2746 
2747 	trace__set_base_time(trace, evsel, sample);
2748 
2749 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
2750 	    sample->raw_data == NULL) {
2751 		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2752 		       perf_evsel__name(evsel), sample->tid,
2753 		       sample->cpu, sample->raw_size);
2754 	} else {
2755 		tracepoint_handler handler = evsel->handler;
2756 		handler(trace, evsel, event, sample);
2757 	}
2758 
2759 	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
2760 		interrupted = true;
2761 }
2762 
2763 static int trace__add_syscall_newtp(struct trace *trace)
2764 {
2765 	int ret = -1;
2766 	struct evlist *evlist = trace->evlist;
2767 	struct evsel *sys_enter, *sys_exit;
2768 
2769 	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
2770 	if (sys_enter == NULL)
2771 		goto out;
2772 
2773 	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2774 		goto out_delete_sys_enter;
2775 
2776 	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
2777 	if (sys_exit == NULL)
2778 		goto out_delete_sys_enter;
2779 
2780 	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2781 		goto out_delete_sys_exit;
2782 
2783 	perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
2784 	perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
2785 
2786 	evlist__add(evlist, sys_enter);
2787 	evlist__add(evlist, sys_exit);
2788 
2789 	if (callchain_param.enabled && !trace->kernel_syscallchains) {
2790 		/*
2791 		 * We're interested only in the user space callchain
2792 		 * leading to the syscall, allow overriding that for
2793 		 * debugging reasons using --kernel_syscall_callchains
2794 		 */
2795 		sys_exit->core.attr.exclude_callchain_kernel = 1;
2796 	}
2797 
2798 	trace->syscalls.events.sys_enter = sys_enter;
2799 	trace->syscalls.events.sys_exit  = sys_exit;
2800 
2801 	ret = 0;
2802 out:
2803 	return ret;
2804 
2805 out_delete_sys_exit:
2806 	evsel__delete_priv(sys_exit);
2807 out_delete_sys_enter:
2808 	evsel__delete_priv(sys_enter);
2809 	goto out;
2810 }
2811 
2812 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
2813 {
2814 	int err = -1;
2815 	struct evsel *sys_exit;
2816 	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2817 						trace->ev_qualifier_ids.nr,
2818 						trace->ev_qualifier_ids.entries);
2819 
2820 	if (filter == NULL)
2821 		goto out_enomem;
2822 
2823 	if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
2824 					  filter)) {
2825 		sys_exit = trace->syscalls.events.sys_exit;
2826 		err = perf_evsel__append_tp_filter(sys_exit, filter);
2827 	}
2828 
2829 	free(filter);
2830 out:
2831 	return err;
2832 out_enomem:
2833 	errno = ENOMEM;
2834 	goto out;
2835 }
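
/*
 * Sketch, not part of the original listing: for 'perf trace -e openat,close'
 * on x86_64 the expression built above would look something like
 * "id == 257 || id == 3" (a negated qualifier uses != and && instead), and
 * it is appended as a tracepoint filter to both raw_syscalls events.
 */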
2836 
2837 #ifdef HAVE_LIBBPF_SUPPORT
2838 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
2839 {
2840 	if (trace->bpf_obj == NULL)
2841 		return NULL;
2842 
2843 	return bpf_object__find_program_by_title(trace->bpf_obj, name);
2844 }
2845 
2846 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
2847 							const char *prog_name, const char *type)
2848 {
2849 	struct bpf_program *prog;
2850 
2851 	if (prog_name == NULL) {
2852 		char default_prog_name[256];
2853 		scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
2854 		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
2855 		if (prog != NULL)
2856 			goto out_found;
2857 		if (sc->fmt && sc->fmt->alias) {
2858 			scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
2859 			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
2860 			if (prog != NULL)
2861 				goto out_found;
2862 		}
2863 		goto out_unaugmented;
2864 	}
2865 
2866 	prog = trace__find_bpf_program_by_title(trace, prog_name);
2867 
2868 	if (prog != NULL) {
2869 out_found:
2870 		return prog;
2871 	}
2872 
2873 	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
2874 		 prog_name, type, sc->name);
2875 out_unaugmented:
2876 	return trace->syscalls.unaugmented_prog;
2877 }
2878 
2879 static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
2880 {
2881 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
2882 
2883 	if (sc == NULL)
2884 		return;
2885 
2886 	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
2887 	sc->bpf_prog.sys_exit  = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit  : NULL,  "exit");
2888 }
2889 
2890 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
2891 {
2892 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
2893 	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
2894 }
2895 
2896 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
2897 {
2898 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
2899 	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
2900 }
2901 
2902 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
2903 {
2904 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
2905 	int arg = 0;
2906 
2907 	if (sc == NULL)
2908 		goto out;
2909 
2910 	for (; arg < sc->nr_args; ++arg) {
2911 		entry->string_args_len[arg] = 0;
2912 		if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
2913 			/* Should be set like strace -s strsize */
2914 			entry->string_args_len[arg] = PATH_MAX;
2915 		}
2916 	}
2917 out:
2918 	for (; arg < 6; ++arg)
2919 		entry->string_args_len[arg] = 0;
2920 }
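
/*
 * Sketch, not part of the original listing: for openat(dfd, filename, flags,
 * mode) only 'filename' is beautified via SCA_FILENAME, so the loop above
 * leaves entry->string_args_len as { 0, PATH_MAX, 0, 0, 0, 0 }, telling the
 * BPF augmenter which args to copy as strings and up to how many bytes each.
 */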
2921 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
2922 {
2923 	int fd = bpf_map__fd(trace->syscalls.map);
2924 	struct bpf_map_syscall_entry value = {
2925 		.enabled = !trace->not_ev_qualifier,
2926 	};
2927 	int err = 0;
2928 	size_t i;
2929 
2930 	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
2931 		int key = trace->ev_qualifier_ids.entries[i];
2932 
2933 		if (value.enabled) {
2934 			trace__init_bpf_map_syscall_args(trace, key, &value);
2935 			trace__init_syscall_bpf_progs(trace, key);
2936 		}
2937 
2938 		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
2939 		if (err)
2940 			break;
2941 	}
2942 
2943 	return err;
2944 }
2945 
2946 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
2947 {
2948 	int fd = bpf_map__fd(trace->syscalls.map);
2949 	struct bpf_map_syscall_entry value = {
2950 		.enabled = enabled,
2951 	};
2952 	int err = 0, key;
2953 
2954 	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
2955 		if (enabled)
2956 			trace__init_bpf_map_syscall_args(trace, key, &value);
2957 
2958 		err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
2959 		if (err)
2960 			break;
2961 	}
2962 
2963 	return err;
2964 }
2965 
2966 static int trace__init_syscalls_bpf_map(struct trace *trace)
2967 {
2968 	bool enabled = true;
2969 
2970 	if (trace->ev_qualifier_ids.nr)
2971 		enabled = trace->not_ev_qualifier;
2972 
2973 	return __trace__init_syscalls_bpf_map(trace, enabled);
2974 }
2975 
2976 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
2977 {
2978 	struct tep_format_field *field, *candidate_field;
2979 	int id;
2980 
2981 	/*
2982 	 * We're only interested in syscalls that have a pointer:
2983 	 */
2984 	for (field = sc->args; field; field = field->next) {
2985 		if (field->flags & TEP_FIELD_IS_POINTER)
2986 			goto try_to_find_pair;
2987 	}
2988 
2989 	return NULL;
2990 
2991 try_to_find_pair:
2992 	for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
2993 		struct syscall *pair = trace__syscall_info(trace, NULL, id);
2994 		struct bpf_program *pair_prog;
2995 		bool is_candidate = false;
2996 
2997 		if (pair == NULL || pair == sc ||
2998 		    pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
2999 			continue;
3000 
3001 		for (field = sc->args, candidate_field = pair->args;
3002 		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
3003 			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
3004 			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
3005 
3006 			if (is_pointer) {
3007 			       if (!candidate_is_pointer) {
3008 					// The candidate just doesn't copy our pointer arg; it might copy other pointers we want, though.
3009 					continue;
3010 			       }
3011 			} else {
3012 				if (candidate_is_pointer) {
3013 					// The candidate might copy a pointer we don't have, skip it.
3014 					goto next_candidate;
3015 				}
3016 				continue;
3017 			}
3018 
3019 			if (strcmp(field->type, candidate_field->type))
3020 				goto next_candidate;
3021 
3022 			is_candidate = true;
3023 		}
3024 
3025 		if (!is_candidate)
3026 			goto next_candidate;
3027 
3028 		/*
3029 		 * Check if the tentative pair syscall augmenter has more pointers; if it
3030 		 * has, it may be collecting those too and we then can't use it, as it
3031 		 * would collect more than what is common to the two syscalls.
3032 		 */
3033 		if (candidate_field) {
3034 			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
3035 				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
3036 					goto next_candidate;
3037 		}
3038 
3039 		pair_prog = pair->bpf_prog.sys_enter;
3040 		/*
3041 		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
3042 		 * have been searched for, so search it here and if it returns the
3043 		 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
3044 		 * program for a filtered syscall on a non-filtered one.
3045 		 *
3046 		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
3047 		 * useful for "renameat2".
3048 		 */
3049 		if (pair_prog == NULL) {
3050 			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3051 			if (pair_prog == trace->syscalls.unaugmented_prog)
3052 				goto next_candidate;
3053 		}
3054 
3055 		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
3056 		return pair_prog;
3057 	next_candidate:
3058 		continue;
3059 	}
3060 
3061 	return NULL;
3062 }
3063 
3064 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3065 {
3066 	int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3067 	    map_exit_fd  = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3068 	int err = 0, key;
3069 
3070 	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3071 		int prog_fd;
3072 
3073 		if (!trace__syscall_enabled(trace, key))
3074 			continue;
3075 
3076 		trace__init_syscall_bpf_progs(trace, key);
3077 
3078 		// It'll get at least the "!raw_syscalls:unaugmented" fallback program.
3079 		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3080 		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3081 		if (err)
3082 			break;
3083 		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3084 		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
3085 		if (err)
3086 			break;
3087 	}
3088 
3089 	/*
3090 	 * Now lets do a second pass looking for enabled syscalls without
3091 	 * an augmenter that have a signature that is a superset of another
3092 	 * syscall with an augmenter so that we can auto-reuse it.
3093 	 *
3094 	 * I.e. if we have an augmenter for the "open" syscall that has
3095 	 * this signature:
3096 	 *
3097 	 *   int open(const char *pathname, int flags, mode_t mode);
3098 	 *
3099 	 * I.e. that will collect just the first string argument, then we
3100 	 * can reuse it for the 'creat' syscall, that has this signature:
3101 	 *
3102 	 *   int creat(const char *pathname, mode_t mode);
3103 	 *
3104 	 * and for:
3105 	 *
3106 	 *   int stat(const char *pathname, struct stat *statbuf);
3107 	 *   int lstat(const char *pathname, struct stat *statbuf);
3108 	 *
3109 	 * Because the 'open' augmenter will collect the first arg as a string,
3110 	 * and leave alone all the other args, which already helps with
3111 	 * beautifying 'stat' and 'lstat''s pathname arg.
3112 	 *
3113 	 * Then, in time, when 'stat' gets an augmenter that collects both the
3114 	 * first and second args (this one on the raw_syscalls:sys_exit prog
3115 	 * array tail call), that one will be used instead.
3116 	 */
3117 	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3118 		struct syscall *sc = trace__syscall_info(trace, NULL, key);
3119 		struct bpf_program *pair_prog;
3120 		int prog_fd;
3121 
3122 		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
3123 			continue;
3124 
3125 		/*
3126 		 * For now we're just reusing the sys_enter prog, and if it
3127 		 * already has an augmenter, we don't need to find one.
3128 		 */
3129 		if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3130 			continue;
3131 
3132 		/*
3133 		 * Look at all the other syscalls for one that has a signature
3134 		 * that is close enough that we can share:
3135 		 */
3136 		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3137 		if (pair_prog == NULL)
3138 			continue;
3139 
3140 		sc->bpf_prog.sys_enter = pair_prog;
3141 
3142 		/*
3143 		 * Update the BPF_MAP_TYPE_PROG_ARRAY for raw_syscalls:sys_enter
3144 		 * with the fd for the program we're reusing:
3145 		 */
3146 		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
3147 		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3148 		if (err)
3149 			break;
3150 	}
3151 
3152 
3153 	return err;
3154 }
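/*
 * A minimal sketch of the BPF side consuming these PROG_ARRAY maps, loosely
 * modeled on tools/perf/examples/bpf/augmented_raw_syscalls.c (the struct and
 * section names here are assumptions; only the 'syscalls_sys_enter' map name
 * is the one looked up in trace__set_bpf_map_syscalls() below): the generic
 * sys_enter prog tail calls into the per-syscall slot populated above, and
 * falls through if the slot is empty.
 */
#if 0
struct syscall_enter_args;	/* layout assumed to mirror the tracepoint */

SEC("raw_syscalls:sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
	/* args->syscall_nr indexes the BPF_MAP_TYPE_PROG_ARRAY we filled
	 * with bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY) */
	bpf_tail_call(args, &syscalls_sys_enter, args->syscall_nr);
	return 0;	/* no augmenter installed for this syscall */
}
#endif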
3155 #else
3156 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3157 {
3158 	return 0;
3159 }
3160 
3161 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3162 {
3163 	return 0;
3164 }
3165 
3166 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3167 							    const char *name __maybe_unused)
3168 {
3169 	return NULL;
3170 }
3171 
3172 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3173 {
3174 	return 0;
3175 }
3176 #endif // HAVE_LIBBPF_SUPPORT
3177 
3178 static int trace__set_ev_qualifier_filter(struct trace *trace)
3179 {
3180 	if (trace->syscalls.map)
3181 		return trace__set_ev_qualifier_bpf_filter(trace);
3182 	if (trace->syscalls.events.sys_enter)
3183 		return trace__set_ev_qualifier_tp_filter(trace);
3184 	return 0;
3185 }
3186 
3187 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
3188 				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
3189 {
3190 	int err = 0;
3191 #ifdef HAVE_LIBBPF_SUPPORT
3192 	bool value = true;
3193 	int map_fd = bpf_map__fd(map);
3194 	size_t i;
3195 
3196 	for (i = 0; i < npids; ++i) {
3197 		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
3198 		if (err)
3199 			break;
3200 	}
3201 #endif
3202 	return err;
3203 }
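/*
 * The map updated above is a hash keyed by pid; a minimal sketch of the
 * kernel-side check (assuming a 'pids_filtered' BPF_MAP_TYPE_HASH map as in
 * tools/perf/examples/bpf/augmented_raw_syscalls.c -- the helper name is an
 * assumption):
 */
#if 0
static bool pid_filter__has(struct bpf_map *pids, pid_t pid)
{
	/* present in the map -> userspace asked for this pid to be filtered */
	return bpf_map_lookup_elem(pids, &pid) != NULL;
}
#endif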
3204 
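/*
 * When tracing system wide, the tracer's own output generates events: each
 * line we print leads to write()s to the terminal, which leads to more
 * syscalls, which we then print, ad infinitum. Filtering our own pid and,
 * below, the sshd/gnome-terminal ancestor that shovels that output to the
 * user breaks the feedback loop.
 */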
3205 static int trace__set_filter_loop_pids(struct trace *trace)
3206 {
3207 	unsigned int nr = 1, err;
3208 	pid_t pids[32] = {
3209 		getpid(),
3210 	};
3211 	struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3212 
3213 	while (thread && nr < ARRAY_SIZE(pids)) {
3214 		struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3215 
3216 		if (parent == NULL)
3217 			break;
3218 
3219 		if (!strcmp(thread__comm_str(parent), "sshd") ||
3220 		    strstarts(thread__comm_str(parent), "gnome-terminal")) {
3221 			pids[nr++] = parent->tid;
3222 			break;
3223 		}
3224 		thread = parent;
3225 	}
3226 
3227 	err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
3228 	if (!err && trace->filter_pids.map)
3229 		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3230 
3231 	return err;
3232 }
3233 
3234 static int trace__set_filter_pids(struct trace *trace)
3235 {
3236 	int err = 0;
3237 	/*
3238 	 * Better not use !target__has_task() here because we need to cover the
3239 	 * case where no threads were specified in the command line, but a
3240 	 * workload was, and in that case we will fill in the thread_map when
3241 	 * we fork the workload in perf_evlist__prepare_workload.
3242 	 */
3243 	if (trace->filter_pids.nr > 0) {
3244 		err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3245 						      trace->filter_pids.entries);
3246 		if (!err && trace->filter_pids.map) {
3247 			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3248 						       trace->filter_pids.entries);
3249 		}
3250 	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3251 		err = trace__set_filter_loop_pids(trace);
3252 	}
3253 
3254 	return err;
3255 }
3256 
3257 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3258 {
3259 	struct evlist *evlist = trace->evlist;
3260 	struct perf_sample sample;
3261 	int err;
3262 
3263 	err = perf_evlist__parse_sample(evlist, event, &sample);
3264 	if (err)
3265 		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3266 	else
3267 		trace__handle_event(trace, event, &sample);
3268 
3269 	return 0;
3270 }
3271 
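/*
 * Flush only events older than the newest timestamp seen minus one second:
 * that window gives out-of-order events still sitting in other ring buffers
 * a chance to be queued and sorted before we print.
 */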
3272 static int __trace__flush_events(struct trace *trace)
3273 {
3274 	u64 first = ordered_events__first_time(&trace->oe.data);
3275 	u64 flush = trace->oe.last - NSEC_PER_SEC;
3276 
3277 	/* Is there something to flush? */
3278 	if (first && first < flush)
3279 		return ordered_events__flush_time(&trace->oe.data, flush);
3280 
3281 	return 0;
3282 }
3283 
3284 static int trace__flush_events(struct trace *trace)
3285 {
3286 	return !trace->sort_events ? 0 : __trace__flush_events(trace);
3287 }
3288 
3289 static int trace__deliver_event(struct trace *trace, union perf_event *event)
3290 {
3291 	int err;
3292 
3293 	if (!trace->sort_events)
3294 		return __trace__deliver_event(trace, event);
3295 
3296 	err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3297 	if (err && err != -1)
3298 		return err;
3299 
3300 	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
3301 	if (err)
3302 		return err;
3303 
3304 	return trace__flush_events(trace);
3305 }
3306 
3307 static int ordered_events__deliver_event(struct ordered_events *oe,
3308 					 struct ordered_event *event)
3309 {
3310 	struct trace *trace = container_of(oe, struct trace, oe.data);
3311 
3312 	return __trace__deliver_event(trace, event->event);
3313 }
3314 
3315 static int trace__run(struct trace *trace, int argc, const char **argv)
3316 {
3317 	struct evlist *evlist = trace->evlist;
3318 	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
3319 	int err = -1, i;
3320 	unsigned long before;
3321 	const bool forks = argc > 0;
3322 	bool draining = false;
3323 
3324 	trace->live = true;
3325 
3326 	if (!trace->raw_augmented_syscalls) {
3327 		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3328 			goto out_error_raw_syscalls;
3329 
3330 		if (trace->trace_syscalls)
3331 			trace->vfs_getname = evlist__add_vfs_getname(evlist);
3332 	}
3333 
3334 	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3335 		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
3336 		if (pgfault_maj == NULL)
3337 			goto out_error_mem;
3338 		perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3339 		evlist__add(evlist, pgfault_maj);
3340 	}
3341 
3342 	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3343 		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
3344 		if (pgfault_min == NULL)
3345 			goto out_error_mem;
3346 		perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3347 		evlist__add(evlist, pgfault_min);
3348 	}
3349 
3350 	if (trace->sched &&
3351 	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
3352 				   trace__sched_stat_runtime))
3353 		goto out_error_sched_stat_runtime;
3354 
3355 	/*
3356 	 * If a global cgroup was set, apply it to all the events without an
3357 	 * explicit cgroup. I.e.:
3358 	 *
3359 	 * 	trace -G A -e sched:*switch
3360 	 *
3361 	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
3362 	 * _and_ sched:sched_switch to the 'A' cgroup, while:
3363 	 *
3364 	 * trace -e sched:*switch -G A
3365 	 *
3366 	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
3367 	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
3368 	 * a cgroup (on the root cgroup, sys wide, etc).
3369 	 *
3370 	 * Multiple cgroups:
3371 	 *
3372 	 * trace -G A -e sched:*switch -G B
3373 	 *
3374 	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
3375 	 * to the 'B' cgroup.
3376 	 *
3377 	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
3378 	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3379 	 */
3380 	if (trace->cgroup)
3381 		evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3382 
3383 	err = perf_evlist__create_maps(evlist, &trace->opts.target);
3384 	if (err < 0) {
3385 		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3386 		goto out_delete_evlist;
3387 	}
3388 
3389 	err = trace__symbols_init(trace, evlist);
3390 	if (err < 0) {
3391 		fprintf(trace->output, "Problems initializing symbol libraries!\n");
3392 		goto out_delete_evlist;
3393 	}
3394 
3395 	perf_evlist__config(evlist, &trace->opts, &callchain_param);
3396 
3397 	signal(SIGCHLD, sig_handler);
3398 	signal(SIGINT, sig_handler);
3399 
3400 	if (forks) {
3401 		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
3402 						    argv, false, NULL);
3403 		if (err < 0) {
3404 			fprintf(trace->output, "Couldn't run the workload!\n");
3405 			goto out_delete_evlist;
3406 		}
3407 	}
3408 
3409 	err = evlist__open(evlist);
3410 	if (err < 0)
3411 		goto out_error_open;
3412 
3413 	err = bpf__apply_obj_config();
3414 	if (err) {
3415 		char errbuf[BUFSIZ];
3416 
3417 		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
3418 		pr_err("ERROR: Apply config to BPF failed: %s\n",
3419 			 errbuf);
3420 		goto out_error_open;
3421 	}
3422 
3423 	err = trace__set_filter_pids(trace);
3424 	if (err < 0)
3425 		goto out_error_mem;
3426 
3427 	if (trace->syscalls.map)
3428 		trace__init_syscalls_bpf_map(trace);
3429 
3430 	if (trace->syscalls.prog_array.sys_enter)
3431 		trace__init_syscalls_bpf_prog_array_maps(trace);
3432 
3433 	if (trace->ev_qualifier_ids.nr > 0) {
3434 		err = trace__set_ev_qualifier_filter(trace);
3435 		if (err < 0)
3436 			goto out_errno;
3437 
3438 		if (trace->syscalls.events.sys_exit) {
3439 			pr_debug("event qualifier tracepoint filter: %s\n",
3440 				 trace->syscalls.events.sys_exit->filter);
3441 		}
3442 	}
3443 
3444 	/*
3445 	 * If the "close" syscall is not traced, then we will not have the
3446 	 * opportunity, in syscall_arg__scnprintf_close_fd(), to invalidate the
3447 	 * fd->pathname table and we'd end up showing the last value set by
3448 	 * syscalls opening a pathname and associating it with a descriptor or
3449 	 * reading it from /proc/pid/fd/ in cases where that doesn't make
3450 	 * sense.
3451 	 *
3452 	 *  So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
3453 	 *  not in use.
3454 	 */
3455 	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
3456 
3457 	err = perf_evlist__apply_filters(evlist, &evsel);
3458 	if (err < 0)
3459 		goto out_error_apply_filters;
3460 
3461 	if (trace->dump.map)
3462 		bpf_map__fprintf(trace->dump.map, trace->output);
3463 
3464 	err = evlist__mmap(evlist, trace->opts.mmap_pages);
3465 	if (err < 0)
3466 		goto out_error_mmap;
3467 
3468 	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
3469 		evlist__enable(evlist);
3470 
3471 	if (forks)
3472 		perf_evlist__start_workload(evlist);
3473 
3474 	if (trace->opts.initial_delay) {
3475 		usleep(trace->opts.initial_delay * 1000);
3476 		evlist__enable(evlist);
3477 	}
3478 
3479 	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
3480 				  evlist->core.threads->nr > 1 ||
3481 				  evlist__first(evlist)->core.attr.inherit;
3482 
3483 	/*
3484 	 * Now that we already used evsel->core.attr to ask the kernel to setup the
3485 	 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
3486 	 * trace__resolve_callchain(), allowing per-event max-stack settings
3487 	 * to override an explicitly set --max-stack global setting.
3488 	 */
3489 	evlist__for_each_entry(evlist, evsel) {
3490 		if (evsel__has_callchain(evsel) &&
3491 		    evsel->core.attr.sample_max_stack == 0)
3492 			evsel->core.attr.sample_max_stack = trace->max_stack;
3493 	}
3494 again:
3495 	before = trace->nr_events;
3496 
3497 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
3498 		union perf_event *event;
3499 		struct mmap *md;
3500 
3501 		md = &evlist->mmap[i];
3502 		if (perf_mmap__read_init(md) < 0)
3503 			continue;
3504 
3505 		while ((event = perf_mmap__read_event(md)) != NULL) {
3506 			++trace->nr_events;
3507 
3508 			err = trace__deliver_event(trace, event);
3509 			if (err)
3510 				goto out_disable;
3511 
3512 			perf_mmap__consume(md);
3513 
3514 			if (interrupted)
3515 				goto out_disable;
3516 
3517 			if (done && !draining) {
3518 				evlist__disable(evlist);
3519 				draining = true;
3520 			}
3521 		}
3522 		perf_mmap__read_done(md);
3523 	}
3524 
3525 	if (trace->nr_events == before) {
3526 		int timeout = done ? 100 : -1;
3527 
3528 		if (!draining && evlist__poll(evlist, timeout) > 0) {
3529 			if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
3530 				draining = true;
3531 
3532 			goto again;
3533 		} else {
3534 			if (trace__flush_events(trace))
3535 				goto out_disable;
3536 		}
3537 	} else {
3538 		goto again;
3539 	}
3540 
3541 out_disable:
3542 	thread__zput(trace->current);
3543 
3544 	evlist__disable(evlist);
3545 
3546 	if (trace->sort_events)
3547 		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
3548 
3549 	if (!err) {
3550 		if (trace->summary)
3551 			trace__fprintf_thread_summary(trace, trace->output);
3552 
3553 		if (trace->show_tool_stats) {
3554 			fprintf(trace->output, "Stats:\n "
3555 					       " vfs_getname : %" PRIu64 "\n"
3556 					       " proc_getname: %" PRIu64 "\n",
3557 				trace->stats.vfs_getname,
3558 				trace->stats.proc_getname);
3559 		}
3560 	}
3561 
3562 out_delete_evlist:
3563 	trace__symbols__exit(trace);
3564 
3565 	evlist__delete(evlist);
3566 	cgroup__put(trace->cgroup);
3567 	trace->evlist = NULL;
3568 	trace->live = false;
3569 	return err;
3570 {
3571 	char errbuf[BUFSIZ];
3572 
3573 out_error_sched_stat_runtime:
3574 	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
3575 	goto out_error;
3576 
3577 out_error_raw_syscalls:
3578 	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
3579 	goto out_error;
3580 
3581 out_error_mmap:
3582 	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
3583 	goto out_error;
3584 
3585 out_error_open:
3586 	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
3587 
3588 out_error:
3589 	fprintf(trace->output, "%s\n", errbuf);
3590 	goto out_delete_evlist;
3591 
3592 out_error_apply_filters:
3593 	fprintf(trace->output,
3594 		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
3595 		evsel->filter, perf_evsel__name(evsel), errno,
3596 		str_error_r(errno, errbuf, sizeof(errbuf)));
3597 	goto out_delete_evlist;
3598 }
3599 out_error_mem:
3600 	fprintf(trace->output, "Not enough memory to run!\n");
3601 	goto out_delete_evlist;
3602 
3603 out_errno:
3604 	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
3605 	goto out_delete_evlist;
3606 }
3607 
3608 static int trace__replay(struct trace *trace)
3609 {
3610 	const struct evsel_str_handler handlers[] = {
3611 		{ "probe:vfs_getname",	     trace__vfs_getname, },
3612 	};
3613 	struct perf_data data = {
3614 		.path  = input_name,
3615 		.mode  = PERF_DATA_MODE_READ,
3616 		.force = trace->force,
3617 	};
3618 	struct perf_session *session;
3619 	struct evsel *evsel;
3620 	int err = -1;
3621 
3622 	trace->tool.sample	  = trace__process_sample;
3623 	trace->tool.mmap	  = perf_event__process_mmap;
3624 	trace->tool.mmap2	  = perf_event__process_mmap2;
3625 	trace->tool.comm	  = perf_event__process_comm;
3626 	trace->tool.exit	  = perf_event__process_exit;
3627 	trace->tool.fork	  = perf_event__process_fork;
3628 	trace->tool.attr	  = perf_event__process_attr;
3629 	trace->tool.tracing_data  = perf_event__process_tracing_data;
3630 	trace->tool.build_id	  = perf_event__process_build_id;
3631 	trace->tool.namespaces	  = perf_event__process_namespaces;
3632 
3633 	trace->tool.ordered_events = true;
3634 	trace->tool.ordering_requires_timestamps = true;
3635 
3636 	/* add tid to output */
3637 	trace->multiple_threads = true;
3638 
3639 	session = perf_session__new(&data, false, &trace->tool);
3640 	if (IS_ERR(session))
3641 		return PTR_ERR(session);
3642 
3643 	if (trace->opts.target.pid)
3644 		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
3645 
3646 	if (trace->opts.target.tid)
3647 		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
3648 
3649 	if (symbol__init(&session->header.env) < 0)
3650 		goto out;
3651 
3652 	trace->host = &session->machines.host;
3653 
3654 	err = perf_session__set_tracepoints_handlers(session, handlers);
3655 	if (err)
3656 		goto out;
3657 
3658 	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3659 						     "raw_syscalls:sys_enter");
3660 	/* older kernels have the syscalls:* tracepoints instead of raw_syscalls:* */
3661 	if (evsel == NULL)
3662 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3663 							     "syscalls:sys_enter");
3664 
3665 	if (evsel &&
3666 	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
3667 	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
3668 		pr_err("Error initializing the raw_syscalls:sys_enter event\n");
3669 		goto out;
3670 	}
3671 
3672 	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3673 						     "raw_syscalls:sys_exit");
3674 	if (evsel == NULL)
3675 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3676 							     "syscalls:sys_exit");
3677 	if (evsel &&
3678 	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
3679 	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
3680 		pr_err("Error initializing the raw_syscalls:sys_exit event\n");
3681 		goto out;
3682 	}
3683 
3684 	evlist__for_each_entry(session->evlist, evsel) {
3685 		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
3686 		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
3687 		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
3688 		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
3689 			evsel->handler = trace__pgfault;
3690 	}
3691 
3692 	setup_pager();
3693 
3694 	err = perf_session__process_events(session);
3695 	if (err)
3696 		pr_err("Failed to process events, error %d\n", err);
3697 
3698 	else if (trace->summary)
3699 		trace__fprintf_thread_summary(trace, trace->output);
3700 
3701 out:
3702 	perf_session__delete(session);
3703 
3704 	return err;
3705 }
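/*
 * Illustrative record+replay round trip (example commands, not part of this
 * file):
 *
 *   # perf trace record -p 1234		# writes perf.data
 *   # perf trace -i perf.data -s		# replays it via trace__replay()
 */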
3706 
3707 static size_t trace__fprintf_threads_header(FILE *fp)
3708 {
3709 	size_t printed;
3710 
3711 	printed  = fprintf(fp, "\n Summary of events:\n\n");
3712 
3713 	return printed;
3714 }
3715 
3716 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
3717 	struct stats 	*stats;
3718 	double		msecs;
3719 	int		syscall;
3720 )
3721 {
3722 	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
3723 	struct stats *stats = source->priv;
3724 
3725 	entry->syscall = source->i;
3726 	entry->stats   = stats;
3727 	entry->msecs   = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
3728 }
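/*
 * DEFINE_RESORT_RB() (from rb_resort.h) generates a 'syscall_stats' helper
 * that copies each node of a source rbtree -- here the intlist in
 * ttrace->syscall_stats -- into a new rbtree ordered by the comparison above
 * (descending msecs); the block body fills one destination entry from one
 * source node. thread__dump_stats() below instantiates it with
 * DECLARE_RESORT_RB_INTLIST() and walks it with resort_rb__for_each_entry().
 */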
3729 
3730 static size_t thread__dump_stats(struct thread_trace *ttrace,
3731 				 struct trace *trace, FILE *fp)
3732 {
3733 	size_t printed = 0;
3734 	struct syscall *sc;
3735 	struct rb_node *nd;
3736 	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
3737 
3738 	if (syscall_stats == NULL)
3739 		return 0;
3740 
3741 	printed += fprintf(fp, "\n");
3742 
3743 	printed += fprintf(fp, "   syscall            calls    total       min       avg       max      stddev\n");
3744 	printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
3745 	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");
3746 
3747 	resort_rb__for_each_entry(nd, syscall_stats) {
3748 		struct stats *stats = syscall_stats_entry->stats;
3749 		if (stats) {
3750 			double min = (double)(stats->min) / NSEC_PER_MSEC;
3751 			double max = (double)(stats->max) / NSEC_PER_MSEC;
3752 			double avg = avg_stats(stats);
3753 			double pct;
3754 			u64 n = (u64) stats->n;
3755 
3756 			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
3757 			avg /= NSEC_PER_MSEC;
3758 
3759 			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
3760 			printed += fprintf(fp, "   %-15s", sc->name);
3761 			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
3762 					   n, syscall_stats_entry->msecs, min, avg);
3763 			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
3764 		}
3765 	}
3766 
3767 	resort_rb__delete(syscall_stats);
3768 	printed += fprintf(fp, "\n\n");
3769 
3770 	return printed;
3771 }
3772 
3773 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
3774 {
3775 	size_t printed = 0;
3776 	struct thread_trace *ttrace = thread__priv(thread);
3777 	double ratio;
3778 
3779 	if (ttrace == NULL)
3780 		return 0;
3781 
3782 	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
3783 
3784 	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
3785 	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
3786 	printed += fprintf(fp, "%.1f%%", ratio);
3787 	if (ttrace->pfmaj)
3788 		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
3789 	if (ttrace->pfmin)
3790 		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
3791 	if (trace->sched)
3792 		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
3793 	else if (fputc('\n', fp) != EOF)
3794 		++printed;
3795 
3796 	printed += thread__dump_stats(ttrace, trace, fp);
3797 
3798 	return printed;
3799 }
3800 
3801 static unsigned long thread__nr_events(struct thread_trace *ttrace)
3802 {
3803 	return ttrace ? ttrace->nr_events : 0;
3804 }
3805 
3806 DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
3807 	struct thread *thread;
3808 )
3809 {
3810 	entry->thread = rb_entry(nd, struct thread, rb_node);
3811 }
3812 
3813 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
3814 {
3815 	size_t printed = trace__fprintf_threads_header(fp);
3816 	struct rb_node *nd;
3817 	int i;
3818 
3819 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3820 		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
3821 
3822 		if (threads == NULL) {
3823 			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
3824 			return 0;
3825 		}
3826 
3827 		resort_rb__for_each_entry(nd, threads)
3828 			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
3829 
3830 		resort_rb__delete(threads);
3831 	}
3832 	return printed;
3833 }
3834 
3835 static int trace__set_duration(const struct option *opt, const char *str,
3836 			       int unset __maybe_unused)
3837 {
3838 	struct trace *trace = opt->value;
3839 
3840 	trace->duration_filter = atof(str);
3841 	return 0;
3842 }
3843 
3844 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
3845 					      int unset __maybe_unused)
3846 {
3847 	int ret = -1;
3848 	size_t i;
3849 	struct trace *trace = opt->value;
3850 	/*
3851 	 * FIXME: introduce an intarray class, plainly parse the csv and create a
3852 	 * { int nr, int entries[] } struct...
3853 	 */
3854 	struct intlist *list = intlist__new(str);
3855 
3856 	if (list == NULL)
3857 		return -1;
3858 
3859 	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
3860 	trace->filter_pids.entries = calloc(i, sizeof(pid_t));
3861 
3862 	if (trace->filter_pids.entries == NULL)
3863 		goto out;
3864 
3865 	trace->filter_pids.entries[0] = getpid();
3866 
3867 	for (i = 1; i < trace->filter_pids.nr; ++i)
3868 		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
3869 
3870 	intlist__delete(list);
3871 	ret = 0;
3872 out:
3873 	return ret;
3874 }
3875 
3876 static int trace__open_output(struct trace *trace, const char *filename)
3877 {
3878 	struct stat st;
3879 
3880 	if (!stat(filename, &st) && st.st_size) {
3881 		char oldname[PATH_MAX];
3882 
3883 		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
3884 		unlink(oldname);
3885 		rename(filename, oldname);
3886 	}
3887 
3888 	trace->output = fopen(filename, "w");
3889 
3890 	return trace->output == NULL ? -errno : 0;
3891 }
3892 
3893 static int parse_pagefaults(const struct option *opt, const char *str,
3894 			    int unset __maybe_unused)
3895 {
3896 	int *trace_pgfaults = opt->value;
3897 
3898 	if (strcmp(str, "all") == 0)
3899 		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
3900 	else if (strcmp(str, "maj") == 0)
3901 		*trace_pgfaults |= TRACE_PFMAJ;
3902 	else if (strcmp(str, "min") == 0)
3903 		*trace_pgfaults |= TRACE_PFMIN;
3904 	else
3905 		return -1;
3906 
3907 	return 0;
3908 }
3909 
3910 static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
3911 {
3912 	struct evsel *evsel;
3913 
3914 	evlist__for_each_entry(evlist, evsel)
3915 		evsel->handler = handler;
3916 }
3917 
3918 static int evlist__set_syscall_tp_fields(struct evlist *evlist)
3919 {
3920 	struct evsel *evsel;
3921 
3922 	evlist__for_each_entry(evlist, evsel) {
3923 		if (evsel->priv || !evsel->tp_format)
3924 			continue;
3925 
3926 		if (strcmp(evsel->tp_format->system, "syscalls"))
3927 			continue;
3928 
3929 		if (perf_evsel__init_syscall_tp(evsel))
3930 			return -1;
3931 
3932 		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
3933 			struct syscall_tp *sc = evsel->priv;
3934 
3935 			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
3936 				return -1;
3937 		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
3938 			struct syscall_tp *sc = evsel->priv;
3939 
3940 			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
3941 				return -1;
3942 		}
3943 	}
3944 
3945 	return 0;
3946 }
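/*
 * For reference, a sketch of the tracefs format the offsets above come from
 * (offsets vary by syscall and architecture; this is illustrative):
 *
 *   # cat /sys/kernel/tracing/events/syscalls/sys_enter_openat/format
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	...
 *	field:int __syscall_nr;			offset:8;	size:4;
 *	field:int dfd;				offset:16;	size:8;
 *	...
 *
 * sc->id points at __syscall_nr; the syscall args (or the return value, for
 * the sys_exit_* events) start one u64 after it, hence the
 * 'sc->id.offset + sizeof(u64)' computations above.
 */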
3947 
3948 /*
3949  * XXX: Hackish, just splitting the combined -e+--event (syscalls
3950  * (raw_syscalls:sys_{enter,exit}) + events (tracepoints, HW, SW, etc.)) to use
3951  * existing facilities unchanged (trace->ev_qualifier + parse_options()).
3952  *
3953  * It'd be better to introduce a parse_options() variant that would return a
3954  * list with the terms it didn't match to an event...
3955  */
3956 static int trace__parse_events_option(const struct option *opt, const char *str,
3957 				      int unset __maybe_unused)
3958 {
3959 	struct trace *trace = (struct trace *)opt->value;
3960 	const char *s = str;
3961 	char *sep = NULL, *lists[2] = { NULL, NULL, };
3962 	int len = strlen(str) + 1, err = -1, list, idx;
3963 	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
3964 	char group_name[PATH_MAX];
3965 	struct syscall_fmt *fmt;
3966 
3967 	if (strace_groups_dir == NULL)
3968 		return -1;
3969 
3970 	if (*s == '!') {
3971 		++s;
3972 		trace->not_ev_qualifier = true;
3973 	}
3974 
3975 	while (1) {
3976 		if ((sep = strchr(s, ',')) != NULL)
3977 			*sep = '\0';
3978 
3979 		list = 0;
3980 		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
3981 		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
3982 			list = 1;
3983 			goto do_concat;
3984 		}
3985 
3986 		fmt = syscall_fmt__find_by_alias(s);
3987 		if (fmt != NULL) {
3988 			list = 1;
3989 			s = fmt->name;
3990 		} else {
3991 			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
3992 			if (access(group_name, R_OK) == 0)
3993 				list = 1;
3994 		}
3995 do_concat:
3996 		if (lists[list]) {
3997 			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
3998 		} else {
3999 			lists[list] = malloc(len);
4000 			if (lists[list] == NULL)
4001 				goto out;
4002 			strcpy(lists[list], s);
4003 		}
4004 
4005 		if (!sep)
4006 			break;
4007 
4008 		*sep = ',';
4009 		s = sep + 1;
4010 	}
4011 
4012 	if (lists[1] != NULL) {
4013 		struct strlist_config slist_config = {
4014 			.dirname = strace_groups_dir,
4015 		};
4016 
4017 		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4018 		if (trace->ev_qualifier == NULL) {
4019 			fputs("Not enough memory to parse event qualifier", trace->output);
4020 			goto out;
4021 		}
4022 
4023 		if (trace__validate_ev_qualifier(trace))
4024 			goto out;
4025 		trace->trace_syscalls = true;
4026 	}
4027 
4028 	err = 0;
4029 
4030 	if (lists[0]) {
4031 		struct option o = {
4032 			.value = &trace->evlist,
4033 		};
4034 		err = parse_events_option(&o, lists[0], 0);
4035 	}
4036 out:
4037 	if (sep)
4038 		*sep = ',';
4039 
4040 	return err;
4041 }
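/*
 * Illustrative split: 'perf trace -e open*,sched:sched_switch' lands "open*"
 * in lists[1] (it glob-matches the syscall table, so it becomes the
 * ev_qualifier strlist) and "sched:sched_switch" in lists[0], which is fed
 * back to parse_events_option() as a regular event selector.
 */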
4042 
4043 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
4044 {
4045 	struct trace *trace = opt->value;
4046 
4047 	if (!list_empty(&trace->evlist->core.entries)) {
4048 		struct option o = {
4049 			.value = &trace->evlist,
4050 		};
4051 		return parse_cgroups(&o, str, unset);
4052 	}
4053 	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4054 
4055 	return 0;
4056 }
4057 
4058 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
4059 {
4060 	if (trace->bpf_obj == NULL)
4061 		return NULL;
4062 
4063 	return bpf_object__find_map_by_name(trace->bpf_obj, name);
4064 }
4065 
4066 static void trace__set_bpf_map_filtered_pids(struct trace *trace)
4067 {
4068 	trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
4069 }
4070 
4071 static void trace__set_bpf_map_syscalls(struct trace *trace)
4072 {
4073 	trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
4074 	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
4075 	trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
4076 }
4077 
4078 static int trace__config(const char *var, const char *value, void *arg)
4079 {
4080 	struct trace *trace = arg;
4081 	int err = 0;
4082 
4083 	if (!strcmp(var, "trace.add_events")) {
4084 		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
4085 					       "event selector. use 'perf list' to list available events",
4086 					       parse_events_option);
4087 		/*
4088 		 * We can't propagate parse_events_option()'s return, as it is 1
4089 		 * for failure while perf_config() expects -1.
4090 		 */
4091 		if (parse_events_option(&o, value, 0))
4092 			err = -1;
4093 	} else if (!strcmp(var, "trace.show_timestamp")) {
4094 		trace->show_tstamp = perf_config_bool(var, value);
4095 	} else if (!strcmp(var, "trace.show_duration")) {
4096 		trace->show_duration = perf_config_bool(var, value);
4097 	} else if (!strcmp(var, "trace.show_arg_names")) {
4098 		trace->show_arg_names = perf_config_bool(var, value);
4099 		if (!trace->show_arg_names)
4100 			trace->show_zeros = true;
4101 	} else if (!strcmp(var, "trace.show_zeros")) {
4102 		bool new_show_zeros = perf_config_bool(var, value);
4103 		if (!trace->show_arg_names && !new_show_zeros) {
4104 			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4105 			goto out;
4106 		}
4107 		trace->show_zeros = new_show_zeros;
4108 	} else if (!strcmp(var, "trace.show_prefix")) {
4109 		trace->show_string_prefix = perf_config_bool(var, value);
4110 	} else if (!strcmp(var, "trace.no_inherit")) {
4111 		trace->opts.no_inherit = perf_config_bool(var, value);
4112 	} else if (!strcmp(var, "trace.args_alignment")) {
4113 		int args_alignment = 0;
4114 		if (perf_config_int(&args_alignment, var, value) == 0)
4115 			trace->args_alignment = args_alignment;
4116 	}
4117 out:
4118 	return err;
4119 }
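/*
 * A ~/.perfconfig stanza exercising the knobs above (illustrative values):
 *
 *	[trace]
 *		add_events = probe:vfs_getname
 *		show_duration = no
 *		show_zeros = yes
 *		args_alignment = 40
 */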
4120 
4121 int cmd_trace(int argc, const char **argv)
4122 {
4123 	const char *trace_usage[] = {
4124 		"perf trace [<options>] [<command>]",
4125 		"perf trace [<options>] -- <command> [<options>]",
4126 		"perf trace record [<options>] [<command>]",
4127 		"perf trace record [<options>] -- <command> [<options>]",
4128 		NULL
4129 	};
4130 	struct trace trace = {
4131 		.opts = {
4132 			.target = {
4133 				.uid	   = UINT_MAX,
4134 				.uses_mmap = true,
4135 			},
4136 			.user_freq     = UINT_MAX,
4137 			.user_interval = ULLONG_MAX,
4138 			.no_buffering  = true,
4139 			.mmap_pages    = UINT_MAX,
4140 		},
4141 		.output = stderr,
4142 		.show_comm = true,
4143 		.show_tstamp = true,
4144 		.show_duration = true,
4145 		.show_arg_names = true,
4146 		.args_alignment = 70,
4147 		.trace_syscalls = false,
4148 		.kernel_syscallchains = false,
4149 		.max_stack = UINT_MAX,
4150 		.max_events = ULONG_MAX,
4151 	};
4152 	const char *map_dump_str = NULL;
4153 	const char *output_name = NULL;
4154 	const struct option trace_options[] = {
4155 	OPT_CALLBACK('e', "event", &trace, "event",
4156 		     "event/syscall selector. use 'perf list' to list available events",
4157 		     trace__parse_events_option),
4158 	OPT_BOOLEAN(0, "comm", &trace.show_comm,
4159 		    "show the thread COMM next to its id"),
4160 	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4161 	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4162 		     trace__parse_events_option),
4163 	OPT_STRING('o', "output", &output_name, "file", "output file name"),
4164 	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
4165 	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4166 		    "trace events on existing process id"),
4167 	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4168 		    "trace events on existing thread id"),
4169 	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4170 		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
4171 	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4172 		    "system-wide collection from all CPUs"),
4173 	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4174 		    "list of cpus to monitor"),
4175 	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4176 		    "child tasks do not inherit counters"),
4177 	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4178 		     "number of mmap data pages",
4179 		     perf_evlist__parse_mmap_pages),
4180 	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4181 		   "user to profile"),
4182 	OPT_CALLBACK(0, "duration", &trace, "float",
4183 		     "show only events with duration > N.M ms",
4184 		     trace__set_duration),
4185 #ifdef HAVE_LIBBPF_SUPPORT
4186 	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
4187 #endif
4188 	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4189 	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
4190 	OPT_BOOLEAN('T', "time", &trace.full_time,
4191 		    "Show full timestamp, not time relative to first start"),
4192 	OPT_BOOLEAN(0, "failure", &trace.failure_only,
4193 		    "Show only syscalls that failed"),
4194 	OPT_BOOLEAN('s', "summary", &trace.summary_only,
4195 		    "Show only syscall summary with statistics"),
4196 	OPT_BOOLEAN('S', "with-summary", &trace.summary,
4197 		    "Show all syscalls and summary with statistics"),
4198 	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4199 		     "Trace pagefaults", parse_pagefaults, "maj"),
4200 	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4201 	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4202 	OPT_CALLBACK(0, "call-graph", &trace.opts,
4203 		     "record_mode[,record_size]", record_callchain_help,
4204 		     &record_parse_callchain_opt),
4205 	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4206 		    "Show the kernel callchains on the syscall exit path"),
4207 	OPT_ULONG(0, "max-events", &trace.max_events,
4208 		"Set the maximum number of events to print, exit after that is reached."),
4209 	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4210 		     "Set the minimum stack depth when parsing the callchain, "
4211 		     "anything below the specified depth will be ignored."),
4212 	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4213 		     "Set the maximum stack depth when parsing the callchain, "
4214 		     "anything beyond the specified depth will be ignored. "
4215 		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
4216 	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4217 			"Sort batch of events before processing, use if getting out of order events"),
4218 	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4219 			"print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
4220 	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
4221 			"per thread proc mmap processing timeout in ms"),
4222 	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4223 		     trace__parse_cgroups),
4224 	OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
4225 		     "ms to wait before starting measurement after program "
4226 		     "start"),
4227 	OPTS_EVSWITCH(&trace.evswitch),
4228 	OPT_END()
4229 	};
4230 	bool __maybe_unused max_stack_user_set = true;
4231 	bool mmap_pages_user_set = true;
4232 	struct evsel *evsel;
4233 	const char * const trace_subcommands[] = { "record", NULL };
4234 	int err = -1;
4235 	char bf[BUFSIZ];
4236 
4237 	signal(SIGSEGV, sighandler_dump_stack);
4238 	signal(SIGFPE, sighandler_dump_stack);
4239 
4240 	trace.evlist = evlist__new();
4241 	trace.sctbl = syscalltbl__new();
4242 
4243 	if (trace.evlist == NULL || trace.sctbl == NULL) {
4244 		pr_err("Not enough memory to run!\n");
4245 		err = -ENOMEM;
4246 		goto out;
4247 	}
4248 
4249 	/*
4250 	 * Parsing .perfconfig may entail creating a BPF event, that may need
4251 	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
4252 	 * is too small. This affects just this process, not touching the
4253 	 * global setting. If it fails we'll get something in 'perf trace -v'
4254 	 * to help diagnose the problem.
4255 	 */
4256 	rlimit__bump_memlock();
4257 
4258 	err = perf_config(trace__config, &trace);
4259 	if (err)
4260 		goto out;
4261 
4262 	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
4263 				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
4264 
4265 	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4266 		usage_with_options_msg(trace_usage, trace_options,
4267 				       "cgroup monitoring only available in system-wide mode");
4268 	}
4269 
4270 	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4271 	if (IS_ERR(evsel)) {
4272 		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4273 		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4274 		goto out;
4275 	}
4276 
4277 	if (evsel) {
4278 		trace.syscalls.events.augmented = evsel;
4279 
4280 		evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4281 		if (evsel == NULL) {
4282 			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
4283 			goto out;
4284 		}
4285 
4286 		if (evsel->bpf_obj == NULL) {
4287 			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
4288 			goto out;
4289 		}
4290 
4291 		trace.bpf_obj = evsel->bpf_obj;
4292 
4293 		trace__set_bpf_map_filtered_pids(&trace);
4294 		trace__set_bpf_map_syscalls(&trace);
4295 		trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
4296 	}
4297 
4298 	err = bpf__setup_stdout(trace.evlist);
4299 	if (err) {
4300 		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
4301 		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
4302 		goto out;
4303 	}
4304 
4305 	err = -1;
4306 
4307 	if (map_dump_str) {
4308 		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
4309 		if (trace.dump.map == NULL) {
4310 			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
4311 			goto out;
4312 		}
4313 	}
4314 
4315 	if (trace.trace_pgfaults) {
4316 		trace.opts.sample_address = true;
4317 		trace.opts.sample_time = true;
4318 	}
4319 
4320 	if (trace.opts.mmap_pages == UINT_MAX)
4321 		mmap_pages_user_set = false;
4322 
4323 	if (trace.max_stack == UINT_MAX) {
4324 		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
4325 		max_stack_user_set = false;
4326 	}
4327 
4328 #ifdef HAVE_DWARF_UNWIND_SUPPORT
4329 	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
4330 		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
4331 	}
4332 #endif
4333 
4334 	if (callchain_param.enabled) {
4335 		if (!mmap_pages_user_set && geteuid() == 0)
4336 			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
4337 
4338 		symbol_conf.use_callchain = true;
4339 	}
4340 
4341 	if (trace.evlist->core.nr_entries > 0) {
4342 		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
4343 		if (evlist__set_syscall_tp_fields(trace.evlist)) {
4344 			perror("failed to set syscalls:* tracepoint fields");
4345 			goto out;
4346 		}
4347 	}
4348 
4349 	if (trace.sort_events) {
4350 		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
4351 		ordered_events__set_copy_on_queue(&trace.oe.data, true);
4352 	}
4353 
4354 	/*
4355 	 * If we are augmenting syscalls, then combine what we put in the
4356 	 * __augmented_syscalls__ BPF map with what is in the
4357 	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
4358 	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
4359 	 *
4360 	 * We'll switch to look at two BPF maps, one for sys_enter and the
4361 	 * other for sys_exit when we start augmenting the sys_exit paths with
4362 	 * buffers that are being copied from kernel to userspace, think 'read'
4363 	 * syscall.
4364 	 */
4365 	if (trace.syscalls.events.augmented) {
4366 		evlist__for_each_entry(trace.evlist, evsel) {
4367 			bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
4368 
4369 			if (raw_syscalls_sys_exit) {
4370 				trace.raw_augmented_syscalls = true;
4371 				goto init_augmented_syscall_tp;
4372 			}
4373 
4374 			if (trace.syscalls.events.augmented->priv == NULL &&
4375 			    strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
4376 				struct evsel *augmented = trace.syscalls.events.augmented;
4377 				if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
4378 				    perf_evsel__init_augmented_syscall_tp_args(augmented))
4379 					goto out;
4380 				/*
4381 				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
4382 				 * Above we made sure we can get from the payload the tp fields
4383 				 * that we get from syscalls:sys_enter tracefs format file.
4384 				 */
4385 				augmented->handler = trace__sys_enter;
4386 				/*
4387 				 * Now we do the same for the *syscalls:sys_enter event so that
4388 				 * if we handle it directly, i.e. if the BPF prog returns 0 so
4389 				 * as not to filter it, then we'll handle it just like we would
4390 				 * for the BPF_OUTPUT one:
4391 				 */
4392 				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
4393 				    perf_evsel__init_augmented_syscall_tp_args(evsel))
4394 					goto out;
4395 				evsel->handler = trace__sys_enter;
4396 			}
4397 
4398 			if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
4399 				struct syscall_tp *sc;
4400 init_augmented_syscall_tp:
4401 				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
4402 					goto out;
4403 				sc = evsel->priv;
4404 				/*
4405 				 * For now with BPF raw_augmented we hook into
4406 				 * raw_syscalls:sys_enter and there we get all
4407 				 * 6 syscall args plus the tracepoint common
4408 				 * fields and the syscall_nr (another long).
4409 				 * So we check if that is the case and if so
4410 				 * don't look after the sc->args_size but
4411 				 * always after the full raw_syscalls:sys_enter
4412 				 * payload, which is fixed.
4413 				 *
4414 				 * We'll revisit this later to pass
4415 				 * s->args_size to the BPF augmenter (now
4416 				 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
4417 				 * so that it copies only what we need for each
4418 				 * syscall, like what happens when we use
4419 				 * syscalls:sys_enter_NAME, so that we reduce
4420 				 * the kernel/userspace traffic to just what is
4421 				 * needed for each syscall.
4422 				 */
4423 				if (trace.raw_augmented_syscalls)
4424 					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
4425 				perf_evsel__init_augmented_syscall_tp_ret(evsel);
4426 				evsel->handler = trace__sys_exit;
4427 			}
4428 		}
4429 	}
4430 
4431 	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
4432 		return trace__record(&trace, argc-1, &argv[1]);
4433 
4434 	/* summary_only implies summary option, but don't overwrite summary if set */
4435 	if (trace.summary_only)
4436 		trace.summary = trace.summary_only;
4437 
4438 	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4439 	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4440 		trace.trace_syscalls = true;
4441 	}
4442 
4443 	if (output_name != NULL) {
4444 		err = trace__open_output(&trace, output_name);
4445 		if (err < 0) {
4446 			perror("failed to create output file");
4447 			goto out;
4448 		}
4449 	}
4450 
4451 	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
4452 	if (err)
4453 		goto out_close;
4454 
4455 	err = target__validate(&trace.opts.target);
4456 	if (err) {
4457 		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
4458 		fprintf(trace.output, "%s", bf);
4459 		goto out_close;
4460 	}
4461 
4462 	err = target__parse_uid(&trace.opts.target);
4463 	if (err) {
4464 		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
4465 		fprintf(trace.output, "%s", bf);
4466 		goto out_close;
4467 	}
4468 
4469 	if (!argc && target__none(&trace.opts.target))
4470 		trace.opts.target.system_wide = true;
4471 
4472 	if (input_name)
4473 		err = trace__replay(&trace);
4474 	else
4475 		err = trace__run(&trace, argc, argv);
4476 
4477 out_close:
4478 	if (output_name != NULL)
4479 		fclose(trace.output);
4480 out:
4481 	return err;
4482 }
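/*
 * Illustrative invocations wiring the options above together (examples, not
 * part of this file):
 *
 *   perf trace -e open* --max-events 8 -- cat /etc/passwd
 *   perf trace -p 1234 -S --sort-events
 *   perf trace -F maj --call-graph dwarf -- sleep 1
 */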
4483