// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

static LIST_HEAD(module_strings);

struct module_string {
	struct list_head	next;
	struct module		*module;
	char			*str;
};

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

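/*
 * Look up a field by name, first in the event's own field list, then
 * in the generic fields (CPU, comm) and finally in the fields common
 * to every event (type, flags, preempt_count, pid).
 */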
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
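
/*
 * Example (hypothetical caller): an event whose entry struct carries a
 * u32 field could register that field with:
 *
 *	trace_define_field(call, "u32", "my_field",
 *			   offsetof(struct my_entry, my_field),
 *			   sizeof(u32), false, FILTER_OTHER);
 *
 * "struct my_entry" and "my_field" are made-up names for illustration.
 */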

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(int, common_cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	/* Holds both preempt_count and migrate_disable */
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *array_descriptor;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return false;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) ||
		    field->name[len])
			continue;
		array_descriptor = strchr(field->type, '[');
		/* This is an array and is OK to dereference. */
		return array_descriptor != NULL;
	}
	return false;
}

/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * a pointer is OK if it dereferences into the event itself.
 */
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	bool first = true;
	const char *fmt, *c, *r, *a;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there were no %p* uses,
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						continue;
					}
					if ((fmt[i + j] == 's') && star)
						arg++;
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			i++;
			while (isspace(fmt[i]))
				i++;
			start_arg = i;
			if (!(dereference_flags & (1ULL << arg)))
				goto next_arg;

			/* Find the REC-> in the argument */
			c = strchr(fmt + i, ',');
			r = strstr(fmt + i, "REC->");
			if (r && (!c || r < c)) {
				/*
				 * Addresses of fields in the event,
				 * or an array in the event, are
				 * OK to dereference.
				 * There are ways to fool this, but
				 * this is to catch common mistakes,
				 * not malicious code.
				 */
				a = strchr(fmt + i, '&');
				if ((a && (a < r)) || test_field(r, call))
					dereference_flags &= ~(1ULL << arg);
			} else if ((r = strstr(fmt + i, "__get_dynamic_array(")) &&
				   (!c || r < c)) {
				dereference_flags &= ~(1ULL << arg);
			} else if ((r = strstr(fmt + i, "__get_sockaddr(")) &&
				   (!c || r < c)) {
				dereference_flags &= ~(1ULL << arg);
			}

next_arg:
			i--;
			arg++;
		}
	}

	/*
	 * If you triggered the below warning, the trace event reported
	 * uses an unsafe dereference pointer %p*. As the data stored
	 * at the trace event time may no longer exist when the trace
	 * event is printed, dereferencing to the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}
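
/*
 * For illustration (hypothetical TP_printk() formats):
 *
 *	"comm=%s", REC->comm		- not flagged, %s is not %p*
 *	"mac=%pM", REC->addr		- safe, if "addr" is an array
 *					  recorded in the event itself
 *	"mac=%pM", REC->dev->addr	- flagged, dereferences memory
 *					  outside the event at print time
 *
 * "addr" and "dev" are made-up field names for the example.
 */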

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	data = this_cpu_ptr(tr->array_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

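/*
 * Enable (enable == 1) or disable (enable == 0) an event file. With
 * soft_disable set, callers (e.g. event triggers) keep the tracepoint
 * registered but mark the file SOFT_DISABLED, reference counted via
 * file->sm_ref; the comments in each case below describe the exact
 * state transitions.
 */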
static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Disable use of trace_buffered_event */
			trace_buffered_event_disable();
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Enable use of trace_buffered_event */
			trace_buffered_event_enable();
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
	      trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

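/*
 * Drop the pid and/or no-pid filter lists of @type from @tr. If no pid
 * filtering remains at all, unregister the sched probes, clear the
 * per-file PID_FILTER bits and the per-cpu ignore_pid state, then free
 * the old lists once all tracepoint users are done with them.
 */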
static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
					     lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

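/*
 * Tear down an event's tracefs directory: invalidate the i_private of
 * any children so open file handles can't reach the freed event, remove
 * the directory, then drop the file from its subsystem and free it.
 */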
static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;
	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
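
/*
 * Example (hypothetical in-kernel caller):
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *
 * enables the sched:sched_switch event on the top-level trace array,
 * and trace_set_clr_event("sched", NULL, 0) disables every event in
 * the sched subsystem.
 */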

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *
__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_PIDS);
}

static void *
np_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_NO_PIDS);
}

static void *__start(struct seq_file *m, loff_t *pos, int type)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_PIDS);
}

static void *np_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_NO_PIDS);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
		    !trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

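/*
 * Iterator for the per-event "format" file: start at FORMAT_HEADER,
 * then walk the common fields, emit a separator, walk the event's own
 * fields, and finish with the print fmt. The field lists are walked
 * via node->prev so fields print in the order they were defined, since
 * __trace_define_field() prepends with list_add().
 */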
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (str_has_prefix(field->type, "__data_loc"))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	/* Do we want to hide event format files on tracefs lockdown? */

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

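/*
 * Open one of a subsystem's control files. The subsystem dir may be
 * freed while the file is open, so verify that inode->i_private still
 * names a live trace_subsystem_dir with events, and pin both the dir
 * and its trace array before handing the file back.
 */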
static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	ret = tracing_open_generic_tr(inode, filp);
	if (ret < 0) {
		kfree(dir);
		return ret;
	}
	dir->tr = tr;
	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = apply_subsystem_event_filter(dir, buf);
	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
					     mutex_is_locked(&event_mutex));

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void register_pid_events(struct trace_array *tr)
{
	/*
	 * Register a probe that is called before all other probes
	 * to set ignore_pid if next or prev do not match.
	 * Register a probe that is called after all other probes
	 * to only keep ignore_pid set if next pid matches.
	 */
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
					     tr, INT_MAX);
	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
					     tr, 0);

	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);
}

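/*
 * Common handler for writes to the set_event_pid (TRACE_PIDS) and
 * set_event_notrace_pid (TRACE_NO_PIDS) files: parse the pids from the
 * user buffer into a new list, publish it with rcu_assign_pointer(),
 * flag every event file for pid filtering, and register the sched
 * probes the first time any pid filtering is enabled.
 */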
1932 static ssize_t
event_pid_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos,int type)1933 event_pid_write(struct file *filp, const char __user *ubuf,
1934 size_t cnt, loff_t *ppos, int type)
1935 {
1936 struct seq_file *m = filp->private_data;
1937 struct trace_array *tr = m->private;
1938 struct trace_pid_list *filtered_pids = NULL;
1939 struct trace_pid_list *other_pids = NULL;
1940 struct trace_pid_list *pid_list;
1941 struct trace_event_file *file;
1942 ssize_t ret;
1943
1944 if (!cnt)
1945 return 0;
1946
1947 ret = tracing_update_buffers();
1948 if (ret < 0)
1949 return ret;
1950
1951 mutex_lock(&event_mutex);
1952
1953 if (type == TRACE_PIDS) {
1954 filtered_pids = rcu_dereference_protected(tr->filtered_pids,
1955 lockdep_is_held(&event_mutex));
1956 other_pids = rcu_dereference_protected(tr->filtered_no_pids,
1957 lockdep_is_held(&event_mutex));
1958 } else {
1959 filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
1960 lockdep_is_held(&event_mutex));
1961 other_pids = rcu_dereference_protected(tr->filtered_pids,
1962 lockdep_is_held(&event_mutex));
1963 }
1964
1965 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
1966 if (ret < 0)
1967 goto out;
1968
1969 if (type == TRACE_PIDS)
1970 rcu_assign_pointer(tr->filtered_pids, pid_list);
1971 else
1972 rcu_assign_pointer(tr->filtered_no_pids, pid_list);
1973
1974 list_for_each_entry(file, &tr->events, list) {
1975 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
1976 }
1977
1978 if (filtered_pids) {
1979 tracepoint_synchronize_unregister();
1980 trace_pid_list_free(filtered_pids);
1981 } else if (pid_list && !other_pids) {
1982 register_pid_events(tr);
1983 }
1984
1985 /*
1986 * Ignoring of pids is done at task switch. But we have to
1987 * check for those tasks that are currently running.
1988 * Always do this in case a pid was appended or removed.
1989 */
1990 on_each_cpu(ignore_task_cpu, tr, 1);
1991
1992 out:
1993 mutex_unlock(&event_mutex);
1994
1995 if (ret > 0)
1996 *ppos += ret;
1997
1998 return ret;
1999 }
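/*
* Illustrative usage of the PID filter files backed by this function:
*
*	# echo 1234 5678 > set_event_pid	(trace only these PIDs)
*	# echo 91011 >> set_event_notrace_pid	(never trace this PID)
*
* Appending (>>) adds to the existing list; opening with truncation
* (a plain >) first clears the old list via the open handlers below.
*/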
2000
2001 static ssize_t
2002 ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
2003 size_t cnt, loff_t *ppos)
2004 {
2005 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
2006 }
2007
2008 static ssize_t
2009 ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
2010 size_t cnt, loff_t *ppos)
2011 {
2012 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
2013 }
2014
2015 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
2016 static int ftrace_event_set_open(struct inode *inode, struct file *file);
2017 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
2018 static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
2019 static int ftrace_event_release(struct inode *inode, struct file *file);
2020
2021 static const struct seq_operations show_event_seq_ops = {
2022 .start = t_start,
2023 .next = t_next,
2024 .show = t_show,
2025 .stop = t_stop,
2026 };
2027
2028 static const struct seq_operations show_set_event_seq_ops = {
2029 .start = s_start,
2030 .next = s_next,
2031 .show = t_show,
2032 .stop = t_stop,
2033 };
2034
2035 static const struct seq_operations show_set_pid_seq_ops = {
2036 .start = p_start,
2037 .next = p_next,
2038 .show = trace_pid_show,
2039 .stop = p_stop,
2040 };
2041
2042 static const struct seq_operations show_set_no_pid_seq_ops = {
2043 .start = np_start,
2044 .next = np_next,
2045 .show = trace_pid_show,
2046 .stop = p_stop,
2047 };
2048
2049 static const struct file_operations ftrace_avail_fops = {
2050 .open = ftrace_event_avail_open,
2051 .read = seq_read,
2052 .llseek = seq_lseek,
2053 .release = seq_release,
2054 };
2055
2056 static const struct file_operations ftrace_set_event_fops = {
2057 .open = ftrace_event_set_open,
2058 .read = seq_read,
2059 .write = ftrace_event_write,
2060 .llseek = seq_lseek,
2061 .release = ftrace_event_release,
2062 };
2063
2064 static const struct file_operations ftrace_set_event_pid_fops = {
2065 .open = ftrace_event_set_pid_open,
2066 .read = seq_read,
2067 .write = ftrace_event_pid_write,
2068 .llseek = seq_lseek,
2069 .release = ftrace_event_release,
2070 };
2071
2072 static const struct file_operations ftrace_set_event_notrace_pid_fops = {
2073 .open = ftrace_event_set_npid_open,
2074 .read = seq_read,
2075 .write = ftrace_event_npid_write,
2076 .llseek = seq_lseek,
2077 .release = ftrace_event_release,
2078 };
2079
2080 static const struct file_operations ftrace_enable_fops = {
2081 .open = tracing_open_file_tr,
2082 .read = event_enable_read,
2083 .write = event_enable_write,
2084 .release = tracing_release_file_tr,
2085 .llseek = default_llseek,
2086 };
2087
2088 static const struct file_operations ftrace_event_format_fops = {
2089 .open = trace_format_open,
2090 .read = seq_read,
2091 .llseek = seq_lseek,
2092 .release = seq_release,
2093 };
2094
2095 static const struct file_operations ftrace_event_id_fops = {
2096 .read = event_id_read,
2097 .llseek = default_llseek,
2098 };
2099
2100 static const struct file_operations ftrace_event_filter_fops = {
2101 .open = tracing_open_file_tr,
2102 .read = event_filter_read,
2103 .write = event_filter_write,
2104 .release = tracing_release_file_tr,
2105 .llseek = default_llseek,
2106 };
2107
2108 static const struct file_operations ftrace_subsystem_filter_fops = {
2109 .open = subsystem_open,
2110 .read = subsystem_filter_read,
2111 .write = subsystem_filter_write,
2112 .llseek = default_llseek,
2113 .release = subsystem_release,
2114 };
2115
2116 static const struct file_operations ftrace_system_enable_fops = {
2117 .open = subsystem_open,
2118 .read = system_enable_read,
2119 .write = system_enable_write,
2120 .llseek = default_llseek,
2121 .release = subsystem_release,
2122 };
2123
2124 static const struct file_operations ftrace_tr_enable_fops = {
2125 .open = system_tr_open,
2126 .read = system_enable_read,
2127 .write = system_enable_write,
2128 .llseek = default_llseek,
2129 .release = subsystem_release,
2130 };
2131
2132 static const struct file_operations ftrace_show_header_fops = {
2133 .open = tracing_open_generic,
2134 .read = show_header,
2135 .llseek = default_llseek,
2136 };
2137
2138 static int
2139 ftrace_event_open(struct inode *inode, struct file *file,
2140 const struct seq_operations *seq_ops)
2141 {
2142 struct seq_file *m;
2143 int ret;
2144
2145 ret = security_locked_down(LOCKDOWN_TRACEFS);
2146 if (ret)
2147 return ret;
2148
2149 ret = seq_open(file, seq_ops);
2150 if (ret < 0)
2151 return ret;
2152 m = file->private_data;
2153 /* copy tr over to seq ops */
2154 m->private = inode->i_private;
2155
2156 return ret;
2157 }
2158
2159 static int ftrace_event_release(struct inode *inode, struct file *file)
2160 {
2161 struct trace_array *tr = inode->i_private;
2162
2163 trace_array_put(tr);
2164
2165 return seq_release(inode, file);
2166 }
2167
2168 static int
2169 ftrace_event_avail_open(struct inode *inode, struct file *file)
2170 {
2171 const struct seq_operations *seq_ops = &show_event_seq_ops;
2172
2173 /* Checks for tracefs lockdown */
2174 return ftrace_event_open(inode, file, seq_ops);
2175 }
2176
2177 static int
2178 ftrace_event_set_open(struct inode *inode, struct file *file)
2179 {
2180 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
2181 struct trace_array *tr = inode->i_private;
2182 int ret;
2183
2184 ret = tracing_check_open_get_tr(tr);
2185 if (ret)
2186 return ret;
2187
2188 if ((file->f_mode & FMODE_WRITE) &&
2189 (file->f_flags & O_TRUNC))
2190 ftrace_clear_events(tr);
2191
2192 ret = ftrace_event_open(inode, file, seq_ops);
2193 if (ret < 0)
2194 trace_array_put(tr);
2195 return ret;
2196 }
2197
2198 static int
2199 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
2200 {
2201 const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
2202 struct trace_array *tr = inode->i_private;
2203 int ret;
2204
2205 ret = tracing_check_open_get_tr(tr);
2206 if (ret)
2207 return ret;
2208
2209 if ((file->f_mode & FMODE_WRITE) &&
2210 (file->f_flags & O_TRUNC))
2211 ftrace_clear_event_pids(tr, TRACE_PIDS);
2212
2213 ret = ftrace_event_open(inode, file, seq_ops);
2214 if (ret < 0)
2215 trace_array_put(tr);
2216 return ret;
2217 }
2218
2219 static int
2220 ftrace_event_set_npid_open(struct inode *inode, struct file *file)
2221 {
2222 const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
2223 struct trace_array *tr = inode->i_private;
2224 int ret;
2225
2226 ret = tracing_check_open_get_tr(tr);
2227 if (ret)
2228 return ret;
2229
2230 if ((file->f_mode & FMODE_WRITE) &&
2231 (file->f_flags & O_TRUNC))
2232 ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
2233
2234 ret = ftrace_event_open(inode, file, seq_ops);
2235 if (ret < 0)
2236 trace_array_put(tr);
2237 return ret;
2238 }
2239
2240 static struct event_subsystem *
2241 create_new_subsystem(const char *name)
2242 {
2243 struct event_subsystem *system;
2244
2245 /* need to create new entry */
2246 system = kmalloc(sizeof(*system), GFP_KERNEL);
2247 if (!system)
2248 return NULL;
2249
2250 system->ref_count = 1;
2251
2252 /* Only allocate if dynamic (kprobes and modules) */
2253 system->name = kstrdup_const(name, GFP_KERNEL);
2254 if (!system->name)
2255 goto out_free;
2256
2257 system->filter = NULL;
2258
2259 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
2260 if (!system->filter)
2261 goto out_free;
2262
2263 list_add(&system->list, &event_subsystems);
2264
2265 return system;
2266
2267 out_free:
2268 kfree_const(system->name);
2269 kfree(system);
2270 return NULL;
2271 }
2272
2273 static struct dentry *
2274 event_subsystem_dir(struct trace_array *tr, const char *name,
2275 struct trace_event_file *file, struct dentry *parent)
2276 {
2277 struct trace_subsystem_dir *dir;
2278 struct event_subsystem *system;
2279 struct dentry *entry;
2280
2281 /* First see if we did not already create this dir */
2282 list_for_each_entry(dir, &tr->systems, list) {
2283 system = dir->subsystem;
2284 if (strcmp(system->name, name) == 0) {
2285 dir->nr_events++;
2286 file->system = dir;
2287 return dir->entry;
2288 }
2289 }
2290
2291 /* Now see if the system itself exists. */
2292 list_for_each_entry(system, &event_subsystems, list) {
2293 if (strcmp(system->name, name) == 0)
2294 break;
2295 }
2296 /* Reset system variable when not found */
2297 if (&system->list == &event_subsystems)
2298 system = NULL;
2299
2300 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
2301 if (!dir)
2302 goto out_fail;
2303
2304 if (!system) {
2305 system = create_new_subsystem(name);
2306 if (!system)
2307 goto out_free;
2308 } else
2309 __get_system(system);
2310
2311 dir->entry = tracefs_create_dir(name, parent);
2312 if (!dir->entry) {
2313 pr_warn("Failed to create system directory %s\n", name);
2314 __put_system(system);
2315 goto out_free;
2316 }
2317
2318 dir->tr = tr;
2319 dir->ref_count = 1;
2320 dir->nr_events = 1;
2321 dir->subsystem = system;
2322 file->system = dir;
2323
2324 /* the ftrace system is special, do not create enable or filter files */
2325 if (strcmp(name, "ftrace") != 0) {
2326
2327 entry = tracefs_create_file("filter", 0644, dir->entry, dir,
2328 &ftrace_subsystem_filter_fops);
2329 if (!entry) {
2330 kfree(system->filter);
2331 system->filter = NULL;
2332 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
2333 }
2334
2335 trace_create_file("enable", 0644, dir->entry, dir,
2336 &ftrace_system_enable_fops);
2337 }
2338
2339 list_add(&dir->list, &tr->systems);
2340
2341 return dir->entry;
2342
2343 out_free:
2344 kfree(dir);
2345 out_fail:
2346 /* Only print this message if the failure was a memory allocation */
2347 if (!dir || !system)
2348 pr_warn("No memory to create event subsystem %s\n", name);
2349 return NULL;
2350 }
2351
2352 static int
2353 event_define_fields(struct trace_event_call *call)
2354 {
2355 struct list_head *head;
2356 int ret = 0;
2357
2358 /*
2359 * Other events may have the same class. Only update
2360 * the fields if they are not already defined.
2361 */
2362 head = trace_get_fields(call);
2363 if (list_empty(head)) {
2364 struct trace_event_fields *field = call->class->fields_array;
2365 unsigned int offset = sizeof(struct trace_entry);
2366
2367 for (; field->type; field++) {
2368 if (field->type == TRACE_FUNCTION_TYPE) {
2369 field->define_fields(call);
2370 break;
2371 }
2372
2373 offset = ALIGN(offset, field->align);
2374 ret = trace_define_field(call, field->type, field->name,
2375 offset, field->size,
2376 field->is_signed, field->filter_type);
2377 if (WARN_ON_ONCE(ret)) {
2378 pr_err("error code is %d\n", ret);
2379 break;
2380 }
2381
2382 offset += field->size;
2383 }
2384 }
2385
2386 return ret;
2387 }
2388
2389 static int
2390 event_create_dir(struct dentry *parent, struct trace_event_file *file)
2391 {
2392 struct trace_event_call *call = file->event_call;
2393 struct trace_array *tr = file->tr;
2394 struct dentry *d_events;
2395 const char *name;
2396 int ret;
2397
2398 /*
2399 * If the trace point header did not define TRACE_SYSTEM
2400 * then the system would be called "TRACE_SYSTEM".
2401 */
2402 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
2403 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
2404 if (!d_events)
2405 return -ENOMEM;
2406 } else
2407 d_events = parent;
2408
2409 name = trace_event_name(call);
2410 file->dir = tracefs_create_dir(name, d_events);
2411 if (!file->dir) {
2412 pr_warn("Could not create tracefs '%s' directory\n", name);
2413 return -1;
2414 }
2415
2416 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2417 trace_create_file("enable", 0644, file->dir, file,
2418 &ftrace_enable_fops);
2419
2420 #ifdef CONFIG_PERF_EVENTS
2421 if (call->event.type && call->class->reg)
2422 trace_create_file("id", 0444, file->dir,
2423 (void *)(long)call->event.type,
2424 &ftrace_event_id_fops);
2425 #endif
2426
2427 ret = event_define_fields(call);
2428 if (ret < 0) {
2429 pr_warn("Could not initialize trace point events/%s\n", name);
2430 return ret;
2431 }
2432
2433 /*
2434 * Only event directories that can be enabled should have
2435 * triggers or filters.
2436 */
2437 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
2438 trace_create_file("filter", 0644, file->dir, file,
2439 &ftrace_event_filter_fops);
2440
2441 trace_create_file("trigger", 0644, file->dir, file,
2442 &event_trigger_fops);
2443 }
2444
2445 #ifdef CONFIG_HIST_TRIGGERS
2446 trace_create_file("hist", 0444, file->dir, file,
2447 &event_hist_fops);
2448 #endif
2449 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
2450 trace_create_file("hist_debug", 0444, file->dir, file,
2451 &event_hist_debug_fops);
2452 #endif
2453 trace_create_file("format", 0444, file->dir, call,
2454 &ftrace_event_format_fops);
2455
2456 #ifdef CONFIG_TRACE_EVENT_INJECT
2457 if (call->event.type && call->class->reg)
2458 trace_create_file("inject", 0200, file->dir, file,
2459 &event_inject_fops);
2460 #endif
2461
2462 return 0;
2463 }
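/*
* For a typical event, the code above produces a tracefs layout like
* (illustrative; "id", "hist", "hist_debug" and "inject" depend on
* the kernel configuration):
*
*	events/<system>/<event>/enable
*	events/<system>/<event>/id
*	events/<system>/<event>/filter
*	events/<system>/<event>/trigger
*	events/<system>/<event>/hist
*	events/<system>/<event>/format
*	events/<system>/<event>/inject
*/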
2464
2465 static void remove_event_from_tracers(struct trace_event_call *call)
2466 {
2467 struct trace_event_file *file;
2468 struct trace_array *tr;
2469
2470 do_for_each_event_file_safe(tr, file) {
2471 if (file->event_call != call)
2472 continue;
2473
2474 remove_event_file_dir(file);
2475 /*
2476 * The do_for_each_event_file_safe() is
2477 * a double loop. After finding the call for this
2478 * trace_array, we use break to jump to the next
2479 * trace_array.
2480 */
2481 break;
2482 } while_for_each_event_file();
2483 }
2484
2485 static void event_remove(struct trace_event_call *call)
2486 {
2487 struct trace_array *tr;
2488 struct trace_event_file *file;
2489
2490 do_for_each_event_file(tr, file) {
2491 if (file->event_call != call)
2492 continue;
2493
2494 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2495 tr->clear_trace = true;
2496
2497 ftrace_event_enable_disable(file, 0);
2498 /*
2499 * The do_for_each_event_file() is
2500 * a double loop. After finding the call for this
2501 * trace_array, we use break to jump to the next
2502 * trace_array.
2503 */
2504 break;
2505 } while_for_each_event_file();
2506
2507 if (call->event.funcs)
2508 __unregister_trace_event(&call->event);
2509 remove_event_from_tracers(call);
2510 list_del(&call->list);
2511 }
2512
2513 static int event_init(struct trace_event_call *call)
2514 {
2515 int ret = 0;
2516 const char *name;
2517
2518 name = trace_event_name(call);
2519 if (WARN_ON(!name))
2520 return -EINVAL;
2521
2522 if (call->class->raw_init) {
2523 ret = call->class->raw_init(call);
2524 if (ret < 0 && ret != -ENOSYS)
2525 pr_warn("Could not initialize trace events/%s\n", name);
2526 }
2527
2528 return ret;
2529 }
2530
2531 static int
2532 __register_event(struct trace_event_call *call, struct module *mod)
2533 {
2534 int ret;
2535
2536 ret = event_init(call);
2537 if (ret < 0)
2538 return ret;
2539
2540 list_add(&call->list, &ftrace_events);
2541 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
2542 atomic_set(&call->refcnt, 0);
2543 else
2544 call->module = mod;
2545
2546 return 0;
2547 }
2548
2549 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
2550 {
2551 int rlen;
2552 int elen;
2553
2554 /* Find the length of the eval value as a string */
2555 elen = snprintf(ptr, 0, "%ld", map->eval_value);
2556 /* Make sure there's enough room to replace the string with the value */
2557 if (len < elen)
2558 return NULL;
2559
2560 snprintf(ptr, elen + 1, "%ld", map->eval_value);
2561
2562 /* Get the rest of the string of ptr */
2563 rlen = strlen(ptr + len);
2564 memmove(ptr + elen, ptr + len, rlen);
2565 /* Make sure we end the new string */
2566 ptr[elen + rlen] = 0;
2567
2568 return ptr + elen;
2569 }
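/*
* A worked example (hypothetical names): with eval_string "ENUM_FOO",
* eval_value 2 and len == 8, a buffer containing
*
*	"{ ENUM_FOO, \"foo\" }"
*	   ^ptr
*
* is rewritten in place to "{ 2, \"foo\" }", and the returned pointer
* points at the character just past the "2" (here the ',').
*/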
2570
2571 static void update_event_printk(struct trace_event_call *call,
2572 struct trace_eval_map *map)
2573 {
2574 char *ptr;
2575 int quote = 0;
2576 int len = strlen(map->eval_string);
2577
2578 for (ptr = call->print_fmt; *ptr; ptr++) {
2579 if (*ptr == '\\') {
2580 ptr++;
2581 /* paranoid */
2582 if (!*ptr)
2583 break;
2584 continue;
2585 }
2586 if (*ptr == '"') {
2587 quote ^= 1;
2588 continue;
2589 }
2590 if (quote)
2591 continue;
2592 if (isdigit(*ptr)) {
2593 /* skip numbers */
2594 do {
2595 ptr++;
2596 /* Check for alpha chars like ULL */
2597 } while (isalnum(*ptr));
2598 if (!*ptr)
2599 break;
2600 /*
2601 * A number must have some kind of delimiter after
2602 * it, and we can ignore that too.
2603 */
2604 continue;
2605 }
2606 if (isalpha(*ptr) || *ptr == '_') {
2607 if (strncmp(map->eval_string, ptr, len) == 0 &&
2608 !isalnum(ptr[len]) && ptr[len] != '_') {
2609 ptr = eval_replace(ptr, map, len);
2610 /* enum/sizeof string smaller than value */
2611 if (WARN_ON_ONCE(!ptr))
2612 return;
2613 /*
2614 * No need to decrement here, as eval_replace()
2615 * returns the pointer to the character just past
2616 * the eval, and two evals cannot be placed
2617 * back to back without something in between.
2618 * We can skip that something in between.
2619 */
2620 continue;
2621 }
2622 skip_more:
2623 do {
2624 ptr++;
2625 } while (isalnum(*ptr) || *ptr == '_');
2626 if (!*ptr)
2627 break;
2628 /*
2629 * If what comes after this variable is a '.' or
2630 * '->' then we can continue to ignore that string.
2631 */
2632 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2633 ptr += *ptr == '.' ? 1 : 2;
2634 if (!*ptr)
2635 break;
2636 goto skip_more;
2637 }
2638 /*
2639 * Once again, we can skip the delimiter that came
2640 * after the string.
2641 */
2642 continue;
2643 }
2644 }
2645 }
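/*
* Net effect (hypothetical enum name and value): a print_fmt such as
*
*	__print_symbolic(REC->state, { ENUM_RUNNING, "R" })
*
* is rewritten, once the eval map resolves ENUM_RUNNING to 0, into
*
*	__print_symbolic(REC->state, { 0, "R" })
*
* so userspace parsers of the "format" file never see kernel-internal
* enum names. The quote/number/identifier scanning above just makes
* sure only whole identifiers outside string literals are replaced.
*/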
2646
2647 static void add_str_to_module(struct module *module, char *str)
2648 {
2649 struct module_string *modstr;
2650
2651 modstr = kmalloc(sizeof(*modstr), GFP_KERNEL);
2652
2653 /*
2654 * If we failed to allocate memory here, then we'll just
2655 * let the str memory leak when the module is removed.
2656 * If this fails to allocate, there's worse problems than
2657 * a leaked string on module removal.
2658 */
2659 if (WARN_ON_ONCE(!modstr))
2660 return;
2661
2662 modstr->module = module;
2663 modstr->str = str;
2664
2665 list_add(&modstr->next, &module_strings);
2666 }
2667
2668 static void update_event_fields(struct trace_event_call *call,
2669 struct trace_eval_map *map)
2670 {
2671 struct ftrace_event_field *field;
2672 struct list_head *head;
2673 char *ptr;
2674 char *str;
2675 int len = strlen(map->eval_string);
2676
2677 /* Dynamic events should never have field maps */
2678 if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC))
2679 return;
2680
2681 head = trace_get_fields(call);
2682 list_for_each_entry(field, head, link) {
2683 ptr = strchr(field->type, '[');
2684 if (!ptr)
2685 continue;
2686 ptr++;
2687
2688 if (!isalpha(*ptr) && *ptr != '_')
2689 continue;
2690
2691 if (strncmp(map->eval_string, ptr, len) != 0)
2692 continue;
2693
2694 str = kstrdup(field->type, GFP_KERNEL);
2695 if (WARN_ON_ONCE(!str))
2696 return;
2697 ptr = str + (ptr - field->type);
2698 ptr = eval_replace(ptr, map, len);
2699 /* enum/sizeof string smaller than value */
2700 if (WARN_ON_ONCE(!ptr)) {
2701 kfree(str);
2702 continue;
2703 }
2704
2705 /*
2706 * If the event is part of a module, then we need to free the string
2707 * when the module is removed. Otherwise, it will stay allocated
2708 * until a reboot.
2709 */
2710 if (call->module)
2711 add_str_to_module(call->module, str);
2712
2713 field->type = str;
2714 }
2715 }
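/*
* Illustrative example (hypothetical names): a field whose type reads
* "u32 counts[MAX_COUNTS]" is duplicated and rewritten to
* "u32 counts[8]" once the eval map resolves MAX_COUNTS to 8. The
* duplicated string must outlive the event; for module events it is
* tracked with add_str_to_module() and freed on module unload.
*/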
2716
2717 void trace_event_eval_update(struct trace_eval_map **map, int len)
2718 {
2719 struct trace_event_call *call, *p;
2720 const char *last_system = NULL;
2721 bool first = false;
2722 int last_i;
2723 int i;
2724
2725 down_write(&trace_event_sem);
2726 list_for_each_entry_safe(call, p, &ftrace_events, list) {
2727 /* events are usually grouped together by system */
2728 if (!last_system || call->class->system != last_system) {
2729 first = true;
2730 last_i = 0;
2731 last_system = call->class->system;
2732 }
2733
2734 /*
2735 * Since calls are grouped by systems, the likelihood that the
2736 * next call in the iteration belongs to the same system as the
2737 * previous call is high. As an optimization, we skip searching
2738 * for a map[] that matches the call's system if the last call
2739 * was from the same system. That's what last_i is for. If the
2740 * call has the same system as the previous call, then last_i
2741 * will be the index of the first map[] that has a matching
2742 * system.
2743 */
2744 for (i = last_i; i < len; i++) {
2745 if (call->class->system == map[i]->system) {
2746 /* Save the first system if need be */
2747 if (first) {
2748 last_i = i;
2749 first = false;
2750 }
2751 update_event_printk(call, map[i]);
2752 update_event_fields(call, map[i]);
2753 }
2754 }
2755 cond_resched();
2756 }
2757 up_write(&trace_event_sem);
2758 }
2759
2760 static struct trace_event_file *
2761 trace_create_new_event(struct trace_event_call *call,
2762 struct trace_array *tr)
2763 {
2764 struct trace_pid_list *no_pid_list;
2765 struct trace_pid_list *pid_list;
2766 struct trace_event_file *file;
2767
2768 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2769 if (!file)
2770 return NULL;
2771
2772 pid_list = rcu_dereference_protected(tr->filtered_pids,
2773 lockdep_is_held(&event_mutex));
2774 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
2775 lockdep_is_held(&event_mutex));
2776
2777 if (pid_list || no_pid_list)
2778 file->flags |= EVENT_FILE_FL_PID_FILTER;
2779
2780 file->event_call = call;
2781 file->tr = tr;
2782 atomic_set(&file->sm_ref, 0);
2783 atomic_set(&file->tm_ref, 0);
2784 INIT_LIST_HEAD(&file->triggers);
2785 list_add(&file->list, &tr->events);
2786
2787 return file;
2788 }
2789
2790 /* Add an event to a trace directory */
2791 static int
2792 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
2793 {
2794 struct trace_event_file *file;
2795
2796 file = trace_create_new_event(call, tr);
2797 if (!file)
2798 return -ENOMEM;
2799
2800 if (eventdir_initialized)
2801 return event_create_dir(tr->event_dir, file);
2802 else
2803 return event_define_fields(call);
2804 }
2805
2806 /*
2807 * Just create a descriptor for early init. A descriptor is required
2808 * for enabling events at boot. We want to enable events before
2809 * the filesystem is initialized.
2810 */
2811 static int
2812 __trace_early_add_new_event(struct trace_event_call *call,
2813 struct trace_array *tr)
2814 {
2815 struct trace_event_file *file;
2816
2817 file = trace_create_new_event(call, tr);
2818 if (!file)
2819 return -ENOMEM;
2820
2821 return event_define_fields(call);
2822 }
2823
2824 struct ftrace_module_file_ops;
2825 static void __add_event_to_tracers(struct trace_event_call *call);
2826
2827 /* Add an additional event_call dynamically */
2828 int trace_add_event_call(struct trace_event_call *call)
2829 {
2830 int ret;
2831 lockdep_assert_held(&event_mutex);
2832
2833 mutex_lock(&trace_types_lock);
2834
2835 ret = __register_event(call, NULL);
2836 if (ret >= 0)
2837 __add_event_to_tracers(call);
2838
2839 mutex_unlock(&trace_types_lock);
2840 return ret;
2841 }
2842
2843 /*
2844 * Must be called under locking of trace_types_lock, event_mutex and
2845 * trace_event_sem.
2846 */
2847 static void __trace_remove_event_call(struct trace_event_call *call)
2848 {
2849 event_remove(call);
2850 trace_destroy_fields(call);
2851 free_event_filter(call->filter);
2852 call->filter = NULL;
2853 }
2854
2855 static int probe_remove_event_call(struct trace_event_call *call)
2856 {
2857 struct trace_array *tr;
2858 struct trace_event_file *file;
2859
2860 #ifdef CONFIG_PERF_EVENTS
2861 if (call->perf_refcount)
2862 return -EBUSY;
2863 #endif
2864 do_for_each_event_file(tr, file) {
2865 if (file->event_call != call)
2866 continue;
2867 /*
2868 * We can't rely on the ftrace_event_enable_disable(enable => 0)
2869 * that we are going to do; EVENT_FILE_FL_SOFT_MODE can suppress
2870 * TRACE_REG_UNREGISTER.
2871 */
2872 if (file->flags & EVENT_FILE_FL_ENABLED)
2873 goto busy;
2874
2875 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2876 tr->clear_trace = true;
2877 /*
2878 * The do_for_each_event_file() is
2879 * a double loop. After finding the call for this
2880 * trace_array, we use break to jump to the next
2881 * trace_array.
2882 */
2883 break;
2884 } while_for_each_event_file();
2885
2886 __trace_remove_event_call(call);
2887
2888 return 0;
2889 busy:
2890 /* No need to clear the trace now */
2891 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2892 tr->clear_trace = false;
2893 }
2894 return -EBUSY;
2895 }
2896
2897 /* Remove an event_call */
2898 int trace_remove_event_call(struct trace_event_call *call)
2899 {
2900 int ret;
2901
2902 lockdep_assert_held(&event_mutex);
2903
2904 mutex_lock(&trace_types_lock);
2905 down_write(&trace_event_sem);
2906 ret = probe_remove_event_call(call);
2907 up_write(&trace_event_sem);
2908 mutex_unlock(&trace_types_lock);
2909
2910 return ret;
2911 }
2912
2913 #define for_each_event(event, start, end) \
2914 for (event = start; \
2915 (unsigned long)event < (unsigned long)end; \
2916 event++)
2917
2918 #ifdef CONFIG_MODULES
2919
2920 static void trace_module_add_events(struct module *mod)
2921 {
2922 struct trace_event_call **call, **start, **end;
2923
2924 if (!mod->num_trace_events)
2925 return;
2926
2927 /* Don't add infrastructure for mods without tracepoints */
2928 if (trace_module_has_bad_taint(mod)) {
2929 pr_err("%s: module has bad taint, not creating trace events\n",
2930 mod->name);
2931 return;
2932 }
2933
2934 start = mod->trace_events;
2935 end = mod->trace_events + mod->num_trace_events;
2936
2937 for_each_event(call, start, end) {
2938 __register_event(*call, mod);
2939 __add_event_to_tracers(*call);
2940 }
2941 }
2942
2943 static void trace_module_remove_events(struct module *mod)
2944 {
2945 struct trace_event_call *call, *p;
2946 struct module_string *modstr, *m;
2947
2948 down_write(&trace_event_sem);
2949 list_for_each_entry_safe(call, p, &ftrace_events, list) {
2950 if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
2951 continue;
2952 if (call->module == mod)
2953 __trace_remove_event_call(call);
2954 }
2955 /* Check for any strings allocated for this module */
2956 list_for_each_entry_safe(modstr, m, &module_strings, next) {
2957 if (modstr->module != mod)
2958 continue;
2959 list_del(&modstr->next);
2960 kfree(modstr->str);
2961 kfree(modstr);
2962 }
2963 up_write(&trace_event_sem);
2964
2965 /*
2966 * It is safest to reset the ring buffer if the module being unloaded
2967 * registered any events that were used. The only worry is if
2968 * a new module gets loaded, and takes on the same id as the events
2969 * of this module. When printing out the buffer, traced events left
2970 * over from this module may be passed to the new module events and
2971 * unexpected results may occur.
2972 */
2973 tracing_reset_all_online_cpus_unlocked();
2974 }
2975
2976 static int trace_module_notify(struct notifier_block *self,
2977 unsigned long val, void *data)
2978 {
2979 struct module *mod = data;
2980
2981 mutex_lock(&event_mutex);
2982 mutex_lock(&trace_types_lock);
2983 switch (val) {
2984 case MODULE_STATE_COMING:
2985 trace_module_add_events(mod);
2986 break;
2987 case MODULE_STATE_GOING:
2988 trace_module_remove_events(mod);
2989 break;
2990 }
2991 mutex_unlock(&trace_types_lock);
2992 mutex_unlock(&event_mutex);
2993
2994 return NOTIFY_OK;
2995 }
2996
2997 static struct notifier_block trace_module_nb = {
2998 .notifier_call = trace_module_notify,
2999 .priority = 1, /* higher than trace.c module notify */
3000 };
3001 #endif /* CONFIG_MODULES */
3002
3003 /* Create a new event directory structure for a trace directory. */
3004 static void
3005 __trace_add_event_dirs(struct trace_array *tr)
3006 {
3007 struct trace_event_call *call;
3008 int ret;
3009
3010 list_for_each_entry(call, &ftrace_events, list) {
3011 ret = __trace_add_new_event(call, tr);
3012 if (ret < 0)
3013 pr_warn("Could not create directory for event %s\n",
3014 trace_event_name(call));
3015 }
3016 }
3017
3018 /* Returns any file that matches the system and event */
3019 struct trace_event_file *
3020 __find_event_file(struct trace_array *tr, const char *system, const char *event)
3021 {
3022 struct trace_event_file *file;
3023 struct trace_event_call *call;
3024 const char *name;
3025
3026 list_for_each_entry(file, &tr->events, list) {
3027
3028 call = file->event_call;
3029 name = trace_event_name(call);
3030
3031 if (!name || !call->class)
3032 continue;
3033
3034 if (strcmp(event, name) == 0 &&
3035 strcmp(system, call->class->system) == 0)
3036 return file;
3037 }
3038 return NULL;
3039 }
3040
3041 /* Returns valid trace event files that match system and event */
3042 struct trace_event_file *
3043 find_event_file(struct trace_array *tr, const char *system, const char *event)
3044 {
3045 struct trace_event_file *file;
3046
3047 file = __find_event_file(tr, system, event);
3048 if (!file || !file->event_call->class->reg ||
3049 file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
3050 return NULL;
3051
3052 return file;
3053 }
3054
3055 /**
3056 * trace_get_event_file - Find and return a trace event file
3057 * @instance: The name of the trace instance containing the event
3058 * @system: The name of the system containing the event
3059 * @event: The name of the event
3060 *
3061 * Return a trace event file given the trace instance name, trace
3062 * system, and trace event name. If the instance name is NULL, it
3063 * refers to the top-level trace array.
3064 *
3065 * This function will look it up and return it if found, after calling
3066 * trace_array_get() to prevent the instance from going away, and
3067 * increment the event's module refcount to prevent it from being
3068 * removed.
3069 *
3070 * To release the file, call trace_put_event_file(), which will call
3071 * trace_array_put() and decrement the event's module refcount.
3072 *
3073 * Return: The trace event on success, ERR_PTR otherwise.
3074 */
3075 struct trace_event_file *trace_get_event_file(const char *instance,
3076 const char *system,
3077 const char *event)
3078 {
3079 struct trace_array *tr = top_trace_array();
3080 struct trace_event_file *file = NULL;
3081 int ret = -EINVAL;
3082
3083 if (instance) {
3084 tr = trace_array_find_get(instance);
3085 if (!tr)
3086 return ERR_PTR(-ENOENT);
3087 } else {
3088 ret = trace_array_get(tr);
3089 if (ret)
3090 return ERR_PTR(ret);
3091 }
3092
3093 mutex_lock(&event_mutex);
3094
3095 file = find_event_file(tr, system, event);
3096 if (!file) {
3097 trace_array_put(tr);
3098 ret = -EINVAL;
3099 goto out;
3100 }
3101
3102 /* Don't let event modules unload while in use */
3103 ret = trace_event_try_get_ref(file->event_call);
3104 if (!ret) {
3105 trace_array_put(tr);
3106 ret = -EBUSY;
3107 goto out;
3108 }
3109
3110 ret = 0;
3111 out:
3112 mutex_unlock(&event_mutex);
3113
3114 if (ret)
3115 file = ERR_PTR(ret);
3116
3117 return file;
3118 }
3119 EXPORT_SYMBOL_GPL(trace_get_event_file);
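/*
* A minimal usage sketch (illustrative):
*
*	struct trace_event_file *file;
*
*	file = trace_get_event_file(NULL, "sched", "sched_switch");
*	if (IS_ERR(file))
*		return PTR_ERR(file);
*	...
*	trace_put_event_file(file);
*/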
3120
3121 /**
3122 * trace_put_event_file - Release a file from trace_get_event_file()
3123 * @file: The trace event file
3124 *
3125 * If a file was retrieved using trace_get_event_file(), this should
3126 * be called when it's no longer needed. It will cancel the previous
3127 * trace_array_get() called by that function, and decrement the
3128 * event's module refcount.
3129 */
3130 void trace_put_event_file(struct trace_event_file *file)
3131 {
3132 mutex_lock(&event_mutex);
3133 trace_event_put_ref(file->event_call);
3134 mutex_unlock(&event_mutex);
3135
3136 trace_array_put(file->tr);
3137 }
3138 EXPORT_SYMBOL_GPL(trace_put_event_file);
3139
3140 #ifdef CONFIG_DYNAMIC_FTRACE
3141
3142 /* Avoid typos */
3143 #define ENABLE_EVENT_STR "enable_event"
3144 #define DISABLE_EVENT_STR "disable_event"
3145
3146 struct event_probe_data {
3147 struct trace_event_file *file;
3148 unsigned long count;
3149 int ref;
3150 bool enable;
3151 };
3152
3153 static void update_event_probe(struct event_probe_data *data)
3154 {
3155 if (data->enable)
3156 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3157 else
3158 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3159 }
3160
3161 static void
3162 event_enable_probe(unsigned long ip, unsigned long parent_ip,
3163 struct trace_array *tr, struct ftrace_probe_ops *ops,
3164 void *data)
3165 {
3166 struct ftrace_func_mapper *mapper = data;
3167 struct event_probe_data *edata;
3168 void **pdata;
3169
3170 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3171 if (!pdata || !*pdata)
3172 return;
3173
3174 edata = *pdata;
3175 update_event_probe(edata);
3176 }
3177
3178 static void
3179 event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
3180 struct trace_array *tr, struct ftrace_probe_ops *ops,
3181 void *data)
3182 {
3183 struct ftrace_func_mapper *mapper = data;
3184 struct event_probe_data *edata;
3185 void **pdata;
3186
3187 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3188 if (!pdata || !*pdata)
3189 return;
3190
3191 edata = *pdata;
3192
3193 if (!edata->count)
3194 return;
3195
3196 /* Skip if the event is in a state we want to switch to */
3197 if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
3198 return;
3199
3200 if (edata->count != -1)
3201 (edata->count)--;
3202
3203 update_event_probe(edata);
3204 }
3205
3206 static int
3207 event_enable_print(struct seq_file *m, unsigned long ip,
3208 struct ftrace_probe_ops *ops, void *data)
3209 {
3210 struct ftrace_func_mapper *mapper = data;
3211 struct event_probe_data *edata;
3212 void **pdata;
3213
3214 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3215
3216 if (WARN_ON_ONCE(!pdata || !*pdata))
3217 return 0;
3218
3219 edata = *pdata;
3220
3221 seq_printf(m, "%ps:", (void *)ip);
3222
3223 seq_printf(m, "%s:%s:%s",
3224 edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
3225 edata->file->event_call->class->system,
3226 trace_event_name(edata->file->event_call));
3227
3228 if (edata->count == -1)
3229 seq_puts(m, ":unlimited\n");
3230 else
3231 seq_printf(m, ":count=%ld\n", edata->count);
3232
3233 return 0;
3234 }
3235
3236 static int
3237 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
3238 unsigned long ip, void *init_data, void **data)
3239 {
3240 struct ftrace_func_mapper *mapper = *data;
3241 struct event_probe_data *edata = init_data;
3242 int ret;
3243
3244 if (!mapper) {
3245 mapper = allocate_ftrace_func_mapper();
3246 if (!mapper)
3247 return -ENODEV;
3248 *data = mapper;
3249 }
3250
3251 ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
3252 if (ret < 0)
3253 return ret;
3254
3255 edata->ref++;
3256
3257 return 0;
3258 }
3259
3260 static int free_probe_data(void *data)
3261 {
3262 struct event_probe_data *edata = data;
3263
3264 edata->ref--;
3265 if (!edata->ref) {
3266 /* Remove the SOFT_MODE flag */
3267 __ftrace_event_enable_disable(edata->file, 0, 1);
3268 trace_event_put_ref(edata->file->event_call);
3269 kfree(edata);
3270 }
3271 return 0;
3272 }
3273
3274 static void
3275 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
3276 unsigned long ip, void *data)
3277 {
3278 struct ftrace_func_mapper *mapper = data;
3279 struct event_probe_data *edata;
3280
3281 if (!ip) {
3282 if (!mapper)
3283 return;
3284 free_ftrace_func_mapper(mapper, free_probe_data);
3285 return;
3286 }
3287
3288 edata = ftrace_func_mapper_remove_ip(mapper, ip);
3289
3290 if (WARN_ON_ONCE(!edata))
3291 return;
3292
3293 if (WARN_ON_ONCE(edata->ref <= 0))
3294 return;
3295
3296 free_probe_data(edata);
3297 }
3298
3299 static struct ftrace_probe_ops event_enable_probe_ops = {
3300 .func = event_enable_probe,
3301 .print = event_enable_print,
3302 .init = event_enable_init,
3303 .free = event_enable_free,
3304 };
3305
3306 static struct ftrace_probe_ops event_enable_count_probe_ops = {
3307 .func = event_enable_count_probe,
3308 .print = event_enable_print,
3309 .init = event_enable_init,
3310 .free = event_enable_free,
3311 };
3312
3313 static struct ftrace_probe_ops event_disable_probe_ops = {
3314 .func = event_enable_probe,
3315 .print = event_enable_print,
3316 .init = event_enable_init,
3317 .free = event_enable_free,
3318 };
3319
3320 static struct ftrace_probe_ops event_disable_count_probe_ops = {
3321 .func = event_enable_count_probe,
3322 .print = event_enable_print,
3323 .init = event_enable_init,
3324 .free = event_enable_free,
3325 };
3326
3327 static int
3328 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
3329 char *glob, char *cmd, char *param, int enabled)
3330 {
3331 struct trace_event_file *file;
3332 struct ftrace_probe_ops *ops;
3333 struct event_probe_data *data;
3334 const char *system;
3335 const char *event;
3336 char *number;
3337 bool enable;
3338 int ret;
3339
3340 if (!tr)
3341 return -ENODEV;
3342
3343 /* hash funcs only work with set_ftrace_filter */
3344 if (!enabled || !param)
3345 return -EINVAL;
3346
3347 system = strsep(&param, ":");
3348 if (!param)
3349 return -EINVAL;
3350
3351 event = strsep(&param, ":");
3352
3353 mutex_lock(&event_mutex);
3354
3355 ret = -EINVAL;
3356 file = find_event_file(tr, system, event);
3357 if (!file)
3358 goto out;
3359
3360 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
3361
3362 if (enable)
3363 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
3364 else
3365 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
3366
3367 if (glob[0] == '!') {
3368 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
3369 goto out;
3370 }
3371
3372 ret = -ENOMEM;
3373
3374 data = kzalloc(sizeof(*data), GFP_KERNEL);
3375 if (!data)
3376 goto out;
3377
3378 data->enable = enable;
3379 data->count = -1;
3380 data->file = file;
3381
3382 if (!param)
3383 goto out_reg;
3384
3385 number = strsep(&param, ":");
3386
3387 ret = -EINVAL;
3388 if (!strlen(number))
3389 goto out_free;
3390
3391 /*
3392 * We use the callback data field (which is a pointer)
3393 * as our counter.
3394 */
3395 ret = kstrtoul(number, 0, &data->count);
3396 if (ret)
3397 goto out_free;
3398
3399 out_reg:
3400 /* Don't let event modules unload while probe registered */
3401 ret = trace_event_try_get_ref(file->event_call);
3402 if (!ret) {
3403 ret = -EBUSY;
3404 goto out_free;
3405 }
3406
3407 ret = __ftrace_event_enable_disable(file, 1, 1);
3408 if (ret < 0)
3409 goto out_put;
3410
3411 ret = register_ftrace_function_probe(glob, tr, ops, data);
3412 /*
3413 * On success, the above returns the number of functions enabled,
3414 * but if it didn't find any functions it returns zero.
3415 * Consider finding no functions a failure too.
3416 */
3417 if (!ret) {
3418 ret = -ENOENT;
3419 goto out_disable;
3420 } else if (ret < 0)
3421 goto out_disable;
3422 /* Just return zero, not the number of enabled functions */
3423 ret = 0;
3424 out:
3425 mutex_unlock(&event_mutex);
3426 return ret;
3427
3428 out_disable:
3429 __ftrace_event_enable_disable(file, 0, 1);
3430 out_put:
3431 trace_event_put_ref(file->event_call);
3432 out_free:
3433 kfree(data);
3434 goto out;
3435 }
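/*
* This implements the "enable_event" and "disable_event" function
* probe commands, e.g. (illustrative):
*
*	# echo 'try_to_wake_up:enable_event:sched:sched_switch:3' \
*		> set_ftrace_filter
*
* which soft-enables sched:sched_switch for the first three hits of
* try_to_wake_up(); omit the count for an unlimited probe, and prefix
* the function name with '!' to remove the probe again.
*/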
3436
3437 static struct ftrace_func_command event_enable_cmd = {
3438 .name = ENABLE_EVENT_STR,
3439 .func = event_enable_func,
3440 };
3441
3442 static struct ftrace_func_command event_disable_cmd = {
3443 .name = DISABLE_EVENT_STR,
3444 .func = event_enable_func,
3445 };
3446
3447 static __init int register_event_cmds(void)
3448 {
3449 int ret;
3450
3451 ret = register_ftrace_command(&event_enable_cmd);
3452 if (WARN_ON(ret < 0))
3453 return ret;
3454 ret = register_ftrace_command(&event_disable_cmd);
3455 if (WARN_ON(ret < 0))
3456 unregister_ftrace_command(&event_enable_cmd);
3457 return ret;
3458 }
3459 #else
3460 static inline int register_event_cmds(void) { return 0; }
3461 #endif /* CONFIG_DYNAMIC_FTRACE */
3462
3463 /*
3464 * The top level array and trace arrays created by boot-time tracing
3465 * have already had their trace_event_file descriptors created in order
3466 * to allow for early events to be recorded.
3467 * This function is called after the tracefs has been initialized,
3468 * and we now have to create the files associated to the events.
3469 */
3470 static void __trace_early_add_event_dirs(struct trace_array *tr)
3471 {
3472 struct trace_event_file *file;
3473 int ret;
3474
3476 list_for_each_entry(file, &tr->events, list) {
3477 ret = event_create_dir(tr->event_dir, file);
3478 if (ret < 0)
3479 pr_warn("Could not create directory for event %s\n",
3480 trace_event_name(file->event_call));
3481 }
3482 }
3483
3484 /*
3485 * For early boot up, the top trace array and the trace arrays created
3486 * by boot-time tracing need to have a list of events that can be
3487 * enabled. This must be done before the filesystem is set up in order
3488 * to allow events to be traced early.
3489 */
3490 void __trace_early_add_events(struct trace_array *tr)
3491 {
3492 struct trace_event_call *call;
3493 int ret;
3494
3495 list_for_each_entry(call, &ftrace_events, list) {
3496 /* Early boot up should not have any modules loaded */
3497 if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
3498 WARN_ON_ONCE(call->module))
3499 continue;
3500
3501 ret = __trace_early_add_new_event(call, tr);
3502 if (ret < 0)
3503 pr_warn("Could not create early event %s\n",
3504 trace_event_name(call));
3505 }
3506 }
3507
3508 /* Remove the event directory structure for a trace directory. */
3509 static void
3510 __trace_remove_event_dirs(struct trace_array *tr)
3511 {
3512 struct trace_event_file *file, *next;
3513
3514 list_for_each_entry_safe(file, next, &tr->events, list)
3515 remove_event_file_dir(file);
3516 }
3517
3518 static void __add_event_to_tracers(struct trace_event_call *call)
3519 {
3520 struct trace_array *tr;
3521
3522 list_for_each_entry(tr, &ftrace_trace_arrays, list)
3523 __trace_add_new_event(call, tr);
3524 }
3525
3526 extern struct trace_event_call *__start_ftrace_events[];
3527 extern struct trace_event_call *__stop_ftrace_events[];
3528
3529 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
3530
3531 static __init int setup_trace_event(char *str)
3532 {
3533 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
3534 ring_buffer_expanded = true;
3535 disable_tracing_selftest("running event tracing");
3536
3537 return 1;
3538 }
3539 __setup("trace_event=", setup_trace_event);
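/*
* Illustrative boot command line usage:
*
*	trace_event=sched:sched_switch,irq:irq_handler_entry
*
* The saved buffer is parsed by early_enable_events() below: once
* early from event_trace_enable(), and once more from
* event_trace_enable_again() for events (like syscalls) that cannot
* start before pid 1 exists.
*/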
3540
3541 /* Expects to have event_mutex held when called */
3542 static int
3543 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
3544 {
3545 struct dentry *d_events;
3546 struct dentry *entry;
3547
3548 entry = tracefs_create_file("set_event", 0644, parent,
3549 tr, &ftrace_set_event_fops);
3550 if (!entry) {
3551 pr_warn("Could not create tracefs 'set_event' entry\n");
3552 return -ENOMEM;
3553 }
3554
3555 d_events = tracefs_create_dir("events", parent);
3556 if (!d_events) {
3557 pr_warn("Could not create tracefs 'events' directory\n");
3558 return -ENOMEM;
3559 }
3560
3561 entry = trace_create_file("enable", 0644, d_events,
3562 tr, &ftrace_tr_enable_fops);
3563 if (!entry) {
3564 pr_warn("Could not create tracefs 'enable' entry\n");
3565 return -ENOMEM;
3566 }
3567
3568 /* These are not as crucial; just warn if they are not created */
3569
3570 entry = tracefs_create_file("set_event_pid", 0644, parent,
3571 tr, &ftrace_set_event_pid_fops);
3572 if (!entry)
3573 pr_warn("Could not create tracefs 'set_event_pid' entry\n");
3574
3575 entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,
3576 tr, &ftrace_set_event_notrace_pid_fops);
3577 if (!entry)
3578 pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");
3579
3580 /* ring buffer internal formats */
3581 entry = trace_create_file("header_page", 0444, d_events,
3582 ring_buffer_print_page_header,
3583 &ftrace_show_header_fops);
3584 if (!entry)
3585 pr_warn("Could not create tracefs 'header_page' entry\n");
3586
3587 entry = trace_create_file("header_event", 0444, d_events,
3588 ring_buffer_print_entry_header,
3589 &ftrace_show_header_fops);
3590 if (!entry)
3591 pr_warn("Could not create tracefs 'header_event' entry\n");
3592
3593 tr->event_dir = d_events;
3594
3595 return 0;
3596 }
3597
3598 /**
3599 * event_trace_add_tracer - add an instance of a trace_array to events
3600 * @parent: The parent dentry to place the files/directories for events in
3601 * @tr: The trace array associated with these events
3602 *
3603 * When a new instance is created, it needs to set up its events
3604 * directory, as well as other files associated with events. It also
3605 * creates the event hierarchy in the @parent/events directory.
3606 *
3607 * Returns 0 on success.
3608 *
3609 * Must be called with event_mutex held.
3610 */
3611 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
3612 {
3613 int ret;
3614
3615 lockdep_assert_held(&event_mutex);
3616
3617 ret = create_event_toplevel_files(parent, tr);
3618 if (ret)
3619 goto out;
3620
3621 down_write(&trace_event_sem);
3622 /* If tr already has the event list, it is initialized in early boot. */
3623 if (unlikely(!list_empty(&tr->events)))
3624 __trace_early_add_event_dirs(tr);
3625 else
3626 __trace_add_event_dirs(tr);
3627 up_write(&trace_event_sem);
3628
3629 out:
3630 return ret;
3631 }
3632
3633 /*
3634 * The top trace array already had its file descriptors created.
3635 * Now the files themselves need to be created.
3636 */
3637 static __init int
3638 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
3639 {
3640 int ret;
3641
3642 mutex_lock(&event_mutex);
3643
3644 ret = create_event_toplevel_files(parent, tr);
3645 if (ret)
3646 goto out_unlock;
3647
3648 down_write(&trace_event_sem);
3649 __trace_early_add_event_dirs(tr);
3650 up_write(&trace_event_sem);
3651
3652 out_unlock:
3653 mutex_unlock(&event_mutex);
3654
3655 return ret;
3656 }
3657
3658 /* Must be called with event_mutex held */
3659 int event_trace_del_tracer(struct trace_array *tr)
3660 {
3661 lockdep_assert_held(&event_mutex);
3662
3663 /* Disable any event triggers and associated soft-disabled events */
3664 clear_event_triggers(tr);
3665
3666 /* Clear the pid list */
3667 __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
3668
3669 /* Disable any running events */
3670 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3671
3672 /* Make sure no more events are being executed */
3673 tracepoint_synchronize_unregister();
3674
3675 down_write(&trace_event_sem);
3676 __trace_remove_event_dirs(tr);
3677 tracefs_remove(tr->event_dir);
3678 up_write(&trace_event_sem);
3679
3680 tr->event_dir = NULL;
3681
3682 return 0;
3683 }
3684
3685 static __init int event_trace_memsetup(void)
3686 {
3687 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
3688 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
3689 return 0;
3690 }
3691
3692 static __init void
3693 early_enable_events(struct trace_array *tr, bool disable_first)
3694 {
3695 char *buf = bootup_event_buf;
3696 char *token;
3697 int ret;
3698
3699 while (true) {
3700 token = strsep(&buf, ",");
3701
3702 if (!token)
3703 break;
3704
3705 if (*token) {
3706 /* Restarting syscalls requires that we stop them first */
3707 if (disable_first)
3708 ftrace_set_clr_event(tr, token, 0);
3709
3710 ret = ftrace_set_clr_event(tr, token, 1);
3711 if (ret)
3712 pr_warn("Failed to enable trace event: %s\n", token);
3713 }
3714
3715 /* Put back the comma to allow this to be called again */
3716 if (buf)
3717 *(buf - 1) = ',';
3718 }
3719 }
3720
3721 static __init int event_trace_enable(void)
3722 {
3723 struct trace_array *tr = top_trace_array();
3724 struct trace_event_call **iter, *call;
3725 int ret;
3726
3727 if (!tr)
3728 return -ENODEV;
3729
3730 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3731
3732 call = *iter;
3733 ret = event_init(call);
3734 if (!ret)
3735 list_add(&call->list, &ftrace_events);
3736 }
3737
3738 /*
3739 * We need the top trace array to have a working set of trace
3740 * points at early init, before the debug files and directories
3741 * are created. Create the file entries now, and attach them
3742 * to the actual file dentries later.
3743 */
3744 __trace_early_add_events(tr);
3745
3746 early_enable_events(tr, false);
3747
3748 trace_printk_start_comm();
3749
3750 register_event_cmds();
3751
3752 register_trigger_cmds();
3753
3754 return 0;
3755 }
3756
3757 /*
3758 * event_trace_enable() is called from trace_event_init() first to
3759 * initialize events and perhaps start any events that are on the
3760 * command line. Unfortunately, there are some events that will not
3761 * start this early, like the system call tracepoints that need
3762 * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But
3763 * event_trace_enable() is called before pid 1 starts, and this flag
3764 * is never set, making the syscall tracepoint never get reached, but
3765 * the event is enabled regardless (and not doing anything).
3766 */
3767 static __init int event_trace_enable_again(void)
3768 {
3769 struct trace_array *tr;
3770
3771 tr = top_trace_array();
3772 if (!tr)
3773 return -ENODEV;
3774
3775 early_enable_events(tr, true);
3776
3777 return 0;
3778 }
3779
3780 early_initcall(event_trace_enable_again);
3781
3782 /* Initialize fields that are not related to tracefs */
3783 static __init int event_trace_init_fields(void)
3784 {
3785 if (trace_define_generic_fields())
3786 pr_warn("tracing: Failed to allocated generic fields");
3787
3788 if (trace_define_common_fields())
3789 pr_warn("tracing: Failed to allocate common fields");
3790
3791 return 0;
3792 }
3793
3794 __init int event_trace_init(void)
3795 {
3796 struct trace_array *tr;
3797 struct dentry *entry;
3798 int ret;
3799
3800 tr = top_trace_array();
3801 if (!tr)
3802 return -ENODEV;
3803
3804 entry = tracefs_create_file("available_events", 0444, NULL,
3805 tr, &ftrace_avail_fops);
3806 if (!entry)
3807 pr_warn("Could not create tracefs 'available_events' entry\n");
3808
3809 ret = early_event_add_tracer(NULL, tr);
3810 if (ret)
3811 return ret;
3812
3813 #ifdef CONFIG_MODULES
3814 ret = register_module_notifier(&trace_module_nb);
3815 if (ret)
3816 pr_warn("Failed to register trace events module notifier\n");
3817 #endif
3818
3819 eventdir_initialized = true;
3820
3821 return 0;
3822 }
3823
3824 void __init trace_event_init(void)
3825 {
3826 event_trace_memsetup();
3827 init_ftrace_syscalls();
3828 event_trace_enable();
3829 event_trace_init_fields();
3830 }
3831
3832 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
3833
3834 static DEFINE_SPINLOCK(test_spinlock);
3835 static DEFINE_SPINLOCK(test_spinlock_irq);
3836 static DEFINE_MUTEX(test_mutex);
3837
3838 static __init void test_work(struct work_struct *dummy)
3839 {
3840 spin_lock(&test_spinlock);
3841 spin_lock_irq(&test_spinlock_irq);
3842 udelay(1);
3843 spin_unlock_irq(&test_spinlock_irq);
3844 spin_unlock(&test_spinlock);
3845
3846 mutex_lock(&test_mutex);
3847 msleep(1);
3848 mutex_unlock(&test_mutex);
3849 }
3850
3851 static __init int event_test_thread(void *unused)
3852 {
3853 void *test_malloc;
3854
3855 test_malloc = kmalloc(1234, GFP_KERNEL);
3856 if (!test_malloc)
3857 pr_info("failed to kmalloc\n");
3858
3859 schedule_on_each_cpu(test_work);
3860
3861 kfree(test_malloc);
3862
3863 set_current_state(TASK_INTERRUPTIBLE);
3864 while (!kthread_should_stop()) {
3865 schedule();
3866 set_current_state(TASK_INTERRUPTIBLE);
3867 }
3868 __set_current_state(TASK_RUNNING);
3869
3870 return 0;
3871 }

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct trace_subsystem_dir *dir;
	struct trace_event_file *file;
	struct trace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

		/*
		 * Testing syscall events here is pretty useless, but
		 * we still do it if configured. It is time consuming,
		 * though. What we really need is a user thread to
		 * perform the syscalls as we test.
		 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", trace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the subsystem level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n",
				system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n",
				system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* Disable all events again */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
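
/*
 * Illustrative only: the same three granularities are what user space
 * controls through tracefs (paths assume the default mount point), and
 * kernel code can use the exported trace_set_clr_event() helper:
 *
 *	echo 1 > /sys/kernel/tracing/events/sched/sched_switch/enable
 *	echo 1 > /sys/kernel/tracing/events/sched/enable
 *	echo 1 > /sys/kernel/tracing/events/enable
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 */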

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static struct trace_event_file event_trace_file __initdata;

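/*
 * This callback runs for (nearly) every function call in the kernel
 * while registered. The per-CPU ftrace_test_event_disable counter acts
 * as a recursion guard: if it is already non-zero on this CPU, a nested
 * invocation bails out without writing an event.
 */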
static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *regs)
{
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned int trace_ctx;
	long disabled;
	int cpu;

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
						TRACE_FN, sizeof(*entry),
						trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	event_trigger_unlock_commit(&event_trace_file, buffer, event,
				    entry, trace_ctx);
 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	event_trace_file.tr = top_trace_array();
	if (WARN_ON(!event_trace_file.tr))
		return;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

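/*
 * Run at late_initcall() time so that the events, systems, and (when
 * configured) the function tracer used above are all registered before
 * the self tests execute.
 */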
late_initcall(event_trace_self_tests_init);

#endif
