// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

static LIST_HEAD(module_strings);

struct module_string {
	struct list_head	next;
	struct module		*module;
	char			*str;
};

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

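/*
 * The subsystem ref_count below is only ever modified under event_mutex,
 * so plain (non-atomic) integer operations are sufficient.
 */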
static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

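/*
 * For example:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (!(file->flags & EVENT_FILE_FL_ENABLED))
 *			continue;
 *	} while_for_each_event_file();
 */
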
static struct ftrace_event_field *
__find_event_field(struct list_head *head, const char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type, int len,
				int need_test)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->needs_test = need_test;
	field->len = len;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, 0, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);

static int trace_define_field_ext(struct trace_event_call *call, const char *type,
				  const char *name, int offset, int size, int is_signed,
				  int filter_type, int len, int need_test)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, len, need_test);
}

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type, 0, 0);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER,	\
				   0, 0);				\
	if (ret)							\
		return ret;

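/* Both helpers above expect a local "ret" variable in the caller. */
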
static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(int, common_cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);
	__generic_field(char *, stacktrace, FILTER_STACKTRACE);
	__generic_field(char *, STACKTRACE, FILTER_STACKTRACE);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	/* Holds both preempt_count and migrate_disable */
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}


static struct trace_event_fields *find_event_field(const char *fmt,
						   struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return NULL;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) || field->name[len])
			continue;

		return field;
	}
	return NULL;
}

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field;

	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* This is an array and is OK to dereference. */
	return strchr(field->type, '[') != NULL;
}

/* Look for a string within an argument */
static bool find_print_string(const char *arg, const char *str, const char *end)
{
	const char *r;

	r = strstr(arg, str);
	return r && r < end;
}

/* Return true if the argument pointer is safe */
static bool process_pointer(const char *fmt, int len, struct trace_event_call *call)
{
	const char *r, *e, *a;

	e = fmt + len;

	/* Find the REC-> in the argument */
	r = strstr(fmt, "REC->");
	if (r && r < e) {
		/*
		 * Addresses of events on the buffer, or an array on the
		 * buffer, are OK to dereference. There are ways to fool
		 * this check, but it is meant to catch common mistakes,
		 * not malicious code.
		 */
		a = strchr(fmt, '&');
		if ((a && (a < r)) || test_field(r, call))
			return true;
	} else if (find_print_string(fmt, "__get_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_sockaddr(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_sockaddr(", e)) {
		return true;
	}
	return false;
}
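
/*
 * Illustration (hypothetical field names): an argument like "REC->caller"
 * passes when "caller" is an array embedded in the event, and
 * "__get_dynamic_array(buf)" always passes, while a bare stored pointer
 * such as "REC->ptr" does not.
 */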

/* Return true if the string is safe */
static bool process_string(const char *fmt, int len, struct trace_event_call *call)
{
	struct trace_event_fields *field;
	const char *r, *e, *s;

	e = fmt + len;

	/*
	 * There are several helper functions that return strings.
	 * If the argument contains a function, then assume its field is valid.
	 * The argument is considered to contain a function if an
	 * alphanumeric or '_' appears before a parenthesis.
	 */
	s = fmt;
	do {
		r = strstr(s, "(");
		if (!r || r >= e)
			break;
		for (int i = 1; r - i >= s; i++) {
			char ch = *(r - i);
			if (isspace(ch))
				continue;
			if (isalnum(ch) || ch == '_')
				return true;
			/* Anything else, this isn't a function */
			break;
		}
		/* A function could be wrapped in parentheses, try the next one */
		s = r + 1;
	} while (s < e);

	/*
	 * Check for arrays. If the argument has: foo[REC->val]
	 * then it is very likely that foo is an array of strings
	 * that are safe to use.
	 */
	r = strstr(s, "[");
	if (r && r < e) {
		r = strstr(r, "REC->");
		if (r && r < e)
			return true;
	}

	/*
	 * If there are any strings in the argument, consider this arg OK
	 * as it could be: REC->field ? "foo" : "bar" and we don't want to
	 * get into verifying that logic here.
	 */
	if (find_print_string(fmt, "\"", e))
		return true;

	/* Dereferenced strings are also valid like any other pointer */
	if (process_pointer(fmt, len, call))
		return true;

	/* Make sure the field is found */
	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* Test this field's string before printing the event */
	call->flags |= TRACE_EVENT_FL_TEST_STR;
	field->needs_test = 1;

	return true;
}

/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * a pointer is OK if the dereference is into the event itself.
 */
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	u64 string_flags = 0;
	bool first = true;
	const char *fmt;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i, e;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there were no %p* uses,
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
 do_pointer:
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						/* Handle %*pbl case */
						if (!j && fmt[i + 1] == 'p') {
							arg++;
							i++;
							goto do_pointer;
						}
						continue;
					}
					if (fmt[i + j] == 's') {
						if (star)
							arg++;
						if (WARN_ONCE(arg == 63,
							      "Too many args for event: %s",
							      trace_event_name(call)))
							return;
						dereference_flags |= 1ULL << arg;
						string_flags |= 1ULL << arg;
					}
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			e = i;
			i++;
			while (isspace(fmt[i]))
				i++;

			/*
			 * If start_arg is zero, then this is the start of the
			 * first argument. The processing of the argument happens
			 * when the end of the argument is found, as it needs to
			 * handle parentheses and such.
			 */
			if (!start_arg) {
				start_arg = i;
				/* Balance out the i++ in the for loop */
				i--;
				continue;
			}

			if (dereference_flags & (1ULL << arg)) {
				if (string_flags & (1ULL << arg)) {
					if (process_string(fmt + start_arg, e - start_arg, call))
						dereference_flags &= ~(1ULL << arg);
				} else if (process_pointer(fmt + start_arg, e - start_arg, call))
					dereference_flags &= ~(1ULL << arg);
			}

			start_arg = i;
			arg++;
			/* Balance out the i++ in the for loop */
			i--;
		}
	}

	if (dereference_flags & (1ULL << arg)) {
		if (string_flags & (1ULL << arg)) {
			if (process_string(fmt + start_arg, i - start_arg, call))
				dereference_flags &= ~(1ULL << arg);
		} else if (process_pointer(fmt + start_arg, i - start_arg, call))
			dereference_flags &= ~(1ULL << arg);
	}

	/*
	 * If you triggered the below warning, the trace event reported
	 * uses an unsafe dereference pointer %p*. As the data stored
	 * at the trace event time may no longer exist when the trace
	 * event is printed, dereferencing to the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}
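
/*
 * Illustration (hypothetical fields): a print_fmt of "%pI4", REC->ipaddr
 * is flagged when "ipaddr" is a stored pointer, but is accepted when the
 * field is an array within the event, or when the address of event data
 * is taken, as in "%pI4", &REC->saddr.
 */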

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	data = this_cpu_ptr(tr->array_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Disable use of trace_buffered_event */
			trace_buffered_event_disable();
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_UNREGISTER, file);

			WARN_ON_ONCE(ret);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Enable use of trace_buffered_event */
			trace_buffered_event_enable();
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev,
					struct task_struct *next,
					unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
	      trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev,
					 struct task_struct *next,
					 unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
						lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		eventfs_remove_dir(dir->ei);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

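/*
 * A trace_event_file is freed only once its ref count drops to zero
 * *and* remove_event_file_dir() has marked it EVENT_FILE_FL_FREED.
 */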
void event_file_get(struct trace_event_file *file)
{
	atomic_inc(&file->ref);
}

void event_file_put(struct trace_event_file *file)
{
	if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
		if (file->flags & EVENT_FILE_FL_FREED)
			kmem_cache_free(file_cachep, file);
		return;
	}

	if (atomic_dec_and_test(&file->ref)) {
		/* Count should only go to zero when it is freed */
		if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
			return;
		kmem_cache_free(file_cachep, file);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	eventfs_remove_dir(file->ei);
	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	file->flags |= EVENT_FILE_FL_FREED;
	event_file_put(file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;
	/*
	 * The buf format can be <subsystem>:<event-name>
	 * *:<event-name> means any event by that name.
	 * :<event-name> is the same.
	 *
	 * <subsystem>:* means all events in that subsystem
	 * <subsystem>: means the same.
	 *
	 * <name> (no ':') means all events in a subsystem with
	 * the name <name> or any event that matches <name>
	 */
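	/*
	 * e.g. "sched:sched_switch" targets a single event, "sched:" every
	 * event in the sched subsystem, and a bare "sched_switch" any
	 * subsystem or event going by that name.
	 */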

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);
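
/* e.g. trace_array_set_clr_event(tr, "sched", "sched_switch", true); */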

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *
__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_PIDS);
}

static void *
np_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_NO_PIDS);
}

static void *__start(struct seq_file *m, loff_t *pos, int type)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_PIDS);
}

static void *np_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_NO_PIDS);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

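/*
 * Reading an event's "enable" file yields "0" or "1", with a trailing
 * '*' when the event is in soft mode (e.g. "0*\n").
 */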
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_file(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_file(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
		    !trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all events are cleared, or if we have a
		 * mixture: set ends up 2, 1, or 3 respectively,
		 * indexing set_to_char above.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

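/*
 * The "format" seq iterator below hands these cookies out in order:
 * FORMAT_HEADER, then the common fields, FORMAT_FIELD_SEPERATOR, the
 * event's own fields, and finally FORMAT_PRINTFMT.
 */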
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = event_file_data(m->private);
	struct trace_event_call *call = file->event_call;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = event_file_data(m->private);
	struct trace_event_call *call = file->event_call;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic arrays).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown as:
	 *	field:TYPE VAR[LEN]
	 */
1752 array_descriptor = strchr(field->type, '[');
1753
1754 if (str_has_prefix(field->type, "__data_loc"))
1755 array_descriptor = NULL;
1756
1757 if (!array_descriptor)
1758 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1759 field->type, field->name, field->offset,
1760 field->size, !!field->is_signed);
1761 else if (field->len)
1762 seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1763 (int)(array_descriptor - field->type),
1764 field->type, field->name,
1765 field->len, field->offset,
1766 field->size, !!field->is_signed);
1767 else
1768 seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1769 (int)(array_descriptor - field->type),
1770 field->type, field->name,
1771 field->offset, field->size, !!field->is_signed);
1772
1773 return 0;
1774 }
1775
f_start(struct seq_file * m,loff_t * pos)1776 static void *f_start(struct seq_file *m, loff_t *pos)
1777 {
1778 struct trace_event_file *file;
1779 void *p = (void *)FORMAT_HEADER;
1780 loff_t l = 0;
1781
1782 /* ->stop() is called even if ->start() fails */
1783 mutex_lock(&event_mutex);
1784 file = event_file_file(m->private);
1785 if (!file)
1786 return ERR_PTR(-ENODEV);
1787
1788 while (l < *pos && p)
1789 p = f_next(m, p, &l);
1790
1791 return p;
1792 }
1793
f_stop(struct seq_file * m,void * p)1794 static void f_stop(struct seq_file *m, void *p)
1795 {
1796 mutex_unlock(&event_mutex);
1797 }
1798
1799 static const struct seq_operations trace_format_seq_ops = {
1800 .start = f_start,
1801 .next = f_next,
1802 .stop = f_stop,
1803 .show = f_show,
1804 };
1805
trace_format_open(struct inode * inode,struct file * file)1806 static int trace_format_open(struct inode *inode, struct file *file)
1807 {
1808 struct seq_file *m;
1809 int ret;
1810
1811 /* Do we want to hide event format files on tracefs lockdown? */
1812
1813 ret = seq_open(file, &trace_format_seq_ops);
1814 if (ret < 0)
1815 return ret;
1816
1817 m = file->private_data;
1818 m->private = file;
1819
1820 return 0;
1821 }
1822
1823 #ifdef CONFIG_PERF_EVENTS
1824 static ssize_t
event_id_read(struct file * filp,char __user * ubuf,size_t cnt,loff_t * ppos)1825 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1826 {
1827 int id = (long)event_file_data(filp);
1828 char buf[32];
1829 int len;
1830
1831 if (unlikely(!id))
1832 return -ENODEV;
1833
1834 len = sprintf(buf, "%d\n", id);
1835
1836 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
1837 }
1838 #endif
1839
1840 static ssize_t
event_filter_read(struct file * filp,char __user * ubuf,size_t cnt,loff_t * ppos)1841 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1842 loff_t *ppos)
1843 {
1844 struct trace_event_file *file;
1845 struct trace_seq *s;
1846 int r = -ENODEV;
1847
1848 if (*ppos)
1849 return 0;
1850
1851 s = kmalloc(sizeof(*s), GFP_KERNEL);
1852
1853 if (!s)
1854 return -ENOMEM;
1855
1856 trace_seq_init(s);
1857
1858 mutex_lock(&event_mutex);
1859 file = event_file_file(filp);
1860 if (file)
1861 print_event_filter(file, s);
1862 mutex_unlock(&event_mutex);
1863
1864 if (file)
1865 r = simple_read_from_buffer(ubuf, cnt, ppos,
1866 s->buffer, trace_seq_used(s));
1867
1868 kfree(s);
1869
1870 return r;
1871 }
1872
1873 static ssize_t
event_filter_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)1874 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1875 loff_t *ppos)
1876 {
1877 struct trace_event_file *file;
1878 char *buf;
1879 int err = -ENODEV;
1880
1881 if (cnt >= PAGE_SIZE)
1882 return -EINVAL;
1883
1884 buf = memdup_user_nul(ubuf, cnt);
1885 if (IS_ERR(buf))
1886 return PTR_ERR(buf);
1887
1888 mutex_lock(&event_mutex);
1889 file = event_file_file(filp);
1890 if (file) {
1891 if (file->flags & EVENT_FILE_FL_FREED)
1892 err = -ENODEV;
1893 else
1894 err = apply_event_filter(file, buf);
1895 }
1896 mutex_unlock(&event_mutex);
1897
1898 kfree(buf);
1899 if (err < 0)
1900 return err;
1901
1902 *ppos += cnt;
1903
1904 return cnt;
1905 }
1906
1907 static LIST_HEAD(event_subsystems);
1908
subsystem_open(struct inode * inode,struct file * filp)1909 static int subsystem_open(struct inode *inode, struct file *filp)
1910 {
1911 struct trace_subsystem_dir *dir = NULL, *iter_dir;
1912 struct trace_array *tr = NULL, *iter_tr;
1913 struct event_subsystem *system = NULL;
1914 int ret;
1915
1916 if (tracing_is_disabled())
1917 return -ENODEV;
1918
1919 /* Make sure the system still exists */
1920 mutex_lock(&event_mutex);
1921 mutex_lock(&trace_types_lock);
1922 list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) {
1923 list_for_each_entry(iter_dir, &iter_tr->systems, list) {
1924 if (iter_dir == inode->i_private) {
1925 /* Don't open systems with no events */
1926 tr = iter_tr;
1927 dir = iter_dir;
1928 if (dir->nr_events) {
1929 __get_system_dir(dir);
1930 system = dir->subsystem;
1931 }
1932 goto exit_loop;
1933 }
1934 }
1935 }
1936 exit_loop:
1937 mutex_unlock(&trace_types_lock);
1938 mutex_unlock(&event_mutex);
1939
1940 if (!system)
1941 return -ENODEV;
1942
1943 /* Still need to increment the ref count of the system */
1944 if (trace_array_get(tr) < 0) {
1945 put_system(dir);
1946 return -ENODEV;
1947 }
1948
1949 ret = tracing_open_generic(inode, filp);
1950 if (ret < 0) {
1951 trace_array_put(tr);
1952 put_system(dir);
1953 }
1954
1955 return ret;
1956 }
1957
1958 static int system_tr_open(struct inode *inode, struct file *filp)
1959 {
1960 struct trace_subsystem_dir *dir;
1961 struct trace_array *tr = inode->i_private;
1962 int ret;
1963
1964 /* Make a temporary dir that has no system but points to tr */
1965 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1966 if (!dir)
1967 return -ENOMEM;
1968
1969 ret = tracing_open_generic_tr(inode, filp);
1970 if (ret < 0) {
1971 kfree(dir);
1972 return ret;
1973 }
1974 dir->tr = tr;
1975 filp->private_data = dir;
1976
1977 return 0;
1978 }
1979
1980 static int subsystem_release(struct inode *inode, struct file *file)
1981 {
1982 struct trace_subsystem_dir *dir = file->private_data;
1983
1984 trace_array_put(dir->tr);
1985
1986 /*
1987 * If dir->subsystem is NULL, then this is a temporary
1988 * descriptor that was made for a trace_array to enable
1989 * all subsystems.
1990 */
1991 if (dir->subsystem)
1992 put_system(dir);
1993 else
1994 kfree(dir);
1995
1996 return 0;
1997 }
1998
1999 static ssize_t
2000 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
2001 loff_t *ppos)
2002 {
2003 struct trace_subsystem_dir *dir = filp->private_data;
2004 struct event_subsystem *system = dir->subsystem;
2005 struct trace_seq *s;
2006 int r;
2007
2008 if (*ppos)
2009 return 0;
2010
2011 s = kmalloc(sizeof(*s), GFP_KERNEL);
2012 if (!s)
2013 return -ENOMEM;
2014
2015 trace_seq_init(s);
2016
2017 print_subsystem_event_filter(system, s);
2018 r = simple_read_from_buffer(ubuf, cnt, ppos,
2019 s->buffer, trace_seq_used(s));
2020
2021 kfree(s);
2022
2023 return r;
2024 }
2025
2026 static ssize_t
2027 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2028 loff_t *ppos)
2029 {
2030 struct trace_subsystem_dir *dir = filp->private_data;
2031 char *buf;
2032 int err;
2033
2034 if (cnt >= PAGE_SIZE)
2035 return -EINVAL;
2036
2037 buf = memdup_user_nul(ubuf, cnt);
2038 if (IS_ERR(buf))
2039 return PTR_ERR(buf);
2040
2041 err = apply_subsystem_event_filter(dir, buf);
2042 kfree(buf);
2043 if (err < 0)
2044 return err;
2045
2046 *ppos += cnt;
2047
2048 return cnt;
2049 }
2050
2051 static ssize_t
2052 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2053 {
2054 int (*func)(struct trace_seq *s) = filp->private_data;
2055 struct trace_seq *s;
2056 int r;
2057
2058 if (*ppos)
2059 return 0;
2060
2061 s = kmalloc(sizeof(*s), GFP_KERNEL);
2062 if (!s)
2063 return -ENOMEM;
2064
2065 trace_seq_init(s);
2066
2067 func(s);
2068 r = simple_read_from_buffer(ubuf, cnt, ppos,
2069 s->buffer, trace_seq_used(s));
2070
2071 kfree(s);
2072
2073 return r;
2074 }
2075
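/* Update this CPU's ignore_pid flag for the task currently running on it */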
2076 static void ignore_task_cpu(void *data)
2077 {
2078 struct trace_array *tr = data;
2079 struct trace_pid_list *pid_list;
2080 struct trace_pid_list *no_pid_list;
2081
2082 /*
2083 * This function is called by on_each_cpu() while the
2084 * event_mutex is held.
2085 */
2086 pid_list = rcu_dereference_protected(tr->filtered_pids,
2087 mutex_is_locked(&event_mutex));
2088 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
2089 mutex_is_locked(&event_mutex));
2090
2091 this_cpu_write(tr->array_buffer.data->ignore_pid,
2092 trace_ignore_this_task(pid_list, no_pid_list, current));
2093 }
2094
2095 static void register_pid_events(struct trace_array *tr)
2096 {
2097 /*
2098 * Register a probe that is called before all other probes
2099 * to set ignore_pid if next or prev do not match.
2100 * Register a probe that is called after all other probes
2101 * to only keep ignore_pid set if next pid matches.
2102 */
2103 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
2104 tr, INT_MAX);
2105 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
2106 tr, 0);
2107
2108 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
2109 tr, INT_MAX);
2110 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
2111 tr, 0);
2112
2113 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
2114 tr, INT_MAX);
2115 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
2116 tr, 0);
2117
2118 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
2119 tr, INT_MAX);
2120 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
2121 tr, 0);
2122 }
2123
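/*
 * Update the set_event_pid or set_event_notrace_pid list for this
 * instance: parse the user buffer into a new pid list, publish it
 * with RCU, and register the sched probes when pid filtering is
 * first enabled.
 */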
2124 static ssize_t
2125 event_pid_write(struct file *filp, const char __user *ubuf,
2126 size_t cnt, loff_t *ppos, int type)
2127 {
2128 struct seq_file *m = filp->private_data;
2129 struct trace_array *tr = m->private;
2130 struct trace_pid_list *filtered_pids = NULL;
2131 struct trace_pid_list *other_pids = NULL;
2132 struct trace_pid_list *pid_list;
2133 struct trace_event_file *file;
2134 ssize_t ret;
2135
2136 if (!cnt)
2137 return 0;
2138
2139 ret = tracing_update_buffers();
2140 if (ret < 0)
2141 return ret;
2142
2143 mutex_lock(&event_mutex);
2144
2145 if (type == TRACE_PIDS) {
2146 filtered_pids = rcu_dereference_protected(tr->filtered_pids,
2147 lockdep_is_held(&event_mutex));
2148 other_pids = rcu_dereference_protected(tr->filtered_no_pids,
2149 lockdep_is_held(&event_mutex));
2150 } else {
2151 filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
2152 lockdep_is_held(&event_mutex));
2153 other_pids = rcu_dereference_protected(tr->filtered_pids,
2154 lockdep_is_held(&event_mutex));
2155 }
2156
2157 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
2158 if (ret < 0)
2159 goto out;
2160
2161 if (type == TRACE_PIDS)
2162 rcu_assign_pointer(tr->filtered_pids, pid_list);
2163 else
2164 rcu_assign_pointer(tr->filtered_no_pids, pid_list);
2165
2166 list_for_each_entry(file, &tr->events, list) {
2167 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
2168 }
2169
2170 if (filtered_pids) {
2171 tracepoint_synchronize_unregister();
2172 trace_pid_list_free(filtered_pids);
2173 } else if (pid_list && !other_pids) {
2174 register_pid_events(tr);
2175 }
2176
2177 /*
2178 * Ignoring of pids is done at task switch. But we have to
2179 * check for those tasks that are currently running.
2180 * Always do this in case a pid was appended or removed.
2181 */
2182 on_each_cpu(ignore_task_cpu, tr, 1);
2183
2184 out:
2185 mutex_unlock(&event_mutex);
2186
2187 if (ret > 0)
2188 *ppos += ret;
2189
2190 return ret;
2191 }
2192
2193 static ssize_t
2194 ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
2195 size_t cnt, loff_t *ppos)
2196 {
2197 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
2198 }
2199
2200 static ssize_t
2201 ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
2202 size_t cnt, loff_t *ppos)
2203 {
2204 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
2205 }
2206
2207 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
2208 static int ftrace_event_set_open(struct inode *inode, struct file *file);
2209 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
2210 static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
2211 static int ftrace_event_release(struct inode *inode, struct file *file);
2212
2213 static const struct seq_operations show_event_seq_ops = {
2214 .start = t_start,
2215 .next = t_next,
2216 .show = t_show,
2217 .stop = t_stop,
2218 };
2219
2220 static const struct seq_operations show_set_event_seq_ops = {
2221 .start = s_start,
2222 .next = s_next,
2223 .show = t_show,
2224 .stop = t_stop,
2225 };
2226
2227 static const struct seq_operations show_set_pid_seq_ops = {
2228 .start = p_start,
2229 .next = p_next,
2230 .show = trace_pid_show,
2231 .stop = p_stop,
2232 };
2233
2234 static const struct seq_operations show_set_no_pid_seq_ops = {
2235 .start = np_start,
2236 .next = np_next,
2237 .show = trace_pid_show,
2238 .stop = p_stop,
2239 };
2240
2241 static const struct file_operations ftrace_avail_fops = {
2242 .open = ftrace_event_avail_open,
2243 .read = seq_read,
2244 .llseek = seq_lseek,
2245 .release = seq_release,
2246 };
2247
2248 static const struct file_operations ftrace_set_event_fops = {
2249 .open = ftrace_event_set_open,
2250 .read = seq_read,
2251 .write = ftrace_event_write,
2252 .llseek = seq_lseek,
2253 .release = ftrace_event_release,
2254 };
2255
2256 static const struct file_operations ftrace_set_event_pid_fops = {
2257 .open = ftrace_event_set_pid_open,
2258 .read = seq_read,
2259 .write = ftrace_event_pid_write,
2260 .llseek = seq_lseek,
2261 .release = ftrace_event_release,
2262 };
2263
2264 static const struct file_operations ftrace_set_event_notrace_pid_fops = {
2265 .open = ftrace_event_set_npid_open,
2266 .read = seq_read,
2267 .write = ftrace_event_npid_write,
2268 .llseek = seq_lseek,
2269 .release = ftrace_event_release,
2270 };
2271
2272 static const struct file_operations ftrace_enable_fops = {
2273 .open = tracing_open_file_tr,
2274 .read = event_enable_read,
2275 .write = event_enable_write,
2276 .release = tracing_release_file_tr,
2277 .llseek = default_llseek,
2278 };
2279
2280 static const struct file_operations ftrace_event_format_fops = {
2281 .open = trace_format_open,
2282 .read = seq_read,
2283 .llseek = seq_lseek,
2284 .release = seq_release,
2285 };
2286
2287 #ifdef CONFIG_PERF_EVENTS
2288 static const struct file_operations ftrace_event_id_fops = {
2289 .read = event_id_read,
2290 .llseek = default_llseek,
2291 };
2292 #endif
2293
2294 static const struct file_operations ftrace_event_filter_fops = {
2295 .open = tracing_open_file_tr,
2296 .read = event_filter_read,
2297 .write = event_filter_write,
2298 .release = tracing_release_file_tr,
2299 .llseek = default_llseek,
2300 };
2301
2302 static const struct file_operations ftrace_subsystem_filter_fops = {
2303 .open = subsystem_open,
2304 .read = subsystem_filter_read,
2305 .write = subsystem_filter_write,
2306 .llseek = default_llseek,
2307 .release = subsystem_release,
2308 };
2309
2310 static const struct file_operations ftrace_system_enable_fops = {
2311 .open = subsystem_open,
2312 .read = system_enable_read,
2313 .write = system_enable_write,
2314 .llseek = default_llseek,
2315 .release = subsystem_release,
2316 };
2317
2318 static const struct file_operations ftrace_tr_enable_fops = {
2319 .open = system_tr_open,
2320 .read = system_enable_read,
2321 .write = system_enable_write,
2322 .llseek = default_llseek,
2323 .release = subsystem_release,
2324 };
2325
2326 static const struct file_operations ftrace_show_header_fops = {
2327 .open = tracing_open_generic,
2328 .read = show_header,
2329 .llseek = default_llseek,
2330 };
2331
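/*
 * Common open for the event seq_file interfaces: check tracefs
 * lockdown, start the seq_file, and pass the inode's private data
 * on to the iterators.
 */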
2332 static int
2333 ftrace_event_open(struct inode *inode, struct file *file,
2334 const struct seq_operations *seq_ops)
2335 {
2336 struct seq_file *m;
2337 int ret;
2338
2339 ret = security_locked_down(LOCKDOWN_TRACEFS);
2340 if (ret)
2341 return ret;
2342
2343 ret = seq_open(file, seq_ops);
2344 if (ret < 0)
2345 return ret;
2346 m = file->private_data;
2347 /* copy tr over to seq ops */
2348 m->private = inode->i_private;
2349
2350 return ret;
2351 }
2352
2353 static int ftrace_event_release(struct inode *inode, struct file *file)
2354 {
2355 struct trace_array *tr = inode->i_private;
2356
2357 trace_array_put(tr);
2358
2359 return seq_release(inode, file);
2360 }
2361
2362 static int
2363 ftrace_event_avail_open(struct inode *inode, struct file *file)
2364 {
2365 const struct seq_operations *seq_ops = &show_event_seq_ops;
2366
2367 /* Checks for tracefs lockdown */
2368 return ftrace_event_open(inode, file, seq_ops);
2369 }
2370
2371 static int
2372 ftrace_event_set_open(struct inode *inode, struct file *file)
2373 {
2374 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
2375 struct trace_array *tr = inode->i_private;
2376 int ret;
2377
2378 ret = tracing_check_open_get_tr(tr);
2379 if (ret)
2380 return ret;
2381
2382 if ((file->f_mode & FMODE_WRITE) &&
2383 (file->f_flags & O_TRUNC))
2384 ftrace_clear_events(tr);
2385
2386 ret = ftrace_event_open(inode, file, seq_ops);
2387 if (ret < 0)
2388 trace_array_put(tr);
2389 return ret;
2390 }
2391
2392 static int
2393 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
2394 {
2395 const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
2396 struct trace_array *tr = inode->i_private;
2397 int ret;
2398
2399 ret = tracing_check_open_get_tr(tr);
2400 if (ret)
2401 return ret;
2402
2403 if ((file->f_mode & FMODE_WRITE) &&
2404 (file->f_flags & O_TRUNC))
2405 ftrace_clear_event_pids(tr, TRACE_PIDS);
2406
2407 ret = ftrace_event_open(inode, file, seq_ops);
2408 if (ret < 0)
2409 trace_array_put(tr);
2410 return ret;
2411 }
2412
2413 static int
2414 ftrace_event_set_npid_open(struct inode *inode, struct file *file)
2415 {
2416 const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
2417 struct trace_array *tr = inode->i_private;
2418 int ret;
2419
2420 ret = tracing_check_open_get_tr(tr);
2421 if (ret)
2422 return ret;
2423
2424 if ((file->f_mode & FMODE_WRITE) &&
2425 (file->f_flags & O_TRUNC))
2426 ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
2427
2428 ret = ftrace_event_open(inode, file, seq_ops);
2429 if (ret < 0)
2430 trace_array_put(tr);
2431 return ret;
2432 }
2433
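/* Allocate a new event_subsystem and add it to the global list */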
2434 static struct event_subsystem *
2435 create_new_subsystem(const char *name)
2436 {
2437 struct event_subsystem *system;
2438
2439 /* need to create new entry */
2440 system = kmalloc(sizeof(*system), GFP_KERNEL);
2441 if (!system)
2442 return NULL;
2443
2444 system->ref_count = 1;
2445
2446 /* Only allocate if dynamic (kprobes and modules) */
2447 system->name = kstrdup_const(name, GFP_KERNEL);
2448 if (!system->name)
2449 goto out_free;
2450
2451 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
2452 if (!system->filter)
2453 goto out_free;
2454
2455 list_add(&system->list, &event_subsystems);
2456
2457 return system;
2458
2459 out_free:
2460 kfree_const(system->name);
2461 kfree(system);
2462 return NULL;
2463 }
2464
2465 static int system_callback(const char *name, umode_t *mode, void **data,
2466 const struct file_operations **fops)
2467 {
2468 if (strcmp(name, "filter") == 0)
2469 *fops = &ftrace_subsystem_filter_fops;
2470
2471 else if (strcmp(name, "enable") == 0)
2472 *fops = &ftrace_system_enable_fops;
2473
2474 else
2475 return 0;
2476
2477 *mode = TRACE_MODE_WRITE;
2478 return 1;
2479 }
2480
2481 static struct eventfs_inode *
2482 event_subsystem_dir(struct trace_array *tr, const char *name,
2483 struct trace_event_file *file, struct eventfs_inode *parent)
2484 {
2485 struct event_subsystem *system, *iter;
2486 struct trace_subsystem_dir *dir;
2487 struct eventfs_inode *ei;
2488 int nr_entries;
2489 static struct eventfs_entry system_entries[] = {
2490 {
2491 .name = "filter",
2492 .callback = system_callback,
2493 },
2494 {
2495 .name = "enable",
2496 .callback = system_callback,
2497 }
2498 };
2499
2500 /* First see if this dir was already created */
2501 list_for_each_entry(dir, &tr->systems, list) {
2502 system = dir->subsystem;
2503 if (strcmp(system->name, name) == 0) {
2504 dir->nr_events++;
2505 file->system = dir;
2506 return dir->ei;
2507 }
2508 }
2509
2510 /* Now see if the system itself exists. */
2511 system = NULL;
2512 list_for_each_entry(iter, &event_subsystems, list) {
2513 if (strcmp(iter->name, name) == 0) {
2514 system = iter;
2515 break;
2516 }
2517 }
2518
2519 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
2520 if (!dir)
2521 goto out_fail;
2522
2523 if (!system) {
2524 system = create_new_subsystem(name);
2525 if (!system)
2526 goto out_free;
2527 } else
2528 __get_system(system);
2529
2530 /* ftrace only has directories, no files */
2531 if (strcmp(name, "ftrace") == 0)
2532 nr_entries = 0;
2533 else
2534 nr_entries = ARRAY_SIZE(system_entries);
2535
2536 ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
2537 if (IS_ERR(ei)) {
2538 pr_warn("Failed to create system directory %s\n", name);
2539 __put_system(system);
2540 goto out_free;
2541 }
2542
2543 dir->ei = ei;
2544 dir->tr = tr;
2545 dir->ref_count = 1;
2546 dir->nr_events = 1;
2547 dir->subsystem = system;
2548 file->system = dir;
2549
2550 list_add(&dir->list, &tr->systems);
2551
2552 return dir->ei;
2553
2554 out_free:
2555 kfree(dir);
2556 out_fail:
2557 /* Only print this message if we failed on a memory allocation */
2558 if (!dir || !system)
2559 pr_warn("No memory to create event subsystem %s\n", name);
2560 return NULL;
2561 }
2562
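/*
 * Create the event's field list from its class's fields_array so
 * that filters and histograms can look up field offsets and sizes.
 */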
2563 static int
2564 event_define_fields(struct trace_event_call *call)
2565 {
2566 struct list_head *head;
2567 int ret = 0;
2568
2569 /*
2570 * Other events may have the same class. Only update
2571 * the fields if they are not already defined.
2572 */
2573 head = trace_get_fields(call);
2574 if (list_empty(head)) {
2575 struct trace_event_fields *field = call->class->fields_array;
2576 unsigned int offset = sizeof(struct trace_entry);
2577
2578 for (; field->type; field++) {
2579 if (field->type == TRACE_FUNCTION_TYPE) {
2580 field->define_fields(call);
2581 break;
2582 }
2583
2584 offset = ALIGN(offset, field->align);
2585 ret = trace_define_field_ext(call, field->type, field->name,
2586 offset, field->size,
2587 field->is_signed, field->filter_type,
2588 field->len, field->needs_test);
2589 if (WARN_ON_ONCE(ret)) {
2590 pr_err("error code is %d\n", ret);
2591 break;
2592 }
2593
2594 offset += field->size;
2595 }
2596 }
2597
2598 return ret;
2599 }
2600
2601 static int event_callback(const char *name, umode_t *mode, void **data,
2602 const struct file_operations **fops)
2603 {
2604 struct trace_event_file *file = *data;
2605 struct trace_event_call *call = file->event_call;
2606
2607 if (strcmp(name, "format") == 0) {
2608 *mode = TRACE_MODE_READ;
2609 *fops = &ftrace_event_format_fops;
2610 return 1;
2611 }
2612
2613 /*
2614 * Only event directories that can be enabled should have
2615 * triggers or filters, with the exception of the "print"
2616 * event that can have a "trigger" file.
2617 */
2618 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
2619 if (call->class->reg && strcmp(name, "enable") == 0) {
2620 *mode = TRACE_MODE_WRITE;
2621 *fops = &ftrace_enable_fops;
2622 return 1;
2623 }
2624
2625 if (strcmp(name, "filter") == 0) {
2626 *mode = TRACE_MODE_WRITE;
2627 *fops = &ftrace_event_filter_fops;
2628 return 1;
2629 }
2630 }
2631
2632 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
2633 strcmp(trace_event_name(call), "print") == 0) {
2634 if (strcmp(name, "trigger") == 0) {
2635 *mode = TRACE_MODE_WRITE;
2636 *fops = &event_trigger_fops;
2637 return 1;
2638 }
2639 }
2640
2641 #ifdef CONFIG_PERF_EVENTS
2642 if (call->event.type && call->class->reg &&
2643 strcmp(name, "id") == 0) {
2644 *mode = TRACE_MODE_READ;
2645 *data = (void *)(long)call->event.type;
2646 *fops = &ftrace_event_id_fops;
2647 return 1;
2648 }
2649 #endif
2650
2651 #ifdef CONFIG_HIST_TRIGGERS
2652 if (strcmp(name, "hist") == 0) {
2653 *mode = TRACE_MODE_READ;
2654 *fops = &event_hist_fops;
2655 return 1;
2656 }
2657 #endif
2658 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
2659 if (strcmp(name, "hist_debug") == 0) {
2660 *mode = TRACE_MODE_READ;
2661 *fops = &event_hist_debug_fops;
2662 return 1;
2663 }
2664 #endif
2665 #ifdef CONFIG_TRACE_EVENT_INJECT
2666 if (call->event.type && call->class->reg &&
2667 strcmp(name, "inject") == 0) {
2668 *mode = 0200;
2669 *fops = &event_inject_fops;
2670 return 1;
2671 }
2672 #endif
2673 return 0;
2674 }
2675
2676 /* The file's ref count is incremented on creation and freeing the "enable" file decrements it */
2677 static void event_release(const char *name, void *data)
2678 {
2679 struct trace_event_file *file = data;
2680
2681 event_file_put(file);
2682 }
2683
2684 static int
2685 event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file)
2686 {
2687 struct trace_event_call *call = file->event_call;
2688 struct trace_array *tr = file->tr;
2689 struct eventfs_inode *e_events;
2690 struct eventfs_inode *ei;
2691 const char *name;
2692 int nr_entries;
2693 int ret;
2694 static struct eventfs_entry event_entries[] = {
2695 {
2696 .name = "enable",
2697 .callback = event_callback,
2698 .release = event_release,
2699 },
2700 {
2701 .name = "filter",
2702 .callback = event_callback,
2703 },
2704 {
2705 .name = "trigger",
2706 .callback = event_callback,
2707 },
2708 {
2709 .name = "format",
2710 .callback = event_callback,
2711 },
2712 #ifdef CONFIG_PERF_EVENTS
2713 {
2714 .name = "id",
2715 .callback = event_callback,
2716 },
2717 #endif
2718 #ifdef CONFIG_HIST_TRIGGERS
2719 {
2720 .name = "hist",
2721 .callback = event_callback,
2722 },
2723 #endif
2724 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
2725 {
2726 .name = "hist_debug",
2727 .callback = event_callback,
2728 },
2729 #endif
2730 #ifdef CONFIG_TRACE_EVENT_INJECT
2731 {
2732 .name = "inject",
2733 .callback = event_callback,
2734 },
2735 #endif
2736 };
2737
2738 /*
2739 * If the trace point header did not define TRACE_SYSTEM
2740 * then the system would be called "TRACE_SYSTEM". This should
2741 * never happen.
2742 */
2743 if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0))
2744 return -ENODEV;
2745
2746 e_events = event_subsystem_dir(tr, call->class->system, file, parent);
2747 if (!e_events)
2748 return -ENOMEM;
2749
2750 nr_entries = ARRAY_SIZE(event_entries);
2751
2752 name = trace_event_name(call);
2753 ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file);
2754 if (IS_ERR(ei)) {
2755 pr_warn("Could not create tracefs '%s' directory\n", name);
2756 return -1;
2757 }
2758
2759 file->ei = ei;
2760
2761 ret = event_define_fields(call);
2762 if (ret < 0) {
2763 pr_warn("Could not initialize trace point events/%s\n", name);
2764 return ret;
2765 }
2766
2767 /* Gets decremented on freeing of the "enable" file */
2768 event_file_get(file);
2769
2770 return 0;
2771 }
2772
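/* Remove the event's files and directory from every trace instance */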
2773 static void remove_event_from_tracers(struct trace_event_call *call)
2774 {
2775 struct trace_event_file *file;
2776 struct trace_array *tr;
2777
2778 do_for_each_event_file_safe(tr, file) {
2779 if (file->event_call != call)
2780 continue;
2781
2782 remove_event_file_dir(file);
2783 /*
2784 * The do_for_each_event_file_safe() is
2785 * a double loop. After finding the call for this
2786 * trace_array, we use break to jump to the next
2787 * trace_array.
2788 */
2789 break;
2790 } while_for_each_event_file();
2791 }
2792
2793 static void event_remove(struct trace_event_call *call)
2794 {
2795 struct trace_array *tr;
2796 struct trace_event_file *file;
2797
2798 do_for_each_event_file(tr, file) {
2799 if (file->event_call != call)
2800 continue;
2801
2802 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2803 tr->clear_trace = true;
2804
2805 ftrace_event_enable_disable(file, 0);
2806 /*
2807 * The do_for_each_event_file() is
2808 * a double loop. After finding the call for this
2809 * trace_array, we use break to jump to the next
2810 * trace_array.
2811 */
2812 break;
2813 } while_for_each_event_file();
2814
2815 if (call->event.funcs)
2816 __unregister_trace_event(&call->event);
2817 remove_event_from_tracers(call);
2818 list_del(&call->list);
2819 }
2820
2821 static int event_init(struct trace_event_call *call)
2822 {
2823 int ret = 0;
2824 const char *name;
2825
2826 name = trace_event_name(call);
2827 if (WARN_ON(!name))
2828 return -EINVAL;
2829
2830 if (call->class->raw_init) {
2831 ret = call->class->raw_init(call);
2832 if (ret < 0 && ret != -ENOSYS)
2833 pr_warn("Could not initialize trace events/%s\n", name);
2834 }
2835
2836 return ret;
2837 }
2838
2839 static int
2840 __register_event(struct trace_event_call *call, struct module *mod)
2841 {
2842 int ret;
2843
2844 ret = event_init(call);
2845 if (ret < 0)
2846 return ret;
2847
2848 down_write(&trace_event_sem);
2849 list_add(&call->list, &ftrace_events);
2850 up_write(&trace_event_sem);
2851
2852 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
2853 atomic_set(&call->refcnt, 0);
2854 else
2855 call->module = mod;
2856
2857 return 0;
2858 }
2859
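/*
 * Replace the eval string at @ptr (of length @len) with its numeric
 * value, in place, and shift the rest of the string down. Returns a
 * pointer just past the inserted value, or NULL if the value needs
 * more room than the string being replaced.
 */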
2860 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
2861 {
2862 int rlen;
2863 int elen;
2864
2865 /* Find the length of the eval value as a string */
2866 elen = snprintf(ptr, 0, "%ld", map->eval_value);
2867 /* Make sure there's enough room to replace the string with the value */
2868 if (len < elen)
2869 return NULL;
2870
2871 snprintf(ptr, elen + 1, "%ld", map->eval_value);
2872
2873 /* Get the rest of the string of ptr */
2874 rlen = strlen(ptr + len);
2875 memmove(ptr + elen, ptr + len, rlen);
2876 /* Make sure we end the new string */
2877 ptr[elen + rlen] = 0;
2878
2879 return ptr + elen;
2880 }
2881
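/*
 * Scan the event's print_fmt and replace uses of the eval's string
 * with its numeric value, skipping quoted strings, numeric literals
 * and structure member accesses.
 */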
2882 static void update_event_printk(struct trace_event_call *call,
2883 struct trace_eval_map *map)
2884 {
2885 char *ptr;
2886 int quote = 0;
2887 int len = strlen(map->eval_string);
2888
2889 for (ptr = call->print_fmt; *ptr; ptr++) {
2890 if (*ptr == '\\') {
2891 ptr++;
2892 /* paranoid */
2893 if (!*ptr)
2894 break;
2895 continue;
2896 }
2897 if (*ptr == '"') {
2898 quote ^= 1;
2899 continue;
2900 }
2901 if (quote)
2902 continue;
2903 if (isdigit(*ptr)) {
2904 /* skip numbers */
2905 do {
2906 ptr++;
2907 /* Check for alpha chars like ULL */
2908 } while (isalnum(*ptr));
2909 if (!*ptr)
2910 break;
2911 /*
2912 * A number must have some kind of delimiter after
2913 * it, and we can ignore that too.
2914 */
2915 continue;
2916 }
2917 if (isalpha(*ptr) || *ptr == '_') {
2918 if (strncmp(map->eval_string, ptr, len) == 0 &&
2919 !isalnum(ptr[len]) && ptr[len] != '_') {
2920 ptr = eval_replace(ptr, map, len);
2921 /* enum/sizeof string smaller than value */
2922 if (WARN_ON_ONCE(!ptr))
2923 return;
2924 /*
2925 * No need to decrement here, as eval_replace()
2926 * returns the pointer to the character past
2927 * the eval, and two evals cannot be placed
2928 * back to back without something in between.
2929 * We can skip that something in between.
2930 */
2931 continue;
2932 }
2933 skip_more:
2934 do {
2935 ptr++;
2936 } while (isalnum(*ptr) || *ptr == '_');
2937 if (!*ptr)
2938 break;
2939 /*
2940 * If what comes after this variable is a '.' or
2941 * '->' then we can continue to ignore that string.
2942 */
2943 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2944 ptr += *ptr == '.' ? 1 : 2;
2945 if (!*ptr)
2946 break;
2947 goto skip_more;
2948 }
2949 /*
2950 * Once again, we can skip the delimiter that came
2951 * after the string.
2952 */
2953 continue;
2954 }
2955 }
2956 }
2957
2958 static void add_str_to_module(struct module *module, char *str)
2959 {
2960 struct module_string *modstr;
2961
2962 modstr = kmalloc(sizeof(*modstr), GFP_KERNEL);
2963
2964 /*
2965 * If we fail to allocate memory here, then we'll just
2966 * let the str memory leak when the module is removed.
2967 * If this allocation fails, there are worse problems than
2968 * a leaked string on module removal.
2969 */
2970 if (WARN_ON_ONCE(!modstr))
2971 return;
2972
2973 modstr->module = module;
2974 modstr->str = str;
2975
2976 list_add(&modstr->next, &module_strings);
2977 }
2978
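/*
 * Replace an eval string used in an event field's type (such as an
 * array size) with its numeric value. The updated type string is
 * tracked so it can be freed on module removal.
 */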
2979 static void update_event_fields(struct trace_event_call *call,
2980 struct trace_eval_map *map)
2981 {
2982 struct ftrace_event_field *field;
2983 struct list_head *head;
2984 char *ptr;
2985 char *str;
2986 int len = strlen(map->eval_string);
2987
2988 /* Dynamic events should never have field maps */
2989 if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC))
2990 return;
2991
2992 head = trace_get_fields(call);
2993 list_for_each_entry(field, head, link) {
2994 ptr = strchr(field->type, '[');
2995 if (!ptr)
2996 continue;
2997 ptr++;
2998
2999 if (!isalpha(*ptr) && *ptr != '_')
3000 continue;
3001
3002 if (strncmp(map->eval_string, ptr, len) != 0)
3003 continue;
3004
3005 str = kstrdup(field->type, GFP_KERNEL);
3006 if (WARN_ON_ONCE(!str))
3007 return;
3008 ptr = str + (ptr - field->type);
3009 ptr = eval_replace(ptr, map, len);
3010 /* enum/sizeof string smaller than value */
3011 if (WARN_ON_ONCE(!ptr)) {
3012 kfree(str);
3013 continue;
3014 }
3015
3016 /*
3017 * If the event is part of a module, then we need to free the string
3018 * when the module is removed. Otherwise, it will stay allocated
3019 * until a reboot.
3020 */
3021 if (call->module)
3022 add_str_to_module(call->module, str);
3023
3024 field->type = str;
3025 }
3026 }
3027
3028 void trace_event_eval_update(struct trace_eval_map **map, int len)
3029 {
3030 struct trace_event_call *call, *p;
3031 const char *last_system = NULL;
3032 bool first = false;
3033 int last_i;
3034 int i;
3035
3036 down_write(&trace_event_sem);
3037 list_for_each_entry_safe(call, p, &ftrace_events, list) {
3038 /* events are usually grouped together with systems */
3039 if (!last_system || call->class->system != last_system) {
3040 first = true;
3041 last_i = 0;
3042 last_system = call->class->system;
3043 }
3044
3045 /*
3046 * Since calls are grouped by systems, the likelihood that the
3047 * next call in the iteration belongs to the same system as the
3048 * previous call is high. As an optimization, we skip searching
3049 * for a map[] that matches the call's system if the last call
3050 * was from the same system. That's what last_i is for. If the
3051 * call has the same system as the previous call, then last_i
3052 * will be the index of the first map[] that has a matching
3053 * system.
3054 */
3055 for (i = last_i; i < len; i++) {
3056 if (call->class->system == map[i]->system) {
3057 /* Save the first system if need be */
3058 if (first) {
3059 last_i = i;
3060 first = false;
3061 }
3062 update_event_printk(call, map[i]);
3063 update_event_fields(call, map[i]);
3064 }
3065 }
3066 cond_resched();
3067 }
3068 up_write(&trace_event_sem);
3069 }
3070
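/*
 * Return true if the event's system is in @systems, a comma or space
 * separated list, or if @systems is NULL (the instance accepts all
 * systems).
 */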
3071 static bool event_in_systems(struct trace_event_call *call,
3072 const char *systems)
3073 {
3074 const char *system;
3075 const char *p;
3076
3077 if (!systems)
3078 return true;
3079
3080 system = call->class->system;
3081 p = strstr(systems, system);
3082 if (!p)
3083 return false;
3084
3085 if (p != systems && !isspace(*(p - 1)) && *(p - 1) != ',')
3086 return false;
3087
3088 p += strlen(system);
3089 return !*p || isspace(*p) || *p == ',';
3090 }
3091
3092 #ifdef CONFIG_HIST_TRIGGERS
3093 /*
3094 * Wake up waiters on the hist_poll_wq from irq_work because a hist trigger
3095 * may happen in any context.
3096 */
3097 static void hist_poll_event_irq_work(struct irq_work *work)
3098 {
3099 wake_up_all(&hist_poll_wq);
3100 }
3101
3102 DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work);
3103 DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq);
3104 #endif
3105
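/*
 * Allocate a trace_event_file that binds @call to @tr, inheriting
 * any pid filtering already set on the instance, and add it to the
 * instance's event list.
 */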
3106 static struct trace_event_file *
3107 trace_create_new_event(struct trace_event_call *call,
3108 struct trace_array *tr)
3109 {
3110 struct trace_pid_list *no_pid_list;
3111 struct trace_pid_list *pid_list;
3112 struct trace_event_file *file;
3113 unsigned int first;
3114
3115 if (!event_in_systems(call, tr->system_names))
3116 return NULL;
3117
3118 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
3119 if (!file)
3120 return ERR_PTR(-ENOMEM);
3121
3122 pid_list = rcu_dereference_protected(tr->filtered_pids,
3123 lockdep_is_held(&event_mutex));
3124 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
3125 lockdep_is_held(&event_mutex));
3126
3127 if (!trace_pid_list_first(pid_list, &first) ||
3128 !trace_pid_list_first(no_pid_list, &first))
3129 file->flags |= EVENT_FILE_FL_PID_FILTER;
3130
3131 file->event_call = call;
3132 file->tr = tr;
3133 atomic_set(&file->sm_ref, 0);
3134 atomic_set(&file->tm_ref, 0);
3135 INIT_LIST_HEAD(&file->triggers);
3136 list_add(&file->list, &tr->events);
3137 event_file_get(file);
3138
3139 return file;
3140 }
3141
3142 #define MAX_BOOT_TRIGGERS 32
3143
3144 static struct boot_triggers {
3145 const char *event;
3146 char *trigger;
3147 } bootup_triggers[MAX_BOOT_TRIGGERS];
3148
3149 static char bootup_trigger_buf[COMMAND_LINE_SIZE];
3150 static int nr_boot_triggers;
3151
3152 static __init int setup_trace_triggers(char *str)
3153 {
3154 char *trigger;
3155 char *buf;
3156 int i;
3157
3158 strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE);
3159 ring_buffer_expanded = true;
3160 disable_tracing_selftest("running event triggers");
3161
3162 buf = bootup_trigger_buf;
3163 for (i = 0; i < MAX_BOOT_TRIGGERS; i++) {
3164 trigger = strsep(&buf, ",");
3165 if (!trigger)
3166 break;
3167 bootup_triggers[i].event = strsep(&trigger, ".");
3168 bootup_triggers[i].trigger = trigger;
3169 if (!bootup_triggers[i].trigger)
3170 break;
3171 }
3172
3173 nr_boot_triggers = i;
3174 return 1;
3175 }
3176 __setup("trace_trigger=", setup_trace_triggers);
3177
3178 /* Add an event to a trace directory */
3179 static int
3180 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
3181 {
3182 struct trace_event_file *file;
3183
3184 file = trace_create_new_event(call, tr);
3185 /*
3186 * trace_create_new_event() returns ERR_PTR(-ENOMEM) on a failed
3187 * allocation, or NULL if the event is not part of the tr->system_names.
3188 * When the event is not part of the tr->system_names, return zero, not
3189 * an error.
3190 */
3191 if (!file)
3192 return 0;
3193
3194 if (IS_ERR(file))
3195 return PTR_ERR(file);
3196
3197 if (eventdir_initialized)
3198 return event_create_dir(tr->event_dir, file);
3199 else
3200 return event_define_fields(call);
3201 }
3202
3203 static void trace_early_triggers(struct trace_event_file *file, const char *name)
3204 {
3205 int ret;
3206 int i;
3207
3208 for (i = 0; i < nr_boot_triggers; i++) {
3209 if (strcmp(name, bootup_triggers[i].event))
3210 continue;
3211 mutex_lock(&event_mutex);
3212 ret = trigger_process_regex(file, bootup_triggers[i].trigger);
3213 mutex_unlock(&event_mutex);
3214 if (ret)
3215 pr_err("Failed to register trigger '%s' on event %s\n",
3216 bootup_triggers[i].trigger,
3217 bootup_triggers[i].event);
3218 }
3219 }
3220
3221 /*
3222 * Just create a descriptor for early init. A descriptor is required
3223 * for enabling events at boot. We want to enable events before
3224 * the filesystem is initialized.
3225 */
3226 static int
3227 __trace_early_add_new_event(struct trace_event_call *call,
3228 struct trace_array *tr)
3229 {
3230 struct trace_event_file *file;
3231 int ret;
3232
3233 file = trace_create_new_event(call, tr);
3234 /*
3235 * trace_create_new_event() returns ERR_PTR(-ENOMEM) on a failed
3236 * allocation, or NULL if the event is not part of the tr->system_names.
3237 * When the event is not part of the tr->system_names, return zero, not
3238 * an error.
3239 */
3240 if (!file)
3241 return 0;
3242
3243 if (IS_ERR(file))
3244 return PTR_ERR(file);
3245
3246 ret = event_define_fields(call);
3247 if (ret)
3248 return ret;
3249
3250 trace_early_triggers(file, trace_event_name(call));
3251
3252 return 0;
3253 }
3254
3255 struct ftrace_module_file_ops;
3256 static void __add_event_to_tracers(struct trace_event_call *call);
3257
3258 /* Add an additional event_call dynamically */
3259 int trace_add_event_call(struct trace_event_call *call)
3260 {
3261 int ret;
3262 lockdep_assert_held(&event_mutex);
3263
3264 mutex_lock(&trace_types_lock);
3265
3266 ret = __register_event(call, NULL);
3267 if (ret >= 0)
3268 __add_event_to_tracers(call);
3269
3270 mutex_unlock(&trace_types_lock);
3271 return ret;
3272 }
3273 EXPORT_SYMBOL_GPL(trace_add_event_call);
3274
3275 /*
3276 * Must be called under locking of trace_types_lock, event_mutex and
3277 * trace_event_sem.
3278 */
3279 static void __trace_remove_event_call(struct trace_event_call *call)
3280 {
3281 event_remove(call);
3282 trace_destroy_fields(call);
3283 free_event_filter(call->filter);
3284 call->filter = NULL;
3285 }
3286
3287 static int probe_remove_event_call(struct trace_event_call *call)
3288 {
3289 struct trace_array *tr;
3290 struct trace_event_file *file;
3291
3292 #ifdef CONFIG_PERF_EVENTS
3293 if (call->perf_refcount)
3294 return -EBUSY;
3295 #endif
3296 do_for_each_event_file(tr, file) {
3297 if (file->event_call != call)
3298 continue;
3299 /*
3300 * We can't rely on the ftrace_event_enable_disable(enable => 0)
3301 * that we are going to do; EVENT_FILE_FL_SOFT_MODE can suppress
3302 * TRACE_REG_UNREGISTER.
3303 */
3304 if (file->flags & EVENT_FILE_FL_ENABLED)
3305 goto busy;
3306
3307 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
3308 tr->clear_trace = true;
3309 /*
3310 * The do_for_each_event_file() is
3311 * a double loop. After finding the call for this
3312 * trace_array, we use break to jump to the next
3313 * trace_array.
3314 */
3315 break;
3316 } while_for_each_event_file();
3317
3318 __trace_remove_event_call(call);
3319
3320 return 0;
3321 busy:
3322 /* No need to clear the trace now */
3323 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
3324 tr->clear_trace = false;
3325 }
3326 return -EBUSY;
3327 }
3328
3329 /* Remove an event_call */
3330 int trace_remove_event_call(struct trace_event_call *call)
3331 {
3332 int ret;
3333
3334 lockdep_assert_held(&event_mutex);
3335
3336 mutex_lock(&trace_types_lock);
3337 down_write(&trace_event_sem);
3338 ret = probe_remove_event_call(call);
3339 up_write(&trace_event_sem);
3340 mutex_unlock(&trace_types_lock);
3341
3342 return ret;
3343 }
3344 EXPORT_SYMBOL_GPL(trace_remove_event_call);
3345
3346 #define for_each_event(event, start, end) \
3347 for (event = start; \
3348 (unsigned long)event < (unsigned long)end; \
3349 event++)
3350
3351 #ifdef CONFIG_MODULES
3352
3353 static void trace_module_add_events(struct module *mod)
3354 {
3355 struct trace_event_call **call, **start, **end;
3356
3357 if (!mod->num_trace_events)
3358 return;
3359
3360 /* Don't add infrastructure for mods without tracepoints */
3361 if (trace_module_has_bad_taint(mod)) {
3362 pr_err("%s: module has bad taint, not creating trace events\n",
3363 mod->name);
3364 return;
3365 }
3366
3367 start = mod->trace_events;
3368 end = mod->trace_events + mod->num_trace_events;
3369
3370 for_each_event(call, start, end) {
3371 __register_event(*call, mod);
3372 __add_event_to_tracers(*call);
3373 }
3374 }
3375
3376 static void trace_module_remove_events(struct module *mod)
3377 {
3378 struct trace_event_call *call, *p;
3379 struct module_string *modstr, *m;
3380
3381 down_write(&trace_event_sem);
3382 list_for_each_entry_safe(call, p, &ftrace_events, list) {
3383 if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
3384 continue;
3385 if (call->module == mod)
3386 __trace_remove_event_call(call);
3387 }
3388 /* Check for any strings allocated for this module */
3389 list_for_each_entry_safe(modstr, m, &module_strings, next) {
3390 if (modstr->module != mod)
3391 continue;
3392 list_del(&modstr->next);
3393 kfree(modstr->str);
3394 kfree(modstr);
3395 }
3396 up_write(&trace_event_sem);
3397
3398 /*
3399 * It is safest to reset the ring buffer if the module being unloaded
3400 * registered any events that were used. The only worry is if
3401 * a new module gets loaded, and takes on the same id as the events
3402 * of this module. When printing out the buffer, traced events left
3403 * over from this module may be passed to the new module events and
3404 * unexpected results may occur.
3405 */
3406 tracing_reset_all_online_cpus_unlocked();
3407 }
3408
3409 static int trace_module_notify(struct notifier_block *self,
3410 unsigned long val, void *data)
3411 {
3412 struct module *mod = data;
3413
3414 mutex_lock(&event_mutex);
3415 mutex_lock(&trace_types_lock);
3416 switch (val) {
3417 case MODULE_STATE_COMING:
3418 trace_module_add_events(mod);
3419 break;
3420 case MODULE_STATE_GOING:
3421 trace_module_remove_events(mod);
3422 break;
3423 }
3424 mutex_unlock(&trace_types_lock);
3425 mutex_unlock(&event_mutex);
3426
3427 return NOTIFY_OK;
3428 }
3429
3430 static struct notifier_block trace_module_nb = {
3431 .notifier_call = trace_module_notify,
3432 .priority = 1, /* higher than trace.c module notify */
3433 };
3434 #endif /* CONFIG_MODULES */
3435
3436 /* Create a new event directory structure for a trace directory. */
3437 static void
3438 __trace_add_event_dirs(struct trace_array *tr)
3439 {
3440 struct trace_event_call *call;
3441 int ret;
3442
3443 lockdep_assert_held(&trace_event_sem);
3444
3445 list_for_each_entry(call, &ftrace_events, list) {
3446 ret = __trace_add_new_event(call, tr);
3447 if (ret < 0)
3448 pr_warn("Could not create directory for event %s\n",
3449 trace_event_name(call));
3450 }
3451 }
3452
3453 /* Returns any file that matches the system and event */
3454 struct trace_event_file *
3455 __find_event_file(struct trace_array *tr, const char *system, const char *event)
3456 {
3457 struct trace_event_file *file;
3458 struct trace_event_call *call;
3459 const char *name;
3460
3461 list_for_each_entry(file, &tr->events, list) {
3462
3463 call = file->event_call;
3464 name = trace_event_name(call);
3465
3466 if (!name || !call->class)
3467 continue;
3468
3469 if (strcmp(event, name) == 0 &&
3470 strcmp(system, call->class->system) == 0)
3471 return file;
3472 }
3473 return NULL;
3474 }
3475
3476 /* Returns valid trace event files that match system and event */
3477 struct trace_event_file *
3478 find_event_file(struct trace_array *tr, const char *system, const char *event)
3479 {
3480 struct trace_event_file *file;
3481
3482 file = __find_event_file(tr, system, event);
3483 if (!file || !file->event_call->class->reg ||
3484 file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
3485 return NULL;
3486
3487 return file;
3488 }
3489
3490 /**
3491 * trace_get_event_file - Find and return a trace event file
3492 * @instance: The name of the trace instance containing the event
3493 * @system: The name of the system containing the event
3494 * @event: The name of the event
3495 *
3496 * Return a trace event file given the trace instance name, trace
3497 * system, and trace event name. If the instance name is NULL, it
3498 * refers to the top-level trace array.
3499 *
3500 * This function will look it up and return it if found, after calling
3501 * trace_array_get() to prevent the instance from going away, and
3502 * increment the event's module refcount to prevent it from being
3503 * removed.
3504 *
3505 * To release the file, call trace_put_event_file(), which will call
3506 * trace_array_put() and decrement the event's module refcount.
3507 *
3508 * Return: The trace event file on success, ERR_PTR otherwise.
3509 */
3510 struct trace_event_file *trace_get_event_file(const char *instance,
3511 const char *system,
3512 const char *event)
3513 {
3514 struct trace_array *tr = top_trace_array();
3515 struct trace_event_file *file = NULL;
3516 int ret = -EINVAL;
3517
3518 if (instance) {
3519 tr = trace_array_find_get(instance);
3520 if (!tr)
3521 return ERR_PTR(-ENOENT);
3522 } else {
3523 ret = trace_array_get(tr);
3524 if (ret)
3525 return ERR_PTR(ret);
3526 }
3527
3528 mutex_lock(&event_mutex);
3529
3530 file = find_event_file(tr, system, event);
3531 if (!file) {
3532 trace_array_put(tr);
3533 ret = -EINVAL;
3534 goto out;
3535 }
3536
3537 /* Don't let event modules unload while in use */
3538 ret = trace_event_try_get_ref(file->event_call);
3539 if (!ret) {
3540 trace_array_put(tr);
3541 ret = -EBUSY;
3542 goto out;
3543 }
3544
3545 ret = 0;
3546 out:
3547 mutex_unlock(&event_mutex);
3548
3549 if (ret)
3550 file = ERR_PTR(ret);
3551
3552 return file;
3553 }
3554 EXPORT_SYMBOL_GPL(trace_get_event_file);
3555
3556 /**
3557 * trace_put_event_file - Release a file from trace_get_event_file()
3558 * @file: The trace event file
3559 *
3560 * If a file was retrieved using trace_get_event_file(), this should
3561 * be called when it's no longer needed. It will cancel the previous
3562 * trace_array_get() called by that function, and decrement the
3563 * event's module refcount.
3564 */
3565 void trace_put_event_file(struct trace_event_file *file)
3566 {
3567 mutex_lock(&event_mutex);
3568 trace_event_put_ref(file->event_call);
3569 mutex_unlock(&event_mutex);
3570
3571 trace_array_put(file->tr);
3572 }
3573 EXPORT_SYMBOL_GPL(trace_put_event_file);
3574
3575 #ifdef CONFIG_DYNAMIC_FTRACE
3576
3577 /* Avoid typos */
3578 #define ENABLE_EVENT_STR "enable_event"
3579 #define DISABLE_EVENT_STR "disable_event"
3580
3581 struct event_probe_data {
3582 struct trace_event_file *file;
3583 unsigned long count;
3584 int ref;
3585 bool enable;
3586 };
3587
3588 static void update_event_probe(struct event_probe_data *data)
3589 {
3590 if (data->enable)
3591 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3592 else
3593 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3594 }
3595
3596 static void
3597 event_enable_probe(unsigned long ip, unsigned long parent_ip,
3598 struct trace_array *tr, struct ftrace_probe_ops *ops,
3599 void *data)
3600 {
3601 struct ftrace_func_mapper *mapper = data;
3602 struct event_probe_data *edata;
3603 void **pdata;
3604
3605 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3606 if (!pdata || !*pdata)
3607 return;
3608
3609 edata = *pdata;
3610 update_event_probe(edata);
3611 }
3612
3613 static void
3614 event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
3615 struct trace_array *tr, struct ftrace_probe_ops *ops,
3616 void *data)
3617 {
3618 struct ftrace_func_mapper *mapper = data;
3619 struct event_probe_data *edata;
3620 void **pdata;
3621
3622 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3623 if (!pdata || !*pdata)
3624 return;
3625
3626 edata = *pdata;
3627
3628 if (!edata->count)
3629 return;
3630
3631 /* Skip if the event is in a state we want to switch to */
3632 if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
3633 return;
3634
3635 if (edata->count != -1)
3636 (edata->count)--;
3637
3638 update_event_probe(edata);
3639 }
3640
3641 static int
3642 event_enable_print(struct seq_file *m, unsigned long ip,
3643 struct ftrace_probe_ops *ops, void *data)
3644 {
3645 struct ftrace_func_mapper *mapper = data;
3646 struct event_probe_data *edata;
3647 void **pdata;
3648
3649 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3650
3651 if (WARN_ON_ONCE(!pdata || !*pdata))
3652 return 0;
3653
3654 edata = *pdata;
3655
3656 seq_printf(m, "%ps:", (void *)ip);
3657
3658 seq_printf(m, "%s:%s:%s",
3659 edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
3660 edata->file->event_call->class->system,
3661 trace_event_name(edata->file->event_call));
3662
3663 if (edata->count == -1)
3664 seq_puts(m, ":unlimited\n");
3665 else
3666 seq_printf(m, ":count=%ld\n", edata->count);
3667
3668 return 0;
3669 }
3670
3671 static int
3672 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
3673 unsigned long ip, void *init_data, void **data)
3674 {
3675 struct ftrace_func_mapper *mapper = *data;
3676 struct event_probe_data *edata = init_data;
3677 int ret;
3678
3679 if (!mapper) {
3680 mapper = allocate_ftrace_func_mapper();
3681 if (!mapper)
3682 return -ENODEV;
3683 *data = mapper;
3684 }
3685
3686 ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
3687 if (ret < 0)
3688 return ret;
3689
3690 edata->ref++;
3691
3692 return 0;
3693 }
3694
3695 static int free_probe_data(void *data)
3696 {
3697 struct event_probe_data *edata = data;
3698
3699 edata->ref--;
3700 if (!edata->ref) {
3701 /* Remove the SOFT_MODE flag */
3702 __ftrace_event_enable_disable(edata->file, 0, 1);
3703 trace_event_put_ref(edata->file->event_call);
3704 kfree(edata);
3705 }
3706 return 0;
3707 }
3708
3709 static void
3710 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
3711 unsigned long ip, void *data)
3712 {
3713 struct ftrace_func_mapper *mapper = data;
3714 struct event_probe_data *edata;
3715
3716 if (!ip) {
3717 if (!mapper)
3718 return;
3719 free_ftrace_func_mapper(mapper, free_probe_data);
3720 return;
3721 }
3722
3723 edata = ftrace_func_mapper_remove_ip(mapper, ip);
3724
3725 if (WARN_ON_ONCE(!edata))
3726 return;
3727
3728 if (WARN_ON_ONCE(edata->ref <= 0))
3729 return;
3730
3731 free_probe_data(edata);
3732 }
3733
3734 static struct ftrace_probe_ops event_enable_probe_ops = {
3735 .func = event_enable_probe,
3736 .print = event_enable_print,
3737 .init = event_enable_init,
3738 .free = event_enable_free,
3739 };
3740
3741 static struct ftrace_probe_ops event_enable_count_probe_ops = {
3742 .func = event_enable_count_probe,
3743 .print = event_enable_print,
3744 .init = event_enable_init,
3745 .free = event_enable_free,
3746 };
3747
3748 static struct ftrace_probe_ops event_disable_probe_ops = {
3749 .func = event_enable_probe,
3750 .print = event_enable_print,
3751 .init = event_enable_init,
3752 .free = event_enable_free,
3753 };
3754
3755 static struct ftrace_probe_ops event_disable_count_probe_ops = {
3756 .func = event_enable_count_probe,
3757 .print = event_enable_print,
3758 .init = event_enable_init,
3759 .free = event_enable_free,
3760 };
3761
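/*
 * Handle the "enable_event" and "disable_event" commands written to
 * set_ftrace_filter. Parse "system:event[:count]" and register (or,
 * with a leading '!', unregister) a function probe that soft enables
 * or disables the event when the matched functions are hit.
 */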
3762 static int
3763 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
3764 char *glob, char *cmd, char *param, int enabled)
3765 {
3766 struct trace_event_file *file;
3767 struct ftrace_probe_ops *ops;
3768 struct event_probe_data *data;
3769 const char *system;
3770 const char *event;
3771 char *number;
3772 bool enable;
3773 int ret;
3774
3775 if (!tr)
3776 return -ENODEV;
3777
3778 /* hash funcs only work with set_ftrace_filter */
3779 if (!enabled || !param)
3780 return -EINVAL;
3781
3782 system = strsep(&param, ":");
3783 if (!param)
3784 return -EINVAL;
3785
3786 event = strsep(&param, ":");
3787
3788 mutex_lock(&event_mutex);
3789
3790 ret = -EINVAL;
3791 file = find_event_file(tr, system, event);
3792 if (!file)
3793 goto out;
3794
3795 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
3796
3797 if (enable)
3798 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
3799 else
3800 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
3801
3802 if (glob[0] == '!') {
3803 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
3804 goto out;
3805 }
3806
3807 ret = -ENOMEM;
3808
3809 data = kzalloc(sizeof(*data), GFP_KERNEL);
3810 if (!data)
3811 goto out;
3812
3813 data->enable = enable;
3814 data->count = -1;
3815 data->file = file;
3816
3817 if (!param)
3818 goto out_reg;
3819
3820 number = strsep(&param, ":");
3821
3822 ret = -EINVAL;
3823 if (!strlen(number))
3824 goto out_free;
3825
3826 /*
3827 * We use the callback data field (which is a pointer)
3828 * as our counter.
3829 */
3830 ret = kstrtoul(number, 0, &data->count);
3831 if (ret)
3832 goto out_free;
3833
3834 out_reg:
3835 /* Don't let event modules unload while probe registered */
3836 ret = trace_event_try_get_ref(file->event_call);
3837 if (!ret) {
3838 ret = -EBUSY;
3839 goto out_free;
3840 }
3841
3842 ret = __ftrace_event_enable_disable(file, 1, 1);
3843 if (ret < 0)
3844 goto out_put;
3845
3846 ret = register_ftrace_function_probe(glob, tr, ops, data);
3847 /*
3848 * On success, the above returns the number of functions enabled,
3849 * but if it didn't find any functions it returns zero.
3850 * Consider no functions a failure too.
3851 */
3852 if (!ret) {
3853 ret = -ENOENT;
3854 goto out_disable;
3855 } else if (ret < 0)
3856 goto out_disable;
3857 /* Just return zero, not the number of enabled functions */
3858 ret = 0;
3859 out:
3860 mutex_unlock(&event_mutex);
3861 return ret;
3862
3863 out_disable:
3864 __ftrace_event_enable_disable(file, 0, 1);
3865 out_put:
3866 trace_event_put_ref(file->event_call);
3867 out_free:
3868 kfree(data);
3869 goto out;
3870 }
3871
3872 static struct ftrace_func_command event_enable_cmd = {
3873 .name = ENABLE_EVENT_STR,
3874 .func = event_enable_func,
3875 };
3876
3877 static struct ftrace_func_command event_disable_cmd = {
3878 .name = DISABLE_EVENT_STR,
3879 .func = event_enable_func,
3880 };
3881
3882 static __init int register_event_cmds(void)
3883 {
3884 int ret;
3885
3886 ret = register_ftrace_command(&event_enable_cmd);
3887 if (WARN_ON(ret < 0))
3888 return ret;
3889 ret = register_ftrace_command(&event_disable_cmd);
3890 if (WARN_ON(ret < 0))
3891 unregister_ftrace_command(&event_enable_cmd);
3892 return ret;
3893 }
3894 #else
3895 static inline int register_event_cmds(void) { return 0; }
3896 #endif /* CONFIG_DYNAMIC_FTRACE */
3897
3898 /*
3899 * The top level array and trace arrays created by boot-time tracing
3900 * have already had their trace_event_file descriptors created in order
3901 * to allow for early events to be recorded.
3902 * This function is called after tracefs has been initialized,
3903 * and we now have to create the files associated with the events.
3904 */
3905 static void __trace_early_add_event_dirs(struct trace_array *tr)
3906 {
3907 struct trace_event_file *file;
3908 int ret;
3909
3911 list_for_each_entry(file, &tr->events, list) {
3912 ret = event_create_dir(tr->event_dir, file);
3913 if (ret < 0)
3914 pr_warn("Could not create directory for event %s\n",
3915 trace_event_name(file->event_call));
3916 }
3917 }
3918
3919 /*
3920 * For early boot up, the top trace array and the trace arrays created
3921 * by boot-time tracing need to have a list of events that can be
3922 * enabled. This must be done before the filesystem is set up in order
3923 * to allow events to be traced early.
3924 */
3925 void __trace_early_add_events(struct trace_array *tr)
3926 {
3927 struct trace_event_call *call;
3928 int ret;
3929
3930 list_for_each_entry(call, &ftrace_events, list) {
3931 /* Early boot up should not have any modules loaded */
3932 if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
3933 WARN_ON_ONCE(call->module))
3934 continue;
3935
3936 ret = __trace_early_add_new_event(call, tr);
3937 if (ret < 0)
3938 pr_warn("Could not create early event %s\n",
3939 trace_event_name(call));
3940 }
3941 }
3942
3943 /* Remove the event directory structure for a trace directory. */
3944 static void
3945 __trace_remove_event_dirs(struct trace_array *tr)
3946 {
3947 struct trace_event_file *file, *next;
3948
3949 list_for_each_entry_safe(file, next, &tr->events, list)
3950 remove_event_file_dir(file);
3951 }
3952
3953 static void __add_event_to_tracers(struct trace_event_call *call)
3954 {
3955 struct trace_array *tr;
3956
3957 list_for_each_entry(tr, &ftrace_trace_arrays, list)
3958 __trace_add_new_event(call, tr);
3959 }
3960
3961 extern struct trace_event_call *__start_ftrace_events[];
3962 extern struct trace_event_call *__stop_ftrace_events[];
3963
3964 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
3965
3966 static __init int setup_trace_event(char *str)
3967 {
3968 strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
3969 ring_buffer_expanded = true;
3970 disable_tracing_selftest("running event tracing");
3971
3972 return 1;
3973 }
3974 __setup("trace_event=", setup_trace_event);
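/*
 * Example boot usage (see Documentation/admin-guide/kernel-parameters.txt):
 *
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The list is stashed in bootup_event_buf here and consumed later by
 * early_enable_events(), since tracefs does not exist this early in boot.
 */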
3975
3976 static int events_callback(const char *name, umode_t *mode, void **data,
3977 const struct file_operations **fops)
3978 {
3979 if (strcmp(name, "enable") == 0) {
3980 *mode = TRACE_MODE_WRITE;
3981 *fops = &ftrace_tr_enable_fops;
3982 return 1;
3983 }
3984
3985 if (strcmp(name, "header_page") == 0)
3986 *data = ring_buffer_print_page_header;
3987
3988 else if (strcmp(name, "header_event") == 0)
3989 *data = ring_buffer_print_entry_header;
3990
3991 else
3992 return 0;
3993
3994 *mode = TRACE_MODE_READ;
3995 *fops = &ftrace_show_header_fops;
3996 return 1;
3997 }
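/*
 * events_callback() backs the static eventfs_entry table defined below:
 * eventfs creates these control files on demand, and at lookup time the
 * callback fills in the mode, data, and file_operations for the matching
 * name (returning 1), or returns 0 to leave the entry to the eventfs
 * defaults.
 */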
3998
3999 /* Expects to have event_mutex held when called */
4000 static int
4001 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
4002 {
4003 struct eventfs_inode *e_events;
4004 struct dentry *entry;
4005 int nr_entries;
4006 static struct eventfs_entry events_entries[] = {
4007 {
4008 .name = "enable",
4009 .callback = events_callback,
4010 },
4011 {
4012 .name = "header_page",
4013 .callback = events_callback,
4014 },
4015 {
4016 .name = "header_event",
4017 .callback = events_callback,
4018 },
4019 };
4020
4021 entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
4022 tr, &ftrace_set_event_fops);
4023 if (!entry)
4024 return -ENOMEM;
4025
4026 nr_entries = ARRAY_SIZE(events_entries);
4027
4028 e_events = eventfs_create_events_dir("events", parent, events_entries,
4029 nr_entries, tr);
4030 if (IS_ERR(e_events)) {
4031 pr_warn("Could not create tracefs 'events' directory\n");
4032 return -ENOMEM;
4033 }
4034
4035 /* These are not as crucial, just warn if they are not created */
4036
4037 trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
4038 tr, &ftrace_set_event_pid_fops);
4039
4040 trace_create_file("set_event_notrace_pid",
4041 TRACE_MODE_WRITE, parent, tr,
4042 &ftrace_set_event_notrace_pid_fops);
4043
4044 tr->event_dir = e_events;
4045
4046 return 0;
4047 }
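/*
 * A rough sketch of the per-instance layout this creates (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   set_event, set_event_pid, set_event_notrace_pid
 *   events/enable, events/header_page, events/header_event
 *
 * with the individual system/event directories added under events/ by
 * event_create_dir().
 */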
4048
4049 /**
4050 * event_trace_add_tracer - add an instance of a trace_array to events
4051 * @parent: The parent dentry to place the files/directories for events in
4052 * @tr: The trace array associated with these events
4053 *
4054 * When a new instance is created, it needs to set up its events
4055 * directory, as well as other files associated with events. It also
4056 * creates the event hierarchy in the @parent/events directory.
4057 *
4058 * Returns 0 on success.
4059 *
4060 * Must be called with event_mutex held.
4061 */
4062 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
4063 {
4064 int ret;
4065
4066 lockdep_assert_held(&event_mutex);
4067
4068 ret = create_event_toplevel_files(parent, tr);
4069 if (ret)
4070 goto out;
4071
4072 down_write(&trace_event_sem);
4073 /* If tr already has the event list, it is initialized in early boot. */
4074 if (unlikely(!list_empty(&tr->events)))
4075 __trace_early_add_event_dirs(tr);
4076 else
4077 __trace_add_event_dirs(tr);
4078 up_write(&trace_event_sem);
4079
4080 out:
4081 return ret;
4082 }
4083
4084 /*
4085 * The top trace array already had its file descriptors created.
4086 * Now the files themselves need to be created.
4087 */
4088 static __init int
4089 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
4090 {
4091 int ret;
4092
4093 mutex_lock(&event_mutex);
4094
4095 ret = create_event_toplevel_files(parent, tr);
4096 if (ret)
4097 goto out_unlock;
4098
4099 down_write(&trace_event_sem);
4100 __trace_early_add_event_dirs(tr);
4101 up_write(&trace_event_sem);
4102
4103 out_unlock:
4104 mutex_unlock(&event_mutex);
4105
4106 return ret;
4107 }
4108
4109 /* Must be called with event_mutex held */
4110 int event_trace_del_tracer(struct trace_array *tr)
4111 {
4112 lockdep_assert_held(&event_mutex);
4113
4114 /* Disable any event triggers and associated soft-disabled events */
4115 clear_event_triggers(tr);
4116
4117 /* Clear the pid list */
4118 __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
4119
4120 /* Disable any running events */
4121 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
4122
4123 /* Make sure no more events are being executed */
4124 tracepoint_synchronize_unregister();
4125
4126 down_write(&trace_event_sem);
4127 __trace_remove_event_dirs(tr);
4128 eventfs_remove_events_dir(tr->event_dir);
4129 up_write(&trace_event_sem);
4130
4131 tr->event_dir = NULL;
4132
4133 return 0;
4134 }
4135
4136 static __init int event_trace_memsetup(void)
4137 {
4138 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
4139 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
4140 return 0;
4141 }
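/*
 * SLAB_PANIC makes a failure to create either cache fatal at boot,
 * which is why the allocations above need no error handling.
 */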
4142
4143 __init void
4144 early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
4145 {
4146 char *token;
4147 int ret;
4148
4149 while (true) {
4150 token = strsep(&buf, ",");
4151
4152 if (!token)
4153 break;
4154
4155 if (*token) {
4156 /* Restarting syscalls requires that we stop them first */
4157 if (disable_first)
4158 ftrace_set_clr_event(tr, token, 0);
4159
4160 ret = ftrace_set_clr_event(tr, token, 1);
4161 if (ret)
4162 pr_warn("Failed to enable trace event: %s\n", token);
4163 }
4164
4165 /* Put back the comma to allow this to be called again */
4166 if (buf)
4167 *(buf - 1) = ',';
4168 }
4169 }
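/*
 * Worked example: with buf = "sched:sched_switch,irq:irq_handler_entry",
 * the first strsep() turns the ',' into '\0' and returns the token
 * "sched:sched_switch". Restoring the ',' once the token has been
 * handled leaves the buffer exactly as it was, so
 * event_trace_enable_again() can re-run this parse on the same
 * bootup_event_buf later in boot.
 */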
4170
4171 static __init int event_trace_enable(void)
4172 {
4173 struct trace_array *tr = top_trace_array();
4174 struct trace_event_call **iter, *call;
4175 int ret;
4176
4177 if (!tr)
4178 return -ENODEV;
4179
4180 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
4181
4182 call = *iter;
4183 ret = event_init(call);
4184 if (!ret)
4185 list_add(&call->list, &ftrace_events);
4186 }
4187
4188 register_trigger_cmds();
4189
4190 /*
4191 * We need the top trace array to have a working set of trace
4192 * points at early init, before the debug files and directories
4193 * are created. Create the file entries now, and attach them
4194 * to the actual file dentries later.
4195 */
4196 __trace_early_add_events(tr);
4197
4198 early_enable_events(tr, bootup_event_buf, false);
4199
4200 trace_printk_start_comm();
4201
4202 register_event_cmds();
4203
4205 return 0;
4206 }
4207
4208 /*
4209 * event_trace_enable() is called from trace_event_init() first to
4210 * initialize events and perhaps start any events that are on the
4211 * command line. Unfortunately, there are some events that will not
4212 * start this early, like the system call tracepoints that need
4213 * the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag set on pid 1. But
4214 * event_trace_enable() runs before pid 1 starts, so this flag is
4215 * never set, the syscall tracepoints are never reached, and the
4216 * event is enabled to no effect.
4217 */
4218 static __init int event_trace_enable_again(void)
4219 {
4220 struct trace_array *tr;
4221
4222 tr = top_trace_array();
4223 if (!tr)
4224 return -ENODEV;
4225
4226 early_enable_events(tr, bootup_event_buf, true);
4227
4228 return 0;
4229 }
4230
4231 early_initcall(event_trace_enable_again);
4232
4233 /* Init fields which are not related to the tracefs */
4234 static __init int event_trace_init_fields(void)
4235 {
4236 if (trace_define_generic_fields())
4237 pr_warn("tracing: Failed to allocated generic fields");
4238
4239 if (trace_define_common_fields())
4240 pr_warn("tracing: Failed to allocate common fields");
4241
4242 return 0;
4243 }
4244
4245 __init int event_trace_init(void)
4246 {
4247 struct trace_array *tr;
4248 int ret;
4249
4250 tr = top_trace_array();
4251 if (!tr)
4252 return -ENODEV;
4253
4254 trace_create_file("available_events", TRACE_MODE_READ,
4255 NULL, tr, &ftrace_avail_fops);
4256
4257 ret = early_event_add_tracer(NULL, tr);
4258 if (ret)
4259 return ret;
4260
4261 #ifdef CONFIG_MODULES
4262 ret = register_module_notifier(&trace_module_nb);
4263 if (ret)
4264 pr_warn("Failed to register trace events module notifier\n");
4265 #endif
4266
4267 eventdir_initialized = true;
4268
4269 return 0;
4270 }
4271
4272 void __init trace_event_init(void)
4273 {
4274 event_trace_memsetup();
4275 init_ftrace_syscalls();
4276 event_trace_enable();
4277 event_trace_init_fields();
4278 }
4279
4280 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
4281
4282 static DEFINE_SPINLOCK(test_spinlock);
4283 static DEFINE_SPINLOCK(test_spinlock_irq);
4284 static DEFINE_MUTEX(test_mutex);
4285
4286 static __init void test_work(struct work_struct *dummy)
4287 {
4288 spin_lock(&test_spinlock);
4289 spin_lock_irq(&test_spinlock_irq);
4290 udelay(1);
4291 spin_unlock_irq(&test_spinlock_irq);
4292 spin_unlock(&test_spinlock);
4293
4294 mutex_lock(&test_mutex);
4295 msleep(1);
4296 mutex_unlock(&test_mutex);
4297 }
4298
4299 static __init int event_test_thread(void *unused)
4300 {
4301 void *test_malloc;
4302
4303 test_malloc = kmalloc(1234, GFP_KERNEL);
4304 if (!test_malloc)
4305 pr_info("failed to kmalloc\n");
4306
4307 schedule_on_each_cpu(test_work);
4308
4309 kfree(test_malloc);
4310
4311 set_current_state(TASK_INTERRUPTIBLE);
4312 while (!kthread_should_stop()) {
4313 schedule();
4314 set_current_state(TASK_INTERRUPTIBLE);
4315 }
4316 __set_current_state(TASK_RUNNING);
4317
4318 return 0;
4319 }
4320
4321 /*
4322 * Do various things that may trigger events.
4323 */
4324 static __init void event_test_stuff(void)
4325 {
4326 struct task_struct *test_thread;
4327
4328 test_thread = kthread_run(event_test_thread, NULL, "test-events");
4329 msleep(1);
4330 kthread_stop(test_thread);
4331 }
4332
4333 /*
4334 * For every trace event defined, we will test each trace point separately,
4335 * and then by groups, and finally all trace points.
4336 */
4337 static __init void event_trace_self_tests(void)
4338 {
4339 struct trace_subsystem_dir *dir;
4340 struct trace_event_file *file;
4341 struct trace_event_call *call;
4342 struct event_subsystem *system;
4343 struct trace_array *tr;
4344 int ret;
4345
4346 tr = top_trace_array();
4347 if (!tr)
4348 return;
4349
4350 pr_info("Running tests on trace events:\n");
4351
4352 list_for_each_entry(file, &tr->events, list) {
4353
4354 call = file->event_call;
4355
4356 /* Only test those that have a probe */
4357 if (!call->class || !call->class->probe)
4358 continue;
4359
4360 /*
4361 * Testing syscall events here is pretty useless, but
4362 * we still do it if configured. It is time consuming, though.
4363 * What we really need is a user thread to perform the
4364 * syscalls as we test.
4365 */
4366 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
4367 if (call->class->system &&
4368 strcmp(call->class->system, "syscalls") == 0)
4369 continue;
4370 #endif
4371
4372 pr_info("Testing event %s: ", trace_event_name(call));
4373
4374 /*
4375 * If an event is already enabled, someone is using
4376 * it and the self test should not be on.
4377 */
4378 if (file->flags & EVENT_FILE_FL_ENABLED) {
4379 pr_warn("Enabled event during self test!\n");
4380 WARN_ON_ONCE(1);
4381 continue;
4382 }
4383
4384 ftrace_event_enable_disable(file, 1);
4385 event_test_stuff();
4386 ftrace_event_enable_disable(file, 0);
4387
4388 pr_cont("OK\n");
4389 }
4390
4391 /* Now test at the sub system level */
4392
4393 pr_info("Running tests on trace event systems:\n");
4394
4395 list_for_each_entry(dir, &tr->systems, list) {
4396
4397 system = dir->subsystem;
4398
4399 /* the ftrace system is special, skip it */
4400 if (strcmp(system->name, "ftrace") == 0)
4401 continue;
4402
4403 pr_info("Testing event system %s: ", system->name);
4404
4405 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
4406 if (WARN_ON_ONCE(ret)) {
4407 pr_warn("error enabling system %s\n",
4408 system->name);
4409 continue;
4410 }
4411
4412 event_test_stuff();
4413
4414 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
4415 if (WARN_ON_ONCE(ret)) {
4416 pr_warn("error disabling system %s\n",
4417 system->name);
4418 continue;
4419 }
4420
4421 pr_cont("OK\n");
4422 }
4423
4424 /* Test with all events enabled */
4425
4426 pr_info("Running tests on all trace events:\n");
4427 pr_info("Testing all events: ");
4428
4429 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
4430 if (WARN_ON_ONCE(ret)) {
4431 pr_warn("error enabling all events\n");
4432 return;
4433 }
4434
4435 event_test_stuff();
4436
4437 /* Disable all events again */
4438 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
4439 if (WARN_ON_ONCE(ret)) {
4440 pr_warn("error disabling all events\n");
4441 return;
4442 }
4443
4444 pr_cont("OK\n");
4445 }
4446
4447 #ifdef CONFIG_FUNCTION_TRACER
4448
4449 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
4450
4451 static struct trace_event_file event_trace_file __initdata;
4452
4453 static void __init
4454 function_test_events_call(unsigned long ip, unsigned long parent_ip,
4455 struct ftrace_ops *op, struct ftrace_regs *regs)
4456 {
4457 struct trace_buffer *buffer;
4458 struct ring_buffer_event *event;
4459 struct ftrace_entry *entry;
4460 unsigned int trace_ctx;
4461 long disabled;
4462 int cpu;
4463
4464 trace_ctx = tracing_gen_ctx();
4465 preempt_disable_notrace();
4466 cpu = raw_smp_processor_id();
4467 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
4468
4469 if (disabled != 1)
4470 goto out;
4471
4472 event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
4473 TRACE_FN, sizeof(*entry),
4474 trace_ctx);
4475 if (!event)
4476 goto out;
4477 entry = ring_buffer_event_data(event);
4478 entry->ip = ip;
4479 entry->parent_ip = parent_ip;
4480
4481 event_trigger_unlock_commit(&event_trace_file, buffer, event,
4482 entry, trace_ctx);
4483 out:
4484 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
4485 preempt_enable_notrace();
4486 }
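/*
 * The per-CPU counter above is a recursion guard: the body only runs
 * when this is the outermost entry on this CPU (atomic_inc_return()
 * returned 1), so any events generated while writing the TRACE_FN entry
 * cannot re-enter and recurse through this callback.
 */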
4487
4488 static struct ftrace_ops trace_ops __initdata =
4489 {
4490 .func = function_test_events_call,
4491 };
4492
4493 static __init void event_trace_self_test_with_function(void)
4494 {
4495 int ret;
4496
4497 event_trace_file.tr = top_trace_array();
4498 if (WARN_ON(!event_trace_file.tr))
4499 return;
4500
4501 ret = register_ftrace_function(&trace_ops);
4502 if (WARN_ON(ret < 0)) {
4503 pr_info("Failed to enable function tracer for event tests\n");
4504 return;
4505 }
4506 pr_info("Running tests again, along with the function tracer\n");
4507 event_trace_self_tests();
4508 unregister_ftrace_function(&trace_ops);
4509 }
4510 #else
4511 static __init void event_trace_self_test_with_function(void)
4512 {
4513 }
4514 #endif
4515
4516 static __init int event_trace_self_tests_init(void)
4517 {
4518 if (!tracing_selftest_disabled) {
4519 event_trace_self_tests();
4520 event_trace_self_test_with_function();
4521 }
4522
4523 return 0;
4524 }
4525
4526 late_initcall(event_trace_self_tests_init);
4527
4528 #endif
4529