1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
14 */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/tracefs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35
36 #include <trace/events/sched.h>
37
38 #include <asm/setup.h>
39
40 #include "trace_output.h"
41 #include "trace_stat.h"
42
43 #define FTRACE_WARN_ON(cond) \
44 ({ \
45 int ___r = cond; \
46 if (WARN_ON(___r)) \
47 ftrace_kill(); \
48 ___r; \
49 })
50
51 #define FTRACE_WARN_ON_ONCE(cond) \
52 ({ \
53 int ___r = cond; \
54 if (WARN_ON_ONCE(___r)) \
55 ftrace_kill(); \
56 ___r; \
57 })
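/*
 * A minimal usage sketch: both macros are GNU C statement expressions,
 * so they evaluate to the tested condition and can drive control flow
 * directly (hypothetical caller, for illustration only):
 *
 *	if (FTRACE_WARN_ON(!ops))
 *		return;		<-- ftrace_kill() has already run here
 */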
58
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
66
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_OPS_HASH(opsname) \
69 .func_hash = &opsname.local_hash, \
70 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71 #define ASSIGN_OPS_HASH(opsname, val) \
72 .func_hash = val, \
73 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
74 #else
75 #define INIT_OPS_HASH(opsname)
76 #define ASSIGN_OPS_HASH(opsname, val)
77 #endif
78
79 static struct ftrace_ops ftrace_list_end __read_mostly = {
80 .func = ftrace_stub,
81 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
82 INIT_OPS_HASH(ftrace_list_end)
83 };
84
85 /* ftrace_enabled is a method to turn ftrace on or off */
86 int ftrace_enabled __read_mostly;
87 static int last_ftrace_enabled;
88
89 /* Current function tracing op */
90 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
91 /* What to set function_trace_op to */
92 static struct ftrace_ops *set_function_trace_op;
93
94 /* List for set_ftrace_pid's pids. */
95 LIST_HEAD(ftrace_pids);
96 struct ftrace_pid {
97 struct list_head list;
98 struct pid *pid;
99 };
100
101 /*
102 * ftrace_disabled is set when an anomaly is discovered.
103 * ftrace_disabled is much stronger than ftrace_enabled.
104 */
105 static int ftrace_disabled __read_mostly;
106
107 static DEFINE_MUTEX(ftrace_lock);
108
109 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113 static struct ftrace_ops global_ops;
114 static struct ftrace_ops control_ops;
115
116 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
117 struct ftrace_ops *op, struct pt_regs *regs);
118
119 #if ARCH_SUPPORTS_FTRACE_OPS
120 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
121 struct ftrace_ops *op, struct pt_regs *regs);
122 #else
123 /* See comment below, where ftrace_ops_list_func is defined */
124 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
125 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
126 #endif
127
128 /*
129 * Traverse the ftrace_ops list, invoking all entries. The reason that we
130 * can use rcu_dereference_raw_notrace() is that elements removed from this list
131 * are simply leaked, so there is no need to interact with a grace-period
132 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
133 * concurrent insertions into the list.
134 *
135 * Silly Alpha and silly pointer-speculation compiler optimizations!
136 */
137 #define do_for_each_ftrace_op(op, list) \
138 op = rcu_dereference_raw_notrace(list); \
139 do
140
141 /*
142 * Optimized for just a single item in the list (as that is the normal case).
143 */
144 #define while_for_each_ftrace_op(op) \
145 while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
146 unlikely((op) != &ftrace_list_end))
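/*
 * A sketch of how the macro pair composes (hypothetical walker, for
 * illustration only):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_ENABLED)
 *			inspect(op);	<-- hypothetical helper
 *	} while_for_each_ftrace_op(op);
 *
 * Because this expands to a do-while, the body also runs for the list
 * head; on an empty list that head is the ftrace_list_end stub.
 */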
147
148 static inline void ftrace_ops_init(struct ftrace_ops *ops)
149 {
150 #ifdef CONFIG_DYNAMIC_FTRACE
151 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
152 mutex_init(&ops->local_hash.regex_lock);
153 ops->func_hash = &ops->local_hash;
154 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
155 }
156 #endif
157 }
158
159 /**
160 * ftrace_nr_registered_ops - return number of ops registered
161 *
162 * Returns the number of ftrace_ops registered and tracing functions
163 */
164 int ftrace_nr_registered_ops(void)
165 {
166 struct ftrace_ops *ops;
167 int cnt = 0;
168
169 mutex_lock(&ftrace_lock);
170
171 for (ops = ftrace_ops_list;
172 ops != &ftrace_list_end; ops = ops->next)
173 cnt++;
174
175 mutex_unlock(&ftrace_lock);
176
177 return cnt;
178 }
179
180 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
181 struct ftrace_ops *op, struct pt_regs *regs)
182 {
183 if (!test_tsk_trace_trace(current))
184 return;
185
186 ftrace_pid_function(ip, parent_ip, op, regs);
187 }
188
189 static void set_ftrace_pid_function(ftrace_func_t func)
190 {
191 /* do not set ftrace_pid_function to itself! */
192 if (func != ftrace_pid_func)
193 ftrace_pid_function = func;
194 }
195
196 /**
197 * clear_ftrace_function - reset the ftrace function
198 *
199 * This NULLs the ftrace function and in essence stops
200 * tracing. There may be some lag before other CPUs stop using the old function.
201 */
202 void clear_ftrace_function(void)
203 {
204 ftrace_trace_function = ftrace_stub;
205 ftrace_pid_function = ftrace_stub;
206 }
207
208 static void control_ops_disable_all(struct ftrace_ops *ops)
209 {
210 int cpu;
211
212 for_each_possible_cpu(cpu)
213 *per_cpu_ptr(ops->disabled, cpu) = 1;
214 }
215
216 static int control_ops_alloc(struct ftrace_ops *ops)
217 {
218 int __percpu *disabled;
219
220 disabled = alloc_percpu(int);
221 if (!disabled)
222 return -ENOMEM;
223
224 ops->disabled = disabled;
225 control_ops_disable_all(ops);
226 return 0;
227 }
228
229 static void ftrace_sync(struct work_struct *work)
230 {
231 /*
232 * This function is just a stub to implement a hard force
233 * of synchronize_sched(). This requires synchronizing
234 * tasks even in userspace and idle.
235 *
236 * Yes, function tracing is rude.
237 */
238 }
239
240 static void ftrace_sync_ipi(void *data)
241 {
242 /* Probably not needed, but do it anyway */
243 smp_rmb();
244 }
245
246 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
247 static void update_function_graph_func(void);
248 #else
249 static inline void update_function_graph_func(void) { }
250 #endif
251
252 static void update_ftrace_function(void)
253 {
254 ftrace_func_t func;
255
256 /*
257 * Prepare the ftrace_ops that the arch callback will use.
258 * If there's only one ftrace_ops registered, the ftrace_ops_list
259 * will point to the ops we want.
260 */
261 set_function_trace_op = ftrace_ops_list;
262
263 /* If there's no ftrace_ops registered, just call the stub function */
264 if (ftrace_ops_list == &ftrace_list_end) {
265 func = ftrace_stub;
266
267 /*
268 * If we are at the end of the list and this ops is
269 * recursion safe and not dynamic and the arch supports passing ops,
270 * then have the mcount trampoline call the function directly.
271 */
272 } else if (ftrace_ops_list->next == &ftrace_list_end) {
273 func = ftrace_ops_get_func(ftrace_ops_list);
274
275 } else {
276 /* Just use the default ftrace_ops */
277 set_function_trace_op = &ftrace_list_end;
278 func = ftrace_ops_list_func;
279 }
280
281 update_function_graph_func();
282
283 /* If there's no change, then do nothing more here */
284 if (ftrace_trace_function == func)
285 return;
286
287 /*
288 * If we are using the list function, it doesn't care
289 * about the function_trace_ops.
290 */
291 if (func == ftrace_ops_list_func) {
292 ftrace_trace_function = func;
293 /*
294 * Don't even bother setting function_trace_ops,
295 * it would be racy to do so anyway.
296 */
297 return;
298 }
299
300 #ifndef CONFIG_DYNAMIC_FTRACE
301 /*
302 * For static tracing, we need to be a bit more careful.
303 * The function change takes effect immediately. Thus,
304 * we need to coordinate the setting of the function_trace_ops
305 * with the setting of the ftrace_trace_function.
306 *
307 * Set the function to the list ops, which will call the
308 * function we want, albeit indirectly, but it handles the
309 * ftrace_ops and doesn't depend on function_trace_op.
310 */
311 ftrace_trace_function = ftrace_ops_list_func;
312 /*
313 * Make sure all CPUs see this. Yes this is slow, but static
314 * tracing is slow and nasty to have enabled.
315 */
316 schedule_on_each_cpu(ftrace_sync);
317 /* Now all cpus are using the list ops. */
318 function_trace_op = set_function_trace_op;
319 /* Make sure the function_trace_op is visible on all CPUs */
320 smp_wmb();
321 /* Nasty way to force a rmb on all cpus */
322 smp_call_function(ftrace_sync_ipi, NULL, 1);
323 /* OK, we are all set to update the ftrace_trace_function now! */
324 #endif /* !CONFIG_DYNAMIC_FTRACE */
325
326 ftrace_trace_function = func;
327 }
328
329 int using_ftrace_ops_list_func(void)
330 {
331 return ftrace_trace_function == ftrace_ops_list_func;
332 }
333
334 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
335 {
336 ops->next = *list;
337 /*
338 * We are entering ops into the list but another
339 * CPU might be walking that list. We need to make sure
340 * the ops->next pointer is valid before another CPU sees
341 * the ops pointer included into the list.
342 */
343 rcu_assign_pointer(*list, ops);
344 }
345
346 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
347 {
348 struct ftrace_ops **p;
349
350 /*
351 * If we are removing the last function, then simply point
352 * to the ftrace_stub.
353 */
354 if (*list == ops && ops->next == &ftrace_list_end) {
355 *list = &ftrace_list_end;
356 return 0;
357 }
358
359 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
360 if (*p == ops)
361 break;
362
363 if (*p != ops)
364 return -1;
365
366 *p = (*p)->next;
367 return 0;
368 }
369
370 static void add_ftrace_list_ops(struct ftrace_ops **list,
371 struct ftrace_ops *main_ops,
372 struct ftrace_ops *ops)
373 {
374 int first = *list == &ftrace_list_end;
375 add_ftrace_ops(list, ops);
376 if (first)
377 add_ftrace_ops(&ftrace_ops_list, main_ops);
378 }
379
380 static int remove_ftrace_list_ops(struct ftrace_ops **list,
381 struct ftrace_ops *main_ops,
382 struct ftrace_ops *ops)
383 {
384 int ret = remove_ftrace_ops(list, ops);
385 if (!ret && *list == &ftrace_list_end)
386 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
387 return ret;
388 }
389
390 static int __register_ftrace_function(struct ftrace_ops *ops)
391 {
392 if (ops->flags & FTRACE_OPS_FL_DELETED)
393 return -EINVAL;
394
395 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
396 return -EBUSY;
397
398 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
399 /*
400 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
401 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
402 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
403 */
404 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
405 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
406 return -EINVAL;
407
408 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
409 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
410 #endif
411
412 if (!core_kernel_data((unsigned long)ops))
413 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
414
415 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
416 if (control_ops_alloc(ops))
417 return -ENOMEM;
418 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
419 } else
420 add_ftrace_ops(&ftrace_ops_list, ops);
421
422 if (ftrace_enabled)
423 update_ftrace_function();
424
425 return 0;
426 }
427
428 static int __unregister_ftrace_function(struct ftrace_ops *ops)
429 {
430 int ret;
431
432 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
433 return -EBUSY;
434
435 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
436 ret = remove_ftrace_list_ops(&ftrace_control_list,
437 &control_ops, ops);
438 } else
439 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
440
441 if (ret < 0)
442 return ret;
443
444 if (ftrace_enabled)
445 update_ftrace_function();
446
447 return 0;
448 }
449
450 static void ftrace_update_pid_func(void)
451 {
452 /* Only do something if we are tracing something */
453 if (ftrace_trace_function == ftrace_stub)
454 return;
455
456 update_ftrace_function();
457 }
458
459 #ifdef CONFIG_FUNCTION_PROFILER
460 struct ftrace_profile {
461 struct hlist_node node;
462 unsigned long ip;
463 unsigned long counter;
464 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
465 unsigned long long time;
466 unsigned long long time_squared;
467 #endif
468 };
469
470 struct ftrace_profile_page {
471 struct ftrace_profile_page *next;
472 unsigned long index;
473 struct ftrace_profile records[];
474 };
475
476 struct ftrace_profile_stat {
477 atomic_t disabled;
478 struct hlist_head *hash;
479 struct ftrace_profile_page *pages;
480 struct ftrace_profile_page *start;
481 struct tracer_stat stat;
482 };
483
484 #define PROFILE_RECORDS_SIZE \
485 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
486
487 #define PROFILES_PER_PAGE \
488 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
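/*
 * Rough sizing, assuming 4K pages, a 64-bit kernel, and the graph
 * tracer enabled: sizeof(struct ftrace_profile) is about 48 bytes
 * (16 for the hlist_node plus four 8-byte counters), and the page
 * header is 16 bytes, so PROFILES_PER_PAGE comes to roughly
 * (4096 - 16) / 48 ~= 85 records. Exact numbers are arch-dependent;
 * this is only an illustration.
 */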
489
490 static int ftrace_profile_enabled __read_mostly;
491
492 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
493 static DEFINE_MUTEX(ftrace_profile_lock);
494
495 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
496
497 #define FTRACE_PROFILE_HASH_BITS 10
498 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
499
500 static void *
501 function_stat_next(void *v, int idx)
502 {
503 struct ftrace_profile *rec = v;
504 struct ftrace_profile_page *pg;
505
506 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
507
508 again:
509 if (idx != 0)
510 rec++;
511
512 if ((void *)rec >= (void *)&pg->records[pg->index]) {
513 pg = pg->next;
514 if (!pg)
515 return NULL;
516 rec = &pg->records[0];
517 if (!rec->counter)
518 goto again;
519 }
520
521 return rec;
522 }
523
524 static void *function_stat_start(struct tracer_stat *trace)
525 {
526 struct ftrace_profile_stat *stat =
527 container_of(trace, struct ftrace_profile_stat, stat);
528
529 if (!stat || !stat->start)
530 return NULL;
531
532 return function_stat_next(&stat->start->records[0], 0);
533 }
534
535 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
536 /* function graph compares on total time */
537 static int function_stat_cmp(void *p1, void *p2)
538 {
539 struct ftrace_profile *a = p1;
540 struct ftrace_profile *b = p2;
541
542 if (a->time < b->time)
543 return -1;
544 if (a->time > b->time)
545 return 1;
546 else
547 return 0;
548 }
549 #else
550 /* without function graph, compare against hit counts */
551 static int function_stat_cmp(void *p1, void *p2)
552 {
553 struct ftrace_profile *a = p1;
554 struct ftrace_profile *b = p2;
555
556 if (a->counter < b->counter)
557 return -1;
558 if (a->counter > b->counter)
559 return 1;
560 else
561 return 0;
562 }
563 #endif
564
565 static int function_stat_headers(struct seq_file *m)
566 {
567 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
568 seq_printf(m, " Function "
569 "Hit Time Avg s^2\n"
570 " -------- "
571 "--- ---- --- ---\n");
572 #else
573 seq_printf(m, " Function Hit\n"
574 " -------- ---\n");
575 #endif
576 return 0;
577 }
578
579 static int function_stat_show(struct seq_file *m, void *v)
580 {
581 struct ftrace_profile *rec = v;
582 char str[KSYM_SYMBOL_LEN];
583 int ret = 0;
584 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
585 static struct trace_seq s;
586 unsigned long long avg;
587 unsigned long long stddev;
588 #endif
589 mutex_lock(&ftrace_profile_lock);
590
591 /* we raced with function_profile_reset() */
592 if (unlikely(rec->counter == 0)) {
593 ret = -EBUSY;
594 goto out;
595 }
596
597 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
598 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
599
600 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
601 seq_printf(m, " ");
602 avg = rec->time;
603 do_div(avg, rec->counter);
604
605 /* Sample standard deviation (s^2) */
606 if (rec->counter <= 1)
607 stddev = 0;
608 else {
609 /*
610 * Compute the sample variance (s^2):
611 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
612 */
613 stddev = rec->counter * rec->time_squared -
614 rec->time * rec->time;
615
616 /*
617 * Divide by only 1000 for the ns^2 -> us^2 conversion;
618 * trace_print_graph_duration will divide by 1000 again.
619 */
620 do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
621 }
622
623 trace_seq_init(&s);
624 trace_print_graph_duration(rec->time, &s);
625 trace_seq_puts(&s, " ");
626 trace_print_graph_duration(avg, &s);
627 trace_seq_puts(&s, " ");
628 trace_print_graph_duration(stddev, &s);
629 trace_print_seq(m, &s);
630 #endif
631 seq_putc(m, '\n');
632 out:
633 mutex_unlock(&ftrace_profile_lock);
634
635 return ret;
636 }
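/*
 * A worked check of the variance computed above, with hypothetical
 * samples of 10, 20 and 30 us: n = 3, Sum x = 60, Sum x^2 = 1400, so
 * s^2 = (3 * 1400 - 60 * 60) / (3 * 2) = 600 / 6 = 100, a standard
 * deviation of 10 -- matching the textbook form
 * Sum (x - mean)^2 / (n - 1) = (100 + 0 + 100) / 2.
 */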
637
638 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
639 {
640 struct ftrace_profile_page *pg;
641
642 pg = stat->pages = stat->start;
643
644 while (pg) {
645 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
646 pg->index = 0;
647 pg = pg->next;
648 }
649
650 memset(stat->hash, 0,
651 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
652 }
653
654 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
655 {
656 struct ftrace_profile_page *pg;
657 int functions;
658 int pages;
659 int i;
660
661 /* If we already allocated, do nothing */
662 if (stat->pages)
663 return 0;
664
665 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
666 if (!stat->pages)
667 return -ENOMEM;
668
669 #ifdef CONFIG_DYNAMIC_FTRACE
670 functions = ftrace_update_tot_cnt;
671 #else
672 /*
673 * We do not know the number of functions that exist because
674 * dynamic tracing is what counts them. From past experience,
675 * a kernel has around 20K functions. That should be more than enough.
676 * It is highly unlikely we will execute every function in
677 * the kernel.
678 */
679 functions = 20000;
680 #endif
681
682 pg = stat->start = stat->pages;
683
684 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
685
686 for (i = 1; i < pages; i++) {
687 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
688 if (!pg->next)
689 goto out_free;
690 pg = pg->next;
691 }
692
693 return 0;
694
695 out_free:
696 pg = stat->start;
697 while (pg) {
698 unsigned long tmp = (unsigned long)pg;
699
700 pg = pg->next;
701 free_page(tmp);
702 }
703
704 stat->pages = NULL;
705 stat->start = NULL;
706
707 return -ENOMEM;
708 }
709
710 static int ftrace_profile_init_cpu(int cpu)
711 {
712 struct ftrace_profile_stat *stat;
713 int size;
714
715 stat = &per_cpu(ftrace_profile_stats, cpu);
716
717 if (stat->hash) {
718 /* If the profile is already created, simply reset it */
719 ftrace_profile_reset(stat);
720 return 0;
721 }
722
723 /*
724 * We are profiling all functions, but usually only a few thousand
725 * functions are hit. We'll make a hash of 1024 items.
726 */
727 size = FTRACE_PROFILE_HASH_SIZE;
728
729 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
730
731 if (!stat->hash)
732 return -ENOMEM;
733
734 /* Preallocate the function profiling pages */
735 if (ftrace_profile_pages_init(stat) < 0) {
736 kfree(stat->hash);
737 stat->hash = NULL;
738 return -ENOMEM;
739 }
740
741 return 0;
742 }
743
744 static int ftrace_profile_init(void)
745 {
746 int cpu;
747 int ret = 0;
748
749 for_each_possible_cpu(cpu) {
750 ret = ftrace_profile_init_cpu(cpu);
751 if (ret)
752 break;
753 }
754
755 return ret;
756 }
757
758 /* interrupts must be disabled */
759 static struct ftrace_profile *
760 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
761 {
762 struct ftrace_profile *rec;
763 struct hlist_head *hhd;
764 unsigned long key;
765
766 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
767 hhd = &stat->hash[key];
768
769 if (hlist_empty(hhd))
770 return NULL;
771
772 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
773 if (rec->ip == ip)
774 return rec;
775 }
776
777 return NULL;
778 }
779
780 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
781 struct ftrace_profile *rec)
782 {
783 unsigned long key;
784
785 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
786 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
787 }
788
789 /*
790 * The memory is already allocated; this simply finds a new record to use.
791 */
792 static struct ftrace_profile *
793 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
794 {
795 struct ftrace_profile *rec = NULL;
796
797 /* prevent recursion (from NMIs) */
798 if (atomic_inc_return(&stat->disabled) != 1)
799 goto out;
800
801 /*
802 * Try to find the function again since an NMI
803 * could have added it
804 */
805 rec = ftrace_find_profiled_func(stat, ip);
806 if (rec)
807 goto out;
808
809 if (stat->pages->index == PROFILES_PER_PAGE) {
810 if (!stat->pages->next)
811 goto out;
812 stat->pages = stat->pages->next;
813 }
814
815 rec = &stat->pages->records[stat->pages->index++];
816 rec->ip = ip;
817 ftrace_add_profile(stat, rec);
818
819 out:
820 atomic_dec(&stat->disabled);
821
822 return rec;
823 }
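/*
 * Note on the guard above: atomic_inc_return()/atomic_dec() form a
 * reentrancy check, not a lock. An NMI that lands between the inc and
 * the dec sees a count above one and backs off, so stat->pages is
 * never modified reentrantly on a CPU; the NMI's allocation is simply
 * dropped and the record gets created on a later hit.
 */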
824
825 static void
826 function_profile_call(unsigned long ip, unsigned long parent_ip,
827 struct ftrace_ops *ops, struct pt_regs *regs)
828 {
829 struct ftrace_profile_stat *stat;
830 struct ftrace_profile *rec;
831 unsigned long flags;
832
833 if (!ftrace_profile_enabled)
834 return;
835
836 local_irq_save(flags);
837
838 stat = this_cpu_ptr(&ftrace_profile_stats);
839 if (!stat->hash || !ftrace_profile_enabled)
840 goto out;
841
842 rec = ftrace_find_profiled_func(stat, ip);
843 if (!rec) {
844 rec = ftrace_profile_alloc(stat, ip);
845 if (!rec)
846 goto out;
847 }
848
849 rec->counter++;
850 out:
851 local_irq_restore(flags);
852 }
853
854 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
855 static int profile_graph_entry(struct ftrace_graph_ent *trace)
856 {
857 function_profile_call(trace->func, 0, NULL, NULL);
858 return 1;
859 }
860
861 static void profile_graph_return(struct ftrace_graph_ret *trace)
862 {
863 struct ftrace_profile_stat *stat;
864 unsigned long long calltime;
865 struct ftrace_profile *rec;
866 unsigned long flags;
867
868 local_irq_save(flags);
869 stat = this_cpu_ptr(&ftrace_profile_stats);
870 if (!stat->hash || !ftrace_profile_enabled)
871 goto out;
872
873 /* If the calltime was zeroed, ignore it */
874 if (!trace->calltime)
875 goto out;
876
877 calltime = trace->rettime - trace->calltime;
878
879 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
880 int index;
881
882 index = trace->depth;
883
884 /* Append this call time to the parent time to subtract */
885 if (index)
886 current->ret_stack[index - 1].subtime += calltime;
887
888 if (current->ret_stack[index].subtime < calltime)
889 calltime -= current->ret_stack[index].subtime;
890 else
891 calltime = 0;
892 }
893
894 rec = ftrace_find_profiled_func(stat, trace->func);
895 if (rec) {
896 rec->time += calltime;
897 rec->time_squared += calltime * calltime;
898 }
899
900 out:
901 local_irq_restore(flags);
902 }
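/*
 * A numeric illustration of the subtime bookkeeping above
 * (hypothetical times): a parent runs 100us wall time, of which a
 * child consumed 60us. When the child returns, its 60us calltime is
 * appended to ret_stack[depth - 1].subtime; when the parent returns,
 * calltime (100) minus subtime (60) charges 40us of self time to the
 * parent. With TRACE_ITER_GRAPH_TIME set the subtraction is skipped
 * and the full 100us is charged.
 */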
903
904 static int register_ftrace_profiler(void)
905 {
906 return register_ftrace_graph(&profile_graph_return,
907 &profile_graph_entry);
908 }
909
910 static void unregister_ftrace_profiler(void)
911 {
912 unregister_ftrace_graph();
913 }
914 #else
915 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
916 .func = function_profile_call,
917 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
918 INIT_OPS_HASH(ftrace_profile_ops)
919 };
920
921 static int register_ftrace_profiler(void)
922 {
923 return register_ftrace_function(&ftrace_profile_ops);
924 }
925
926 static void unregister_ftrace_profiler(void)
927 {
928 unregister_ftrace_function(&ftrace_profile_ops);
929 }
930 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
931
932 static ssize_t
933 ftrace_profile_write(struct file *filp, const char __user *ubuf,
934 size_t cnt, loff_t *ppos)
935 {
936 unsigned long val;
937 int ret;
938
939 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
940 if (ret)
941 return ret;
942
943 val = !!val;
944
945 mutex_lock(&ftrace_profile_lock);
946 if (ftrace_profile_enabled ^ val) {
947 if (val) {
948 ret = ftrace_profile_init();
949 if (ret < 0) {
950 cnt = ret;
951 goto out;
952 }
953
954 ret = register_ftrace_profiler();
955 if (ret < 0) {
956 cnt = ret;
957 goto out;
958 }
959 ftrace_profile_enabled = 1;
960 } else {
961 ftrace_profile_enabled = 0;
962 /*
963 * unregister_ftrace_profiler calls stop_machine
964 * so this acts like a synchronize_sched().
965 */
966 unregister_ftrace_profiler();
967 }
968 }
969 out:
970 mutex_unlock(&ftrace_profile_lock);
971
972 *ppos += cnt;
973
974 return cnt;
975 }
976
977 static ssize_t
978 ftrace_profile_read(struct file *filp, char __user *ubuf,
979 size_t cnt, loff_t *ppos)
980 {
981 char buf[64]; /* big enough to hold a number */
982 int r;
983
984 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
985 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
986 }
987
988 static const struct file_operations ftrace_profile_fops = {
989 .open = tracing_open_generic,
990 .read = ftrace_profile_read,
991 .write = ftrace_profile_write,
992 .llseek = default_llseek,
993 };
994
995 /* used to initialize the real stat files */
996 static struct tracer_stat function_stats __initdata = {
997 .name = "functions",
998 .stat_start = function_stat_start,
999 .stat_next = function_stat_next,
1000 .stat_cmp = function_stat_cmp,
1001 .stat_headers = function_stat_headers,
1002 .stat_show = function_stat_show
1003 };
1004
1005 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1006 {
1007 struct ftrace_profile_stat *stat;
1008 struct dentry *entry;
1009 char *name;
1010 int ret;
1011 int cpu;
1012
1013 for_each_possible_cpu(cpu) {
1014 stat = &per_cpu(ftrace_profile_stats, cpu);
1015
1016 /* allocate enough for function name + cpu number */
1017 name = kmalloc(32, GFP_KERNEL);
1018 if (!name) {
1019 /*
1020 * The files created are permanent; if something goes
1021 * wrong, we still do not free the memory.
1022 */
1023 WARN(1,
1024 "Could not allocate stat file for cpu %d\n",
1025 cpu);
1026 return;
1027 }
1028 stat->stat = function_stats;
1029 snprintf(name, 32, "function%d", cpu);
1030 stat->stat.name = name;
1031 ret = register_stat_tracer(&stat->stat);
1032 if (ret) {
1033 WARN(1,
1034 "Could not register function stat for cpu %d\n",
1035 cpu);
1036 kfree(name);
1037 return;
1038 }
1039 }
1040
1041 entry = tracefs_create_file("function_profile_enabled", 0644,
1042 d_tracer, NULL, &ftrace_profile_fops);
1043 if (!entry)
1044 pr_warning("Could not create tracefs "
1045 "'function_profile_enabled' entry\n");
1046 }
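/*
 * Typical usage from userspace, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0
 *
 * where function0 is the per-cpu stat file registered above for CPU 0.
 */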
1047
1048 #else /* CONFIG_FUNCTION_PROFILER */
1049 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1050 {
1051 }
1052 #endif /* CONFIG_FUNCTION_PROFILER */
1053
1054 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1055
1056 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1057 static int ftrace_graph_active;
1058 #else
1059 # define ftrace_graph_active 0
1060 #endif
1061
1062 #ifdef CONFIG_DYNAMIC_FTRACE
1063
1064 static struct ftrace_ops *removed_ops;
1065
1066 /*
1067 * Set when doing a global update, like enabling all recs or disabling them.
1068 * It is not set when just updating a single ftrace_ops.
1069 */
1070 static bool update_all_ops;
1071
1072 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1073 # error Dynamic ftrace depends on MCOUNT_RECORD
1074 #endif
1075
1076 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1077
1078 struct ftrace_func_probe {
1079 struct hlist_node node;
1080 struct ftrace_probe_ops *ops;
1081 unsigned long flags;
1082 unsigned long ip;
1083 void *data;
1084 struct list_head free_list;
1085 };
1086
1087 struct ftrace_func_entry {
1088 struct hlist_node hlist;
1089 unsigned long ip;
1090 };
1091
1092 struct ftrace_hash {
1093 unsigned long size_bits;
1094 struct hlist_head *buckets;
1095 unsigned long count;
1096 struct rcu_head rcu;
1097 };
1098
1099 /*
1100 * We make these constant because no one should touch them,
1101 * but they are used as the default "empty hash", to avoid allocating
1102 * it all the time. These are in a read only section such that if
1103 * anyone does try to modify it, it will cause an exception.
1104 */
1105 static const struct hlist_head empty_buckets[1];
1106 static const struct ftrace_hash empty_hash = {
1107 .buckets = (struct hlist_head *)empty_buckets,
1108 };
1109 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1110
1111 static struct ftrace_ops global_ops = {
1112 .func = ftrace_stub,
1113 .local_hash.notrace_hash = EMPTY_HASH,
1114 .local_hash.filter_hash = EMPTY_HASH,
1115 INIT_OPS_HASH(global_ops)
1116 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1117 FTRACE_OPS_FL_INITIALIZED,
1118 };
1119
1120 struct ftrace_page {
1121 struct ftrace_page *next;
1122 struct dyn_ftrace *records;
1123 int index;
1124 int size;
1125 };
1126
1127 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1128 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1129
1130 /* estimate from running different kernels */
1131 #define NR_TO_INIT 10000
1132
1133 static struct ftrace_page *ftrace_pages_start;
1134 static struct ftrace_page *ftrace_pages;
1135
1136 static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
1137 {
1138 return !hash || !hash->count;
1139 }
1140
1141 static struct ftrace_func_entry *
1142 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1143 {
1144 unsigned long key;
1145 struct ftrace_func_entry *entry;
1146 struct hlist_head *hhd;
1147
1148 if (ftrace_hash_empty(hash))
1149 return NULL;
1150
1151 if (hash->size_bits > 0)
1152 key = hash_long(ip, hash->size_bits);
1153 else
1154 key = 0;
1155
1156 hhd = &hash->buckets[key];
1157
1158 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1159 if (entry->ip == ip)
1160 return entry;
1161 }
1162 return NULL;
1163 }
1164
1165 static void __add_hash_entry(struct ftrace_hash *hash,
1166 struct ftrace_func_entry *entry)
1167 {
1168 struct hlist_head *hhd;
1169 unsigned long key;
1170
1171 if (hash->size_bits)
1172 key = hash_long(entry->ip, hash->size_bits);
1173 else
1174 key = 0;
1175
1176 hhd = &hash->buckets[key];
1177 hlist_add_head(&entry->hlist, hhd);
1178 hash->count++;
1179 }
1180
1181 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1182 {
1183 struct ftrace_func_entry *entry;
1184
1185 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1186 if (!entry)
1187 return -ENOMEM;
1188
1189 entry->ip = ip;
1190 __add_hash_entry(hash, entry);
1191
1192 return 0;
1193 }
1194
1195 static void
1196 free_hash_entry(struct ftrace_hash *hash,
1197 struct ftrace_func_entry *entry)
1198 {
1199 hlist_del(&entry->hlist);
1200 kfree(entry);
1201 hash->count--;
1202 }
1203
1204 static void
1205 remove_hash_entry(struct ftrace_hash *hash,
1206 struct ftrace_func_entry *entry)
1207 {
1208 hlist_del(&entry->hlist);
1209 hash->count--;
1210 }
1211
1212 static void ftrace_hash_clear(struct ftrace_hash *hash)
1213 {
1214 struct hlist_head *hhd;
1215 struct hlist_node *tn;
1216 struct ftrace_func_entry *entry;
1217 int size = 1 << hash->size_bits;
1218 int i;
1219
1220 if (!hash->count)
1221 return;
1222
1223 for (i = 0; i < size; i++) {
1224 hhd = &hash->buckets[i];
1225 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1226 free_hash_entry(hash, entry);
1227 }
1228 FTRACE_WARN_ON(hash->count);
1229 }
1230
1231 static void free_ftrace_hash(struct ftrace_hash *hash)
1232 {
1233 if (!hash || hash == EMPTY_HASH)
1234 return;
1235 ftrace_hash_clear(hash);
1236 kfree(hash->buckets);
1237 kfree(hash);
1238 }
1239
1240 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1241 {
1242 struct ftrace_hash *hash;
1243
1244 hash = container_of(rcu, struct ftrace_hash, rcu);
1245 free_ftrace_hash(hash);
1246 }
1247
1248 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1249 {
1250 if (!hash || hash == EMPTY_HASH)
1251 return;
1252 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1253 }
1254
1255 void ftrace_free_filter(struct ftrace_ops *ops)
1256 {
1257 ftrace_ops_init(ops);
1258 free_ftrace_hash(ops->func_hash->filter_hash);
1259 free_ftrace_hash(ops->func_hash->notrace_hash);
1260 }
1261
1262 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1263 {
1264 struct ftrace_hash *hash;
1265 int size;
1266
1267 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1268 if (!hash)
1269 return NULL;
1270
1271 size = 1 << size_bits;
1272 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1273
1274 if (!hash->buckets) {
1275 kfree(hash);
1276 return NULL;
1277 }
1278
1279 hash->size_bits = size_bits;
1280
1281 return hash;
1282 }
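/*
 * A minimal sketch of how the hash primitives compose (hypothetical
 * caller, error handling elided):
 *
 *	struct ftrace_hash *hash;
 *
 *	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *	if (!hash)
 *		return -ENOMEM;
 *	if (add_hash_entry(hash, rec->ip) < 0)
 *		goto fail;
 *	if (ftrace_lookup_ip(hash, rec->ip))
 *		...;		<-- returns the entry just added
 *	free_ftrace_hash(hash);
 */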
1283
1284 static struct ftrace_hash *
1285 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1286 {
1287 struct ftrace_func_entry *entry;
1288 struct ftrace_hash *new_hash;
1289 int size;
1290 int ret;
1291 int i;
1292
1293 new_hash = alloc_ftrace_hash(size_bits);
1294 if (!new_hash)
1295 return NULL;
1296
1297 /* Empty hash? */
1298 if (ftrace_hash_empty(hash))
1299 return new_hash;
1300
1301 size = 1 << hash->size_bits;
1302 for (i = 0; i < size; i++) {
1303 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1304 ret = add_hash_entry(new_hash, entry->ip);
1305 if (ret < 0)
1306 goto free_hash;
1307 }
1308 }
1309
1310 FTRACE_WARN_ON(new_hash->count != hash->count);
1311
1312 return new_hash;
1313
1314 free_hash:
1315 free_ftrace_hash(new_hash);
1316 return NULL;
1317 }
1318
1319 static void
1320 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1321 static void
1322 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1323
1324 static int
1325 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1326 struct ftrace_hash **dst, struct ftrace_hash *src)
1327 {
1328 struct ftrace_func_entry *entry;
1329 struct hlist_node *tn;
1330 struct hlist_head *hhd;
1331 struct ftrace_hash *new_hash;
1332 int size = src->count;
1333 int bits = 0;
1334 int i;
1335
1336 /*
1337 * If the new source is empty, just free dst and assign it
1338 * the empty_hash.
1339 */
1340 if (!src->count) {
1341 new_hash = EMPTY_HASH;
1342 goto update;
1343 }
1344
1345 /*
1346 * Make the hash size about 1/2 the # found
1347 */
1348 for (size /= 2; size; size >>= 1)
1349 bits++;
1350
1351 /* Don't allocate too much */
1352 if (bits > FTRACE_HASH_MAX_BITS)
1353 bits = FTRACE_HASH_MAX_BITS;
1354
1355 new_hash = alloc_ftrace_hash(bits);
1356 if (!new_hash)
1357 return -ENOMEM;
1358
1359 size = 1 << src->size_bits;
1360 for (i = 0; i < size; i++) {
1361 hhd = &src->buckets[i];
1362 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1363 remove_hash_entry(src, entry);
1364 __add_hash_entry(new_hash, entry);
1365 }
1366 }
1367
1368 update:
1369 /*
1370 * Remove the current set, update the hash and add
1371 * them back.
1372 */
1373 ftrace_hash_rec_disable_modify(ops, enable);
1374
1375 rcu_assign_pointer(*dst, new_hash);
1376
1377 ftrace_hash_rec_enable_modify(ops, enable);
1378
1379 return 0;
1380 }
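/*
 * Example of the sizing loop above: with src->count == 50, size starts
 * at 25 and halves through 12, 6, 3, 1, giving bits == 5 and a new
 * hash of 2^5 == 32 buckets -- roughly half the entry count, as
 * intended. FTRACE_HASH_MAX_BITS (12) caps the table at 4096 buckets.
 */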
1381
1382 static bool hash_contains_ip(unsigned long ip,
1383 struct ftrace_ops_hash *hash)
1384 {
1385 /*
1386 * The function record is a match if it exists in the filter
1387 * hash and not in the notrace hash. Note, an empty hash is
1388 * considered a match for the filter hash, but an empty
1389 * notrace hash is considered not in the notrace hash.
1390 */
1391 return (ftrace_hash_empty(hash->filter_hash) ||
1392 ftrace_lookup_ip(hash->filter_hash, ip)) &&
1393 (ftrace_hash_empty(hash->notrace_hash) ||
1394 !ftrace_lookup_ip(hash->notrace_hash, ip));
1395 }
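/*
 * Concretely, for a given ip the four hash states resolve as:
 *
 *	filter_hash	notrace_hash	result
 *	empty		empty		match (trace everything)
 *	contains ip	empty		match
 *	empty		contains ip	no match (explicitly excluded)
 *	lacks ip	empty		no match
 */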
1396
1397 /*
1398 * Test the hashes for this ops to see if we want to call
1399 * the ops->func or not.
1400 *
1401 * It's a match if the ip is in the ops->filter_hash or
1402 * the filter_hash does not exist or is empty,
1403 * AND
1404 * the ip is not in the ops->notrace_hash.
1405 *
1406 * This needs to be called with preemption disabled as
1407 * the hashes are freed with call_rcu_sched().
1408 */
1409 static int
1410 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1411 {
1412 struct ftrace_ops_hash hash;
1413 int ret;
1414
1415 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1416 /*
1417 * There's a small race when adding ops that the ftrace handler
1418 * that wants regs, may be called without them. We can not
1419 * allow that handler to be called if regs is NULL.
1420 */
1421 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1422 return 0;
1423 #endif
1424
1425 hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1426 hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
1427
1428 if (hash_contains_ip(ip, &hash))
1429 ret = 1;
1430 else
1431 ret = 0;
1432
1433 return ret;
1434 }
1435
1436 /*
1437 * This is a double for-loop. Do not use 'break' to break out of it;
1438 * you must use a goto.
1439 */
1440 #define do_for_each_ftrace_rec(pg, rec) \
1441 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1442 int _____i; \
1443 for (_____i = 0; _____i < pg->index; _____i++) { \
1444 rec = &pg->records[_____i];
1445
1446 #define while_for_each_ftrace_rec() \
1447 } \
1448 }
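/*
 * Usage sketch (hypothetical walker; note the goto -- 'break' would
 * only leave the inner loop):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 */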
1449
1450
1451 static int ftrace_cmp_recs(const void *a, const void *b)
1452 {
1453 const struct dyn_ftrace *key = a;
1454 const struct dyn_ftrace *rec = b;
1455
1456 if (key->flags < rec->ip)
1457 return -1;
1458 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1459 return 1;
1460 return 0;
1461 }
1462
1463 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1464 {
1465 struct ftrace_page *pg;
1466 struct dyn_ftrace *rec;
1467 struct dyn_ftrace key;
1468
1469 key.ip = start;
1470 key.flags = end; /* overload flags, as it is unsigned long */
1471
1472 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1473 if (end < pg->records[0].ip ||
1474 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1475 continue;
1476 rec = bsearch(&key, pg->records, pg->index,
1477 sizeof(struct dyn_ftrace),
1478 ftrace_cmp_recs);
1479 if (rec)
1480 return rec->ip;
1481 }
1482
1483 return 0;
1484 }
1485
1486 /**
1487 * ftrace_location - return true if the ip given is a traced location
1488 * @ip: the instruction pointer to check
1489 *
1490 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1491 * That is, the instruction that is either a NOP or call to
1492 * the function tracer. It checks the ftrace internal tables to
1493 * determine if the address belongs or not.
1494 */
1495 unsigned long ftrace_location(unsigned long ip)
1496 {
1497 return ftrace_location_range(ip, ip);
1498 }
1499
1500 /**
1501 * ftrace_text_reserved - return true if range contains an ftrace location
1502 * @start: start of range to search
1503 * @end: end of range to search (inclusive). @end points to the last byte to check.
1504 *
1505 * Returns 1 if the range from @start to @end contains an ftrace location.
1506 * That is, the instruction that is either a NOP or call to
1507 * the function tracer. It checks the ftrace internal tables to
1508 * determine if the address belongs or not.
1509 */
1510 int ftrace_text_reserved(const void *start, const void *end)
1511 {
1512 unsigned long ret;
1513
1514 ret = ftrace_location_range((unsigned long)start,
1515 (unsigned long)end);
1516
1517 return (int)!!ret;
1518 }
1519
1520 /* Test if ops registered to this rec needs regs */
1521 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1522 {
1523 struct ftrace_ops *ops;
1524 bool keep_regs = false;
1525
1526 for (ops = ftrace_ops_list;
1527 ops != &ftrace_list_end; ops = ops->next) {
1528 /* pass rec in as regs to have non-NULL val */
1529 if (ftrace_ops_test(ops, rec->ip, rec)) {
1530 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1531 keep_regs = true;
1532 break;
1533 }
1534 }
1535 }
1536
1537 return keep_regs;
1538 }
1539
1540 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1541 int filter_hash,
1542 bool inc)
1543 {
1544 struct ftrace_hash *hash;
1545 struct ftrace_hash *other_hash;
1546 struct ftrace_page *pg;
1547 struct dyn_ftrace *rec;
1548 int count = 0;
1549 int all = 0;
1550
1551 /* Only update if the ops has been registered */
1552 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1553 return;
1554
1555 /*
1556 * In the filter_hash case:
1557 * If the count is zero, we update all records.
1558 * Otherwise we just update the items in the hash.
1559 *
1560 * In the notrace_hash case:
1561 * We enable the update in the hash.
1562 * As disabling notrace means enabling the tracing,
1563 * and enabling notrace means disabling, the inc variable
1564 * gets inverted.
1565 */
1566 if (filter_hash) {
1567 hash = ops->func_hash->filter_hash;
1568 other_hash = ops->func_hash->notrace_hash;
1569 if (ftrace_hash_empty(hash))
1570 all = 1;
1571 } else {
1572 inc = !inc;
1573 hash = ops->func_hash->notrace_hash;
1574 other_hash = ops->func_hash->filter_hash;
1575 /*
1576 * If the notrace hash has no items,
1577 * then there's nothing to do.
1578 */
1579 if (ftrace_hash_empty(hash))
1580 return;
1581 }
1582
1583 do_for_each_ftrace_rec(pg, rec) {
1584 int in_other_hash = 0;
1585 int in_hash = 0;
1586 int match = 0;
1587
1588 if (all) {
1589 /*
1590 * Only the filter_hash affects all records.
1591 * Update if the record is not in the notrace hash.
1592 */
1593 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1594 match = 1;
1595 } else {
1596 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1597 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1598
1599 /*
1600 * If filter_hash is set, we want to match all functions
1601 * that are in the hash but not in the other hash.
1602 *
1603 * If filter_hash is not set, then we are decrementing.
1604 * That means we match anything that is in the hash
1605 * and also in the other_hash. That is, we need to turn
1606 * off functions in the other hash because they are disabled
1607 * by this hash.
1608 */
1609 if (filter_hash && in_hash && !in_other_hash)
1610 match = 1;
1611 else if (!filter_hash && in_hash &&
1612 (in_other_hash || ftrace_hash_empty(other_hash)))
1613 match = 1;
1614 }
1615 if (!match)
1616 continue;
1617
1618 if (inc) {
1619 rec->flags++;
1620 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1621 return;
1622
1623 /*
1624 * If there's only a single callback registered to a
1625 * function, and the ops has a trampoline registered
1626 * for it, then we can call it directly.
1627 */
1628 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1629 rec->flags |= FTRACE_FL_TRAMP;
1630 else
1631 /*
1632 * If we are adding another function callback
1633 * to this function, and the previous had a
1634 * custom trampoline in use, then we need to go
1635 * back to the default trampoline.
1636 */
1637 rec->flags &= ~FTRACE_FL_TRAMP;
1638
1639 /*
1640 * If any ops wants regs saved for this function
1641 * then all ops will get saved regs.
1642 */
1643 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1644 rec->flags |= FTRACE_FL_REGS;
1645 } else {
1646 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1647 return;
1648 rec->flags--;
1649
1650 /*
1651 * If the rec had REGS enabled and the ops that is
1652 * being removed had REGS set, then see if there is
1653 * still any ops for this record that wants regs.
1654 * If not, we can stop recording them.
1655 */
1656 if (ftrace_rec_count(rec) > 0 &&
1657 rec->flags & FTRACE_FL_REGS &&
1658 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1659 if (!test_rec_ops_needs_regs(rec))
1660 rec->flags &= ~FTRACE_FL_REGS;
1661 }
1662
1663 /*
1664 * If the rec had TRAMP enabled, then it needs to
1665 * be cleared. As TRAMP can only be enabled if
1666 * there is only a single ops attached to it.
1667 * In other words, always disable it on decrementing.
1668 * In the future, we may set it if rec count is
1669 * decremented to one, and the ops that is left
1670 * has a trampoline.
1671 */
1672 rec->flags &= ~FTRACE_FL_TRAMP;
1673
1674 /*
1675 * flags will be cleared in ftrace_check_record()
1676 * if rec count is zero.
1677 */
1678 }
1679 count++;
1680 /* Shortcut, if we handled all records, we are done. */
1681 if (!all && count == hash->count)
1682 return;
1683 } while_for_each_ftrace_rec();
1684 }
1685
1686 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1687 int filter_hash)
1688 {
1689 __ftrace_hash_rec_update(ops, filter_hash, 0);
1690 }
1691
1692 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1693 int filter_hash)
1694 {
1695 __ftrace_hash_rec_update(ops, filter_hash, 1);
1696 }
1697
1698 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1699 int filter_hash, int inc)
1700 {
1701 struct ftrace_ops *op;
1702
1703 __ftrace_hash_rec_update(ops, filter_hash, inc);
1704
1705 if (ops->func_hash != &global_ops.local_hash)
1706 return;
1707
1708 /*
1709 * If the ops shares the global_ops hash, then we need to update
1710 * all ops that are enabled and use this hash.
1711 */
1712 do_for_each_ftrace_op(op, ftrace_ops_list) {
1713 /* Already done */
1714 if (op == ops)
1715 continue;
1716 if (op->func_hash == &global_ops.local_hash)
1717 __ftrace_hash_rec_update(op, filter_hash, inc);
1718 } while_for_each_ftrace_op(op);
1719 }
1720
1721 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1722 int filter_hash)
1723 {
1724 ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1725 }
1726
1727 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1728 int filter_hash)
1729 {
1730 ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1731 }
1732
1733 static void print_ip_ins(const char *fmt, unsigned char *p)
1734 {
1735 int i;
1736
1737 printk(KERN_CONT "%s", fmt);
1738
1739 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1740 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1741 }
1742
1743 /**
1744 * ftrace_bug - report and shutdown function tracer
1745 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1746 * @ip: The address that failed
1747 *
1748 * The arch code that enables or disables the function tracing
1749 * can call ftrace_bug() when it has detected a problem in
1750 * modifying the code. @failed should be one of either:
1751 * EFAULT - if the problem happens on reading the @ip address
1752 * EINVAL - if what is read at @ip is not what was expected
1753 * EPERM - if the problem happens on writing to the @ip address
1754 */
1755 void ftrace_bug(int failed, unsigned long ip)
1756 {
1757 switch (failed) {
1758 case -EFAULT:
1759 FTRACE_WARN_ON_ONCE(1);
1760 pr_info("ftrace faulted on modifying ");
1761 print_ip_sym(ip);
1762 break;
1763 case -EINVAL:
1764 FTRACE_WARN_ON_ONCE(1);
1765 pr_info("ftrace failed to modify ");
1766 print_ip_sym(ip);
1767 print_ip_ins(" actual: ", (unsigned char *)ip);
1768 printk(KERN_CONT "\n");
1769 break;
1770 case -EPERM:
1771 FTRACE_WARN_ON_ONCE(1);
1772 pr_info("ftrace faulted on writing ");
1773 print_ip_sym(ip);
1774 break;
1775 default:
1776 FTRACE_WARN_ON_ONCE(1);
1777 pr_info("ftrace faulted on unknown error ");
1778 print_ip_sym(ip);
1779 }
1780 }
1781
1782 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1783 {
1784 unsigned long flag = 0UL;
1785
1786 /*
1787 * If we are updating calls:
1788 *
1789 * If the record has a ref count, then we need to enable it
1790 * because someone is using it.
1791 *
1792 * Otherwise we make sure its disabled.
1793 *
1794 * If we are disabling calls, then disable all records that
1795 * are enabled.
1796 */
1797 if (enable && ftrace_rec_count(rec))
1798 flag = FTRACE_FL_ENABLED;
1799
1800 /*
1801 * If enabling and the REGS flag does not match the REGS_EN, or
1802 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
1803 * this record. Set flags to fail the compare against ENABLED.
1804 */
1805 if (flag) {
1806 if (!(rec->flags & FTRACE_FL_REGS) !=
1807 !(rec->flags & FTRACE_FL_REGS_EN))
1808 flag |= FTRACE_FL_REGS;
1809
1810 if (!(rec->flags & FTRACE_FL_TRAMP) !=
1811 !(rec->flags & FTRACE_FL_TRAMP_EN))
1812 flag |= FTRACE_FL_TRAMP;
1813 }
1814
1815 /* If the state of this record hasn't changed, then do nothing */
1816 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1817 return FTRACE_UPDATE_IGNORE;
1818
1819 if (flag) {
1820 /* Save off if rec is being enabled (for return value) */
1821 flag ^= rec->flags & FTRACE_FL_ENABLED;
1822
1823 if (update) {
1824 rec->flags |= FTRACE_FL_ENABLED;
1825 if (flag & FTRACE_FL_REGS) {
1826 if (rec->flags & FTRACE_FL_REGS)
1827 rec->flags |= FTRACE_FL_REGS_EN;
1828 else
1829 rec->flags &= ~FTRACE_FL_REGS_EN;
1830 }
1831 if (flag & FTRACE_FL_TRAMP) {
1832 if (rec->flags & FTRACE_FL_TRAMP)
1833 rec->flags |= FTRACE_FL_TRAMP_EN;
1834 else
1835 rec->flags &= ~FTRACE_FL_TRAMP_EN;
1836 }
1837 }
1838
1839 /*
1840 * If this record is being updated from a nop, then
1841 * return UPDATE_MAKE_CALL.
1842 * Otherwise,
1843 * return UPDATE_MODIFY_CALL to tell the caller to convert
1844 * from the save-regs to the non-save-regs function, or
1845 * vice versa, or to/from a trampoline call.
1846 */
1847 if (flag & FTRACE_FL_ENABLED)
1848 return FTRACE_UPDATE_MAKE_CALL;
1849
1850 return FTRACE_UPDATE_MODIFY_CALL;
1851 }
1852
1853 if (update) {
1854 /* If there's no more users, clear all flags */
1855 if (!ftrace_rec_count(rec))
1856 rec->flags = 0;
1857 else
1858 /*
1859 * Just disable the record, but keep the ops TRAMP
1860 * and REGS states. The _EN flags must be disabled though.
1861 */
1862 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
1863 FTRACE_FL_REGS_EN);
1864 }
1865
1866 return FTRACE_UPDATE_MAKE_NOP;
1867 }
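/*
 * An illustration of the flag XOR above (hypothetical record states):
 * if the rec is currently disabled and flag == FTRACE_FL_ENABLED, the
 * XOR leaves ENABLED set and FTRACE_UPDATE_MAKE_CALL is returned
 * (nop -> call). If the rec is already ENABLED but its REGS bit
 * changed, the XOR clears ENABLED, leaving only REGS, and
 * FTRACE_UPDATE_MODIFY_CALL is returned (call -> different call).
 */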
1868
1869 /**
1870 * ftrace_update_record, set a record that now is tracing or not
1871 * @rec: the record to update
1872 * @enable: set to 1 if the record is tracing, zero to force disable
1873 *
1874 * The records that represent all functions that can be traced need
1875 * to be updated when tracing has been enabled.
1876 */
1877 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1878 {
1879 return ftrace_check_record(rec, enable, 1);
1880 }
1881
1882 /**
1883 * ftrace_test_record, check if the record has been enabled or not
1884 * @rec: the record to test
1885 * @enable: set to 1 to check if enabled, 0 if it is disabled
1886 *
1887 * The arch code may need to test if a record is already set to
1888 * tracing to determine how to modify the function code that it
1889 * represents.
1890 */
1891 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1892 {
1893 return ftrace_check_record(rec, enable, 0);
1894 }
1895
1896 static struct ftrace_ops *
1897 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
1898 {
1899 struct ftrace_ops *op;
1900 unsigned long ip = rec->ip;
1901
1902 do_for_each_ftrace_op(op, ftrace_ops_list) {
1903
1904 if (!op->trampoline)
1905 continue;
1906
1907 if (hash_contains_ip(ip, op->func_hash))
1908 return op;
1909 } while_for_each_ftrace_op(op);
1910
1911 return NULL;
1912 }
1913
1914 static struct ftrace_ops *
1915 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
1916 {
1917 struct ftrace_ops *op;
1918 unsigned long ip = rec->ip;
1919
1920 /*
1921 * Need to check removed ops first.
1922 * If they are being removed, and this rec has a tramp,
1923 * and this rec is in the ops list, then it would be the
1924 * one with the tramp.
1925 */
1926 if (removed_ops) {
1927 if (hash_contains_ip(ip, &removed_ops->old_hash))
1928 return removed_ops;
1929 }
1930
1931 /*
1932 * Need to find the current trampoline for a rec.
1933 * Now, a trampoline is only attached to a rec if there
1934 * was a single 'ops' attached to it. But this can be called
1935 * when we are adding another op to the rec or removing the
1936 * current one. Thus, if the op is being added, we can
1937 * ignore it because it hasn't attached itself to the rec
1938 * yet.
1939 *
1940 * If an ops is being modified (hooking to different functions)
1941 * then we don't care about the new functions that are being
1942 * added, just the old ones (that are probably being removed).
1943 *
1944 * If we are adding an ops to a function that already is using
1945 * a trampoline, it needs to be removed (trampolines are only
1946 * for single ops connected), then an ops that is not being
1947 * modified also needs to be checked.
1948 */
1949 do_for_each_ftrace_op(op, ftrace_ops_list) {
1950
1951 if (!op->trampoline)
1952 continue;
1953
1954 /*
1955 * If the ops is being added, it hasn't gotten to
1956 * the point to be removed from this tree yet.
1957 */
1958 if (op->flags & FTRACE_OPS_FL_ADDING)
1959 continue;
1960
1961
1962 /*
1963 * If the ops is being modified and is in the old
1964 * hash, then it is probably being removed from this
1965 * function.
1966 */
1967 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
1968 hash_contains_ip(ip, &op->old_hash))
1969 return op;
1970 /*
1971 * If the ops is not being added or modified, and it's
1972 * in its normal filter hash, then this must be the one
1973 * we want!
1974 */
1975 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
1976 hash_contains_ip(ip, op->func_hash))
1977 return op;
1978
1979 } while_for_each_ftrace_op(op);
1980
1981 return NULL;
1982 }
1983
1984 static struct ftrace_ops *
1985 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
1986 {
1987 struct ftrace_ops *op;
1988 unsigned long ip = rec->ip;
1989
1990 do_for_each_ftrace_op(op, ftrace_ops_list) {
1991 /* pass rec in as regs to have non-NULL val */
1992 if (hash_contains_ip(ip, op->func_hash))
1993 return op;
1994 } while_for_each_ftrace_op(op);
1995
1996 return NULL;
1997 }
1998
1999 /**
2000 * ftrace_get_addr_new - Get the call address to set to
2001 * @rec: The ftrace record descriptor
2002 *
2003 * If the record has the FTRACE_FL_REGS set, that means that it
2004 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2005 * is not set, then it wants to convert to the normal callback.
2006 *
2007 * Returns the address of the trampoline to set to
2008 */
2009 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2010 {
2011 struct ftrace_ops *ops;
2012
2013 /* Trampolines take precedence over regs */
2014 if (rec->flags & FTRACE_FL_TRAMP) {
2015 ops = ftrace_find_tramp_ops_new(rec);
2016 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2017 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2018 (void *)rec->ip, (void *)rec->ip, rec->flags);
2019 /* Ftrace is shutting down, return anything */
2020 return (unsigned long)FTRACE_ADDR;
2021 }
2022 return ops->trampoline;
2023 }
2024
2025 if (rec->flags & FTRACE_FL_REGS)
2026 return (unsigned long)FTRACE_REGS_ADDR;
2027 else
2028 return (unsigned long)FTRACE_ADDR;
2029 }
2030
2031 /**
2032 * ftrace_get_addr_curr - Get the call address that is already there
2033 * @rec: The ftrace record descriptor
2034 *
2035 * The FTRACE_FL_REGS_EN is set when the record already points to
2036 * a function that saves all the regs. Basically the '_EN' version
2037 * represents the current state of the function.
2038 *
2039 * Returns the address of the trampoline that is currently being called
2040 */
2041 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2042 {
2043 struct ftrace_ops *ops;
2044
2045 /* Trampolines take precedence over regs */
2046 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2047 ops = ftrace_find_tramp_ops_curr(rec);
2048 if (FTRACE_WARN_ON(!ops)) {
2049 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2050 (void *)rec->ip, (void *)rec->ip);
2051 /* Ftrace is shutting down, return anything */
2052 return (unsigned long)FTRACE_ADDR;
2053 }
2054 return ops->trampoline;
2055 }
2056
2057 if (rec->flags & FTRACE_FL_REGS_EN)
2058 return (unsigned long)FTRACE_REGS_ADDR;
2059 else
2060 return (unsigned long)FTRACE_ADDR;
2061 }
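
/*
 * In short, the two lookups above reduce to the following (the "_EN"
 * flags describe the current state, the plain flags the wanted state):
 *
 *	FTRACE_FL_TRAMP / FTRACE_FL_TRAMP_EN  ->  ops->trampoline
 *	FTRACE_FL_REGS  / FTRACE_FL_REGS_EN   ->  FTRACE_REGS_ADDR
 *	neither                               ->  FTRACE_ADDR
 */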
2062
2063 static int
2064 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2065 {
2066 unsigned long ftrace_old_addr;
2067 unsigned long ftrace_addr;
2068 int ret;
2069
2070 ftrace_addr = ftrace_get_addr_new(rec);
2071
2072 /* This needs to be done before we call ftrace_update_record */
2073 ftrace_old_addr = ftrace_get_addr_curr(rec);
2074
2075 ret = ftrace_update_record(rec, enable);
2076
2077 switch (ret) {
2078 case FTRACE_UPDATE_IGNORE:
2079 return 0;
2080
2081 case FTRACE_UPDATE_MAKE_CALL:
2082 return ftrace_make_call(rec, ftrace_addr);
2083
2084 case FTRACE_UPDATE_MAKE_NOP:
2085 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2086
2087 case FTRACE_UPDATE_MODIFY_CALL:
2088 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2089 }
2090
2091 return -1; /* unknown ftrace bug */
2092 }
2093
2094 void __weak ftrace_replace_code(int enable)
2095 {
2096 struct dyn_ftrace *rec;
2097 struct ftrace_page *pg;
2098 int failed;
2099
2100 if (unlikely(ftrace_disabled))
2101 return;
2102
2103 do_for_each_ftrace_rec(pg, rec) {
2104 failed = __ftrace_replace_code(rec, enable);
2105 if (failed) {
2106 ftrace_bug(failed, rec->ip);
2107 /* Stop processing */
2108 return;
2109 }
2110 } while_for_each_ftrace_rec();
2111 }
2112
2113 struct ftrace_rec_iter {
2114 struct ftrace_page *pg;
2115 int index;
2116 };
2117
2118 /**
2119 * ftrace_rec_iter_start - start up iterating over traced functions
2120 *
2121 * Returns an iterator handle that is used to iterate over all
2122 * the records that represent address locations where functions
2123 * are traced.
2124 *
2125 * May return NULL if no records are available.
2126 */
2127 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2128 {
2129 /*
2130 * We only use a single iterator.
2131 * Protected by the ftrace_lock mutex.
2132 */
2133 static struct ftrace_rec_iter ftrace_rec_iter;
2134 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2135
2136 iter->pg = ftrace_pages_start;
2137 iter->index = 0;
2138
2139 /* Could have empty pages */
2140 while (iter->pg && !iter->pg->index)
2141 iter->pg = iter->pg->next;
2142
2143 if (!iter->pg)
2144 return NULL;
2145
2146 return iter;
2147 }
2148
2149 /**
2150 * ftrace_rec_iter_next - get the next record to process.
2151 * @iter: The handle to the iterator.
2152 *
2153 * Returns the next iterator after the given iterator @iter.
2154 */
2155 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2156 {
2157 iter->index++;
2158
2159 if (iter->index >= iter->pg->index) {
2160 iter->pg = iter->pg->next;
2161 iter->index = 0;
2162
2163 /* Could have empty pages */
2164 while (iter->pg && !iter->pg->index)
2165 iter->pg = iter->pg->next;
2166 }
2167
2168 if (!iter->pg)
2169 return NULL;
2170
2171 return iter;
2172 }
2173
2174 /**
2175 * ftrace_rec_iter_record - get the record at the iterator location
2176 * @iter: The current iterator location
2177 *
2178 * Returns the record that the current @iter is at.
2179 */
2180 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2181 {
2182 return &iter->pg->records[iter->index];
2183 }
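
/*
 * A minimal sketch of how an arch might drive this iterator when
 * converting call sites (my_arch_convert() is a hypothetical helper,
 * not part of this API):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		my_arch_convert(rec);
 *	}
 */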
2184
2185 static int
2186 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2187 {
2188 unsigned long ip;
2189 int ret;
2190
2191 ip = rec->ip;
2192
2193 if (unlikely(ftrace_disabled))
2194 return 0;
2195
2196 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2197 if (ret) {
2198 ftrace_bug(ret, ip);
2199 return 0;
2200 }
2201 return 1;
2202 }
2203
2204 /*
2205 * archs can override this function if they must do something
2206 * before the code modification is performed.
2207 */
2208 int __weak ftrace_arch_code_modify_prepare(void)
2209 {
2210 return 0;
2211 }
2212
2213 /*
2214 * archs can override this function if they must do something
2215 * after the code modification is performed.
2216 */
2217 int __weak ftrace_arch_code_modify_post_process(void)
2218 {
2219 return 0;
2220 }
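
/*
 * An illustrative override, assuming a hypothetical arch that must make
 * kernel text writable around the modification (helper names are made
 * up for the sketch):
 *
 *	int ftrace_arch_code_modify_prepare(void)
 *	{
 *		my_arch_text_rw();
 *		return 0;
 *	}
 *
 *	int ftrace_arch_code_modify_post_process(void)
 *	{
 *		my_arch_text_ro();
 *		return 0;
 *	}
 */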
2221
2222 void ftrace_modify_all_code(int command)
2223 {
2224 int update = command & FTRACE_UPDATE_TRACE_FUNC;
2225 int err = 0;
2226
2227 /*
2228 * If the ftrace_caller calls a ftrace_ops func directly,
2229 * we need to make sure that it only traces functions it
2230 * expects to trace. When doing the switch of functions,
2231 * we need to update to the ftrace_ops_list_func first
2232 * before the transition between old and new calls are set,
2233 * as the ftrace_ops_list_func will check the ops hashes
2234 * to make sure the ops are having the right functions
2235 * traced.
2236 */
2237 if (update) {
2238 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2239 if (FTRACE_WARN_ON(err))
2240 return;
2241 }
2242
2243 if (command & FTRACE_UPDATE_CALLS)
2244 ftrace_replace_code(1);
2245 else if (command & FTRACE_DISABLE_CALLS)
2246 ftrace_replace_code(0);
2247
2248 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2249 function_trace_op = set_function_trace_op;
2250 smp_wmb();
2251 /* If irqs are disabled, we are in stop machine */
2252 if (!irqs_disabled())
2253 smp_call_function(ftrace_sync_ipi, NULL, 1);
2254 err = ftrace_update_ftrace_func(ftrace_trace_function);
2255 if (FTRACE_WARN_ON(err))
2256 return;
2257 }
2258
2259 if (command & FTRACE_START_FUNC_RET)
2260 err = ftrace_enable_ftrace_graph_caller();
2261 else if (command & FTRACE_STOP_FUNC_RET)
2262 err = ftrace_disable_ftrace_graph_caller();
2263 FTRACE_WARN_ON(err);
2264 }
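
/*
 * The command argument is a bitmask, so callers compose it. For
 * example, enabling the call sites together with the function-graph
 * return hook would look like (a sketch; in this file the mask is
 * normally built up by ftrace_startup() and ftrace_shutdown()):
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS | FTRACE_START_FUNC_RET);
 */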
2265
2266 static int __ftrace_modify_code(void *data)
2267 {
2268 int *command = data;
2269
2270 ftrace_modify_all_code(*command);
2271
2272 return 0;
2273 }
2274
2275 /**
2276 * ftrace_run_stop_machine - go back to the stop machine method
2277 * @command: The command to tell ftrace what to do
2278 *
2279 * If an arch needs to fall back to the stop machine method, the
2280 * it can call this function.
2281 */
2282 void ftrace_run_stop_machine(int command)
2283 {
2284 stop_machine(__ftrace_modify_code, &command, NULL);
2285 }
2286
2287 /**
2288 * arch_ftrace_update_code - modify the code to trace or not trace
2289 * @command: The command that needs to be done
2290 *
2291 * Archs can override this function if they do not need to
2292 * run stop_machine() to modify code.
2293 */
2294 void __weak arch_ftrace_update_code(int command)
2295 {
2296 ftrace_run_stop_machine(command);
2297 }
2298
2299 static void ftrace_run_update_code(int command)
2300 {
2301 int ret;
2302
2303 ret = ftrace_arch_code_modify_prepare();
2304 FTRACE_WARN_ON(ret);
2305 if (ret)
2306 return;
2307
2308 /*
2309 * By default we use stop_machine() to modify the code.
2310 * But archs can do whatever they want as long as it
2311 * is safe. stop_machine() is the safest, but also
2312 * produces the most overhead.
2313 */
2314 arch_ftrace_update_code(command);
2315
2316 ret = ftrace_arch_code_modify_post_process();
2317 FTRACE_WARN_ON(ret);
2318 }
2319
2320 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2321 struct ftrace_ops_hash *old_hash)
2322 {
2323 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2324 ops->old_hash.filter_hash = old_hash->filter_hash;
2325 ops->old_hash.notrace_hash = old_hash->notrace_hash;
2326 ftrace_run_update_code(command);
2327 ops->old_hash.filter_hash = NULL;
2328 ops->old_hash.notrace_hash = NULL;
2329 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2330 }
2331
2332 static ftrace_func_t saved_ftrace_func;
2333 static int ftrace_start_up;
2334
2335 static void control_ops_free(struct ftrace_ops *ops)
2336 {
2337 free_percpu(ops->disabled);
2338 }
2339
2340 static void ftrace_startup_enable(int command)
2341 {
2342 if (saved_ftrace_func != ftrace_trace_function) {
2343 saved_ftrace_func = ftrace_trace_function;
2344 command |= FTRACE_UPDATE_TRACE_FUNC;
2345 }
2346
2347 if (!command || !ftrace_enabled)
2348 return;
2349
2350 ftrace_run_update_code(command);
2351 }
2352
2353 static void ftrace_startup_all(int command)
2354 {
2355 update_all_ops = true;
2356 ftrace_startup_enable(command);
2357 update_all_ops = false;
2358 }
2359
2360 static int ftrace_startup(struct ftrace_ops *ops, int command)
2361 {
2362 int ret;
2363
2364 if (unlikely(ftrace_disabled))
2365 return -ENODEV;
2366
2367 ret = __register_ftrace_function(ops);
2368 if (ret)
2369 return ret;
2370
2371 ftrace_start_up++;
2372 command |= FTRACE_UPDATE_CALLS;
2373
2374 /*
2375 * Note that ftrace probes use this to start up
2376 * and modify functions it will probe. But we still
2377 * set the ADDING flag for modification, as probes
2378 * do not have trampolines. If they add them in the
2379 * future, then the probes will need to distinguish
2380 * between adding and updating probes.
2381 */
2382 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2383
2384 ftrace_hash_rec_enable(ops, 1);
2385
2386 ftrace_startup_enable(command);
2387
2388 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2389
2390 return 0;
2391 }
2392
2393 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2394 {
2395 int ret;
2396
2397 if (unlikely(ftrace_disabled))
2398 return -ENODEV;
2399
2400 ret = __unregister_ftrace_function(ops);
2401 if (ret)
2402 return ret;
2403
2404 ftrace_start_up--;
2405 /*
2406 * Just warn in case of imbalance; no need to kill ftrace, it's not
2407 * critical, but the ftrace_call callers may never be nopped again after
2408 * further ftrace uses.
2409 */
2410 WARN_ON_ONCE(ftrace_start_up < 0);
2411
2412 ftrace_hash_rec_disable(ops, 1);
2413
2414 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2415
2416 command |= FTRACE_UPDATE_CALLS;
2417
2418 if (saved_ftrace_func != ftrace_trace_function) {
2419 saved_ftrace_func = ftrace_trace_function;
2420 command |= FTRACE_UPDATE_TRACE_FUNC;
2421 }
2422
2423 if (!command || !ftrace_enabled) {
2424 /*
2425 * If these are control ops, they still need their
2426 * per_cpu field freed. Since function tracing is
2427 * not currently active, we can just free them
2428 * without synchronizing all CPUs.
2429 */
2430 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2431 control_ops_free(ops);
2432 return 0;
2433 }
2434
2435 /*
2436 * If the ops uses a trampoline, then it needs to be
2437 * tested first on update.
2438 */
2439 ops->flags |= FTRACE_OPS_FL_REMOVING;
2440 removed_ops = ops;
2441
2442 /* The trampoline logic checks the old hashes */
2443 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2444 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2445
2446 ftrace_run_update_code(command);
2447
2448 /*
2449 * If there are no more ops registered with ftrace, run a
2450 * sanity check to make sure all rec flags are cleared.
2451 */
2452 if (ftrace_ops_list == &ftrace_list_end) {
2453 struct ftrace_page *pg;
2454 struct dyn_ftrace *rec;
2455
2456 do_for_each_ftrace_rec(pg, rec) {
2457 if (FTRACE_WARN_ON_ONCE(rec->flags))
2458 pr_warn(" %pS flags:%lx\n",
2459 (void *)rec->ip, rec->flags);
2460 } while_for_each_ftrace_rec();
2461 }
2462
2463 ops->old_hash.filter_hash = NULL;
2464 ops->old_hash.notrace_hash = NULL;
2465
2466 removed_ops = NULL;
2467 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2468
2469 /*
2470 * Dynamic ops may be freed, we must make sure that all
2471 * callers are done before leaving this function.
2472 * The same goes for freeing the per_cpu data of the control
2473 * ops.
2474 *
2475 * Again, normal synchronize_sched() is not good enough.
2476 * We need to do a hard force of sched synchronization.
2477 * This is because we use preempt_disable() to do RCU, but
2478 * the function tracers can be called where RCU is not watching
2479 * (like before user_exit()). We can not rely on the RCU
2480 * infrastructure to do the synchronization, thus we must do it
2481 * ourselves.
2482 */
2483 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2484 schedule_on_each_cpu(ftrace_sync);
2485
2486 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2487 control_ops_free(ops);
2488 }
2489
2490 return 0;
2491 }
2492
2493 static void ftrace_startup_sysctl(void)
2494 {
2495 int command;
2496
2497 if (unlikely(ftrace_disabled))
2498 return;
2499
2500 /* Force update next time */
2501 saved_ftrace_func = NULL;
2502 /* ftrace_start_up is true if we want ftrace running */
2503 if (ftrace_start_up) {
2504 command = FTRACE_UPDATE_CALLS;
2505 if (ftrace_graph_active)
2506 command |= FTRACE_START_FUNC_RET;
2507 ftrace_startup_enable(command);
2508 }
2509 }
2510
2511 static void ftrace_shutdown_sysctl(void)
2512 {
2513 int command;
2514
2515 if (unlikely(ftrace_disabled))
2516 return;
2517
2518 /* ftrace_start_up is true if ftrace is running */
2519 if (ftrace_start_up) {
2520 command = FTRACE_DISABLE_CALLS;
2521 if (ftrace_graph_active)
2522 command |= FTRACE_STOP_FUNC_RET;
2523 ftrace_run_update_code(command);
2524 }
2525 }
2526
2527 static cycle_t ftrace_update_time;
2528 unsigned long ftrace_update_tot_cnt;
2529
2530 static inline int ops_traces_mod(struct ftrace_ops *ops)
2531 {
2532 /*
2533 * An empty filter_hash means the ops traces all (module) functions.
2534 * But the notrace hash requires testing individual module functions.
2535 */
2536 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2537 ftrace_hash_empty(ops->func_hash->notrace_hash);
2538 }
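
/*
 * For example: with both hashes empty the ops traces everything, so new
 * module functions are covered wholesale; once filter_hash is non-empty
 * only the listed functions are traced, and each module function must be
 * checked individually (see referenced_filters() below).
 */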
2539
2540 /*
2541 * Check if the current ops references the record.
2542 *
2543 * If the ops traces all functions, then it was already accounted for.
2544 * If the ops does not trace the current record function, skip it.
2545 * If the ops ignores the function via notrace filter, skip it.
2546 */
2547 static inline bool
2548 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2549 {
2550 /* If ops isn't enabled, ignore it */
2551 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2552 return 0;
2553
2554 /* If ops traces all mods, we already accounted for it */
2555 if (ops_traces_mod(ops))
2556 return 0;
2557
2558 /* The function must be in the filter */
2559 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2560 !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2561 return 0;
2562
2563 /* If in notrace hash, we ignore it too */
2564 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2565 return 0;
2566
2567 return 1;
2568 }
2569
2570 static int referenced_filters(struct dyn_ftrace *rec)
2571 {
2572 struct ftrace_ops *ops;
2573 int cnt = 0;
2574
2575 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2576 if (ops_references_rec(ops, rec))
2577 cnt++;
2578 }
2579
2580 return cnt;
2581 }
2582
2583 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2584 {
2585 struct ftrace_page *pg;
2586 struct dyn_ftrace *p;
2587 cycle_t start, stop;
2588 unsigned long update_cnt = 0;
2589 unsigned long ref = 0;
2590 bool test = false;
2591 int i;
2592
2593 /*
2594 * When adding a module, we need to check if tracers are
2595 * currently enabled and if they are set to trace all functions.
2596 * If they are, we need to enable the module functions as well
2597 * as update the reference counts for those function records.
2598 */
2599 if (mod) {
2600 struct ftrace_ops *ops;
2601
2602 for (ops = ftrace_ops_list;
2603 ops != &ftrace_list_end; ops = ops->next) {
2604 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2605 if (ops_traces_mod(ops))
2606 ref++;
2607 else
2608 test = true;
2609 }
2610 }
2611 }
2612
2613 start = ftrace_now(raw_smp_processor_id());
2614
2615 for (pg = new_pgs; pg; pg = pg->next) {
2616
2617 for (i = 0; i < pg->index; i++) {
2618 int cnt = ref;
2619
2620 /* If something went wrong, bail without enabling anything */
2621 if (unlikely(ftrace_disabled))
2622 return -1;
2623
2624 p = &pg->records[i];
2625 if (test)
2626 cnt += referenced_filters(p);
2627 p->flags = cnt;
2628
2629 /*
2630 * Do the initial record conversion from mcount jump
2631 * to the NOP instructions.
2632 */
2633 if (!ftrace_code_disable(mod, p))
2634 break;
2635
2636 update_cnt++;
2637
2638 /*
2639 * If the tracing is enabled, go ahead and enable the record.
2640 *
2641 * The reason not to enable the record immediately is the
2642 * inherent check of ftrace_make_nop/ftrace_make_call for
2643 * correct previous instructions. Doing the NOP
2644 * conversion first puts the module into the correct state, thus
2645 * passing the ftrace_make_call check.
2646 */
2647 if (ftrace_start_up && cnt) {
2648 int failed = __ftrace_replace_code(p, 1);
2649 if (failed)
2650 ftrace_bug(failed, p->ip);
2651 }
2652 }
2653 }
2654
2655 stop = ftrace_now(raw_smp_processor_id());
2656 ftrace_update_time = stop - start;
2657 ftrace_update_tot_cnt += update_cnt;
2658
2659 return 0;
2660 }
2661
2662 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2663 {
2664 int order;
2665 int cnt;
2666
2667 if (WARN_ON(!count))
2668 return -EINVAL;
2669
2670 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2671
2672 /*
2673 * We want to fill as much as possible. No more than a page
2674 * may be empty.
2675 */
2676 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2677 order--;
2678
2679 again:
2680 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2681
2682 if (!pg->records) {
2683 /* if we can't allocate this size, try something smaller */
2684 if (!order)
2685 return -ENOMEM;
2686 order >>= 1;
2687 goto again;
2688 }
2689
2690 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2691 pg->size = cnt;
2692
2693 if (cnt > count)
2694 cnt = count;
2695
2696 return cnt;
2697 }
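
/*
 * A worked example of the sizing above, assuming PAGE_SIZE == 4096 and
 * ENTRY_SIZE == 32 (so ENTRIES_PER_PAGE == 128): for count == 1000,
 * DIV_ROUND_UP(1000, 128) == 8 pages gives order 3. Then
 * (4096 << 3) / 32 == 1024, which is less than 1000 + 128, so the order
 * is not reduced; the allocation holds 1024 records and 1000 are used.
 */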
2698
2699 static struct ftrace_page *
2700 ftrace_allocate_pages(unsigned long num_to_init)
2701 {
2702 struct ftrace_page *start_pg;
2703 struct ftrace_page *pg;
2704 int order;
2705 int cnt;
2706
2707 if (!num_to_init)
2708 return NULL;
2709
2710 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2711 if (!pg)
2712 return NULL;
2713
2714 /*
2715 * Try to allocate as much as possible in one contiguous
2716 * location that fills in all of the space. We want to
2717 * waste as little space as possible.
2718 */
2719 for (;;) {
2720 cnt = ftrace_allocate_records(pg, num_to_init);
2721 if (cnt < 0)
2722 goto free_pages;
2723
2724 num_to_init -= cnt;
2725 if (!num_to_init)
2726 break;
2727
2728 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2729 if (!pg->next)
2730 goto free_pages;
2731
2732 pg = pg->next;
2733 }
2734
2735 return start_pg;
2736
2737 free_pages:
2738 pg = start_pg;
2739 while (pg) {
2740 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2741 free_pages((unsigned long)pg->records, order);
2742 start_pg = pg->next;
2743 kfree(pg);
2744 pg = start_pg;
2745 }
2746 pr_info("ftrace: FAILED to allocate memory for functions\n");
2747 return NULL;
2748 }
2749
2750 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2751
2752 struct ftrace_iterator {
2753 loff_t pos;
2754 loff_t func_pos;
2755 struct ftrace_page *pg;
2756 struct dyn_ftrace *func;
2757 struct ftrace_func_probe *probe;
2758 struct trace_parser parser;
2759 struct ftrace_hash *hash;
2760 struct ftrace_ops *ops;
2761 int hidx;
2762 int idx;
2763 unsigned flags;
2764 };
2765
2766 static void *
2767 t_hash_next(struct seq_file *m, loff_t *pos)
2768 {
2769 struct ftrace_iterator *iter = m->private;
2770 struct hlist_node *hnd = NULL;
2771 struct hlist_head *hhd;
2772
2773 (*pos)++;
2774 iter->pos = *pos;
2775
2776 if (iter->probe)
2777 hnd = &iter->probe->node;
2778 retry:
2779 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2780 return NULL;
2781
2782 hhd = &ftrace_func_hash[iter->hidx];
2783
2784 if (hlist_empty(hhd)) {
2785 iter->hidx++;
2786 hnd = NULL;
2787 goto retry;
2788 }
2789
2790 if (!hnd)
2791 hnd = hhd->first;
2792 else {
2793 hnd = hnd->next;
2794 if (!hnd) {
2795 iter->hidx++;
2796 goto retry;
2797 }
2798 }
2799
2800 if (WARN_ON_ONCE(!hnd))
2801 return NULL;
2802
2803 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2804
2805 return iter;
2806 }
2807
2808 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2809 {
2810 struct ftrace_iterator *iter = m->private;
2811 void *p = NULL;
2812 loff_t l;
2813
2814 if (!(iter->flags & FTRACE_ITER_DO_HASH))
2815 return NULL;
2816
2817 if (iter->func_pos > *pos)
2818 return NULL;
2819
2820 iter->hidx = 0;
2821 for (l = 0; l <= (*pos - iter->func_pos); ) {
2822 p = t_hash_next(m, &l);
2823 if (!p)
2824 break;
2825 }
2826 if (!p)
2827 return NULL;
2828
2829 /* Only set this if we have an item */
2830 iter->flags |= FTRACE_ITER_HASH;
2831
2832 return iter;
2833 }
2834
2835 static int
2836 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2837 {
2838 struct ftrace_func_probe *rec;
2839
2840 rec = iter->probe;
2841 if (WARN_ON_ONCE(!rec))
2842 return -EIO;
2843
2844 if (rec->ops->print)
2845 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2846
2847 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2848
2849 if (rec->data)
2850 seq_printf(m, ":%p", rec->data);
2851 seq_putc(m, '\n');
2852
2853 return 0;
2854 }
2855
2856 static void *
2857 t_next(struct seq_file *m, void *v, loff_t *pos)
2858 {
2859 struct ftrace_iterator *iter = m->private;
2860 struct ftrace_ops *ops = iter->ops;
2861 struct dyn_ftrace *rec = NULL;
2862
2863 if (unlikely(ftrace_disabled))
2864 return NULL;
2865
2866 if (iter->flags & FTRACE_ITER_HASH)
2867 return t_hash_next(m, pos);
2868
2869 (*pos)++;
2870 iter->pos = iter->func_pos = *pos;
2871
2872 if (iter->flags & FTRACE_ITER_PRINTALL)
2873 return t_hash_start(m, pos);
2874
2875 retry:
2876 if (iter->idx >= iter->pg->index) {
2877 if (iter->pg->next) {
2878 iter->pg = iter->pg->next;
2879 iter->idx = 0;
2880 goto retry;
2881 }
2882 } else {
2883 rec = &iter->pg->records[iter->idx++];
2884 if (((iter->flags & FTRACE_ITER_FILTER) &&
2885 !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
2886
2887 ((iter->flags & FTRACE_ITER_NOTRACE) &&
2888 !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
2889
2890 ((iter->flags & FTRACE_ITER_ENABLED) &&
2891 !(rec->flags & FTRACE_FL_ENABLED))) {
2892
2893 rec = NULL;
2894 goto retry;
2895 }
2896 }
2897
2898 if (!rec)
2899 return t_hash_start(m, pos);
2900
2901 iter->func = rec;
2902
2903 return iter;
2904 }
2905
2906 static void reset_iter_read(struct ftrace_iterator *iter)
2907 {
2908 iter->pos = 0;
2909 iter->func_pos = 0;
2910 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2911 }
2912
2913 static void *t_start(struct seq_file *m, loff_t *pos)
2914 {
2915 struct ftrace_iterator *iter = m->private;
2916 struct ftrace_ops *ops = iter->ops;
2917 void *p = NULL;
2918 loff_t l;
2919
2920 mutex_lock(&ftrace_lock);
2921
2922 if (unlikely(ftrace_disabled))
2923 return NULL;
2924
2925 /*
2926 * If an lseek was done, then reset and start from the beginning.
2927 */
2928 if (*pos < iter->pos)
2929 reset_iter_read(iter);
2930
2931 /*
2932 * For set_ftrace_filter reading, if we have the filter
2933 * off, we can take a shortcut and just print out that all
2934 * functions are enabled.
2935 */
2936 if ((iter->flags & FTRACE_ITER_FILTER &&
2937 ftrace_hash_empty(ops->func_hash->filter_hash)) ||
2938 (iter->flags & FTRACE_ITER_NOTRACE &&
2939 ftrace_hash_empty(ops->func_hash->notrace_hash))) {
2940 if (*pos > 0)
2941 return t_hash_start(m, pos);
2942 iter->flags |= FTRACE_ITER_PRINTALL;
2943 /* reset in case of seek/pread */
2944 iter->flags &= ~FTRACE_ITER_HASH;
2945 return iter;
2946 }
2947
2948 if (iter->flags & FTRACE_ITER_HASH)
2949 return t_hash_start(m, pos);
2950
2951 /*
2952 * Unfortunately, we need to restart at ftrace_pages_start
2953 * every time we let go of the ftrace_lock mutex. This is because
2954 * those pointers can change without the lock.
2955 */
2956 iter->pg = ftrace_pages_start;
2957 iter->idx = 0;
2958 for (l = 0; l <= *pos; ) {
2959 p = t_next(m, p, &l);
2960 if (!p)
2961 break;
2962 }
2963
2964 if (!p)
2965 return t_hash_start(m, pos);
2966
2967 return iter;
2968 }
2969
2970 static void t_stop(struct seq_file *m, void *p)
2971 {
2972 mutex_unlock(&ftrace_lock);
2973 }
2974
2975 static int t_show(struct seq_file *m, void *v)
2976 {
2977 struct ftrace_iterator *iter = m->private;
2978 struct dyn_ftrace *rec;
2979
2980 if (iter->flags & FTRACE_ITER_HASH)
2981 return t_hash_show(m, iter);
2982
2983 if (iter->flags & FTRACE_ITER_PRINTALL) {
2984 if (iter->flags & FTRACE_ITER_NOTRACE)
2985 seq_printf(m, "#### no functions disabled ####\n");
2986 else
2987 seq_printf(m, "#### all functions enabled ####\n");
2988 return 0;
2989 }
2990
2991 rec = iter->func;
2992
2993 if (!rec)
2994 return 0;
2995
2996 seq_printf(m, "%ps", (void *)rec->ip);
2997 if (iter->flags & FTRACE_ITER_ENABLED) {
2998 seq_printf(m, " (%ld)%s",
2999 ftrace_rec_count(rec),
3000 rec->flags & FTRACE_FL_REGS ? " R" : " ");
3001 if (rec->flags & FTRACE_FL_TRAMP_EN) {
3002 struct ftrace_ops *ops;
3003
3004 ops = ftrace_find_tramp_ops_any(rec);
3005 if (ops)
3006 seq_printf(m, "\ttramp: %pS",
3007 (void *)ops->trampoline);
3008 else
3009 seq_printf(m, "\ttramp: ERROR!");
3010 }
3011 }
3012
3013 seq_printf(m, "\n");
3014
3015 return 0;
3016 }
3017
3018 static const struct seq_operations show_ftrace_seq_ops = {
3019 .start = t_start,
3020 .next = t_next,
3021 .stop = t_stop,
3022 .show = t_show,
3023 };
3024
3025 static int
3026 ftrace_avail_open(struct inode *inode, struct file *file)
3027 {
3028 struct ftrace_iterator *iter;
3029
3030 if (unlikely(ftrace_disabled))
3031 return -ENODEV;
3032
3033 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3034 if (iter) {
3035 iter->pg = ftrace_pages_start;
3036 iter->ops = &global_ops;
3037 }
3038
3039 return iter ? 0 : -ENOMEM;
3040 }
3041
3042 static int
3043 ftrace_enabled_open(struct inode *inode, struct file *file)
3044 {
3045 struct ftrace_iterator *iter;
3046
3047 if (unlikely(ftrace_disabled))
3048 return -ENODEV;
3049
3050 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3051 if (iter) {
3052 iter->pg = ftrace_pages_start;
3053 iter->flags = FTRACE_ITER_ENABLED;
3054 iter->ops = &global_ops;
3055 }
3056
3057 return iter ? 0 : -ENOMEM;
3058 }
3059
3060 /**
3061 * ftrace_regex_open - initialize function tracer filter files
3062 * @ops: The ftrace_ops that hold the hash filters
3063 * @flag: The type of filter to process
3064 * @inode: The inode, usually passed in to your open routine
3065 * @file: The file, usually passed in to your open routine
3066 *
3067 * ftrace_regex_open() initializes the filter files for the
3068 * @ops. Depending on @flag it may process the filter hash or
3069 * the notrace hash of @ops. With this called from the open
3070 * routine, you can use ftrace_filter_write() for the write
3071 * routine if @flag has FTRACE_ITER_FILTER set, or
3072 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3073 * tracing_lseek() should be used as the lseek routine, and
3074 * release must call ftrace_regex_release().
3075 */
3076 int
3077 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3078 struct inode *inode, struct file *file)
3079 {
3080 struct ftrace_iterator *iter;
3081 struct ftrace_hash *hash;
3082 int ret = 0;
3083
3084 ftrace_ops_init(ops);
3085
3086 if (unlikely(ftrace_disabled))
3087 return -ENODEV;
3088
3089 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3090 if (!iter)
3091 return -ENOMEM;
3092
3093 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3094 kfree(iter);
3095 return -ENOMEM;
3096 }
3097
3098 iter->ops = ops;
3099 iter->flags = flag;
3100
3101 mutex_lock(&ops->func_hash->regex_lock);
3102
3103 if (flag & FTRACE_ITER_NOTRACE)
3104 hash = ops->func_hash->notrace_hash;
3105 else
3106 hash = ops->func_hash->filter_hash;
3107
3108 if (file->f_mode & FMODE_WRITE) {
3109 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3110
3111 if (file->f_flags & O_TRUNC)
3112 iter->hash = alloc_ftrace_hash(size_bits);
3113 else
3114 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3115
3116 if (!iter->hash) {
3117 trace_parser_put(&iter->parser);
3118 kfree(iter);
3119 ret = -ENOMEM;
3120 goto out_unlock;
3121 }
3122 }
3123
3124 if (file->f_mode & FMODE_READ) {
3125 iter->pg = ftrace_pages_start;
3126
3127 ret = seq_open(file, &show_ftrace_seq_ops);
3128 if (!ret) {
3129 struct seq_file *m = file->private_data;
3130 m->private = iter;
3131 } else {
3132 /* Failed */
3133 free_ftrace_hash(iter->hash);
3134 trace_parser_put(&iter->parser);
3135 kfree(iter);
3136 }
3137 } else
3138 file->private_data = iter;
3139
3140 out_unlock:
3141 mutex_unlock(&ops->func_hash->regex_lock);
3142
3143 return ret;
3144 }
3145
3146 static int
3147 ftrace_filter_open(struct inode *inode, struct file *file)
3148 {
3149 struct ftrace_ops *ops = inode->i_private;
3150
3151 return ftrace_regex_open(ops,
3152 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3153 inode, file);
3154 }
3155
3156 static int
3157 ftrace_notrace_open(struct inode *inode, struct file *file)
3158 {
3159 struct ftrace_ops *ops = inode->i_private;
3160
3161 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3162 inode, file);
3163 }
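
/*
 * A sketch of how these open routines are wired up, per the
 * ftrace_regex_open() kerneldoc above (the real file_operations are
 * defined further down in this file):
 *
 *	static const struct file_operations ftrace_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */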
3164
3165 static int ftrace_match(char *str, char *regex, int len, int type)
3166 {
3167 int matched = 0;
3168 int slen;
3169
3170 switch (type) {
3171 case MATCH_FULL:
3172 if (strcmp(str, regex) == 0)
3173 matched = 1;
3174 break;
3175 case MATCH_FRONT_ONLY:
3176 if (strncmp(str, regex, len) == 0)
3177 matched = 1;
3178 break;
3179 case MATCH_MIDDLE_ONLY:
3180 if (strstr(str, regex))
3181 matched = 1;
3182 break;
3183 case MATCH_END_ONLY:
3184 slen = strlen(str);
3185 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
3186 matched = 1;
3187 break;
3188 }
3189
3190 return matched;
3191 }
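
/*
 * The type argument comes from filter_parse_regex(), which maps glob
 * patterns to match types; for example:
 *
 *	"schedule"   ->  MATCH_FULL         (exact string compare)
 *	"sched*"     ->  MATCH_FRONT_ONLY   (prefix compare)
 *	"*sched*"    ->  MATCH_MIDDLE_ONLY  (substring)
 *	"*sched"     ->  MATCH_END_ONLY     (suffix compare)
 */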
3192
3193 static int
3194 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
3195 {
3196 struct ftrace_func_entry *entry;
3197 int ret = 0;
3198
3199 entry = ftrace_lookup_ip(hash, rec->ip);
3200 if (not) {
3201 /* Do nothing if it doesn't exist */
3202 if (!entry)
3203 return 0;
3204
3205 free_hash_entry(hash, entry);
3206 } else {
3207 /* Do nothing if it exists */
3208 if (entry)
3209 return 0;
3210
3211 ret = add_hash_entry(hash, rec->ip);
3212 }
3213 return ret;
3214 }
3215
3216 static int
3217 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
3218 char *regex, int len, int type)
3219 {
3220 char str[KSYM_SYMBOL_LEN];
3221 char *modname;
3222
3223 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3224
3225 if (mod) {
3226 /* module lookup requires matching the module */
3227 if (!modname || strcmp(modname, mod))
3228 return 0;
3229
3230 /* blank search means to match all funcs in the mod */
3231 if (!len)
3232 return 1;
3233 }
3234
3235 return ftrace_match(str, regex, len, type);
3236 }
3237
3238 static int
3239 match_records(struct ftrace_hash *hash, char *buff,
3240 int len, char *mod, int not)
3241 {
3242 unsigned search_len = 0;
3243 struct ftrace_page *pg;
3244 struct dyn_ftrace *rec;
3245 int type = MATCH_FULL;
3246 char *search = buff;
3247 int found = 0;
3248 int ret;
3249
3250 if (len) {
3251 type = filter_parse_regex(buff, len, &search, ¬);
3252 search_len = strlen(search);
3253 }
3254
3255 mutex_lock(&ftrace_lock);
3256
3257 if (unlikely(ftrace_disabled))
3258 goto out_unlock;
3259
3260 do_for_each_ftrace_rec(pg, rec) {
3261 if (ftrace_match_record(rec, mod, search, search_len, type)) {
3262 ret = enter_record(hash, rec, not);
3263 if (ret < 0) {
3264 found = ret;
3265 goto out_unlock;
3266 }
3267 found = 1;
3268 }
3269 } while_for_each_ftrace_rec();
3270 out_unlock:
3271 mutex_unlock(&ftrace_lock);
3272
3273 return found;
3274 }
3275
3276 static int
3277 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3278 {
3279 return match_records(hash, buff, len, NULL, 0);
3280 }
3281
3282 static int
3283 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3284 {
3285 int not = 0;
3286
3287 /* blank or '*' mean the same */
3288 if (strcmp(buff, "*") == 0)
3289 buff[0] = 0;
3290
3291 /* handle the case of 'dont filter this module' */
3292 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3293 buff[0] = 0;
3294 not = 1;
3295 }
3296
3297 return match_records(hash, buff, strlen(buff), mod, not);
3298 }
3299
3300 /*
3301 * We register the module command as a template to show others how
3302 * to register a command as well.
3303 */
3304
3305 static int
3306 ftrace_mod_callback(struct ftrace_hash *hash,
3307 char *func, char *cmd, char *param, int enable)
3308 {
3309 char *mod;
3310 int ret = -EINVAL;
3311
3312 /*
3313 * cmd == 'mod' because we only registered this func
3314 * for the 'mod' ftrace_func_command.
3315 * But if you register one func with multiple commands,
3316 * you can tell which command was used by the cmd
3317 * parameter.
3318 */
3319
3320 /* we must have a module name */
3321 if (!param)
3322 return ret;
3323
3324 mod = strsep(¶m, ":");
3325 if (!strlen(mod))
3326 return ret;
3327
3328 ret = ftrace_match_module_records(hash, func, mod);
3329 if (!ret)
3330 ret = -EINVAL;
3331 if (ret < 0)
3332 return ret;
3333
3334 return 0;
3335 }
3336
3337 static struct ftrace_func_command ftrace_mod_cmd = {
3338 .name = "mod",
3339 .func = ftrace_mod_callback,
3340 };
3341
3342 static int __init ftrace_mod_cmd_init(void)
3343 {
3344 return register_ftrace_command(&ftrace_mod_cmd);
3345 }
3346 core_initcall(ftrace_mod_cmd_init);
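
/*
 * With the command registered, user space can filter by module, e.g.:
 *
 *	echo '*:mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * which reaches ftrace_mod_callback() with func == "*", cmd == "mod"
 * and param == "ext4" ("ext4" here is just an example module name).
 */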
3347
3348 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3349 struct ftrace_ops *op, struct pt_regs *pt_regs)
3350 {
3351 struct ftrace_func_probe *entry;
3352 struct hlist_head *hhd;
3353 unsigned long key;
3354
3355 key = hash_long(ip, FTRACE_HASH_BITS);
3356
3357 hhd = &ftrace_func_hash[key];
3358
3359 if (hlist_empty(hhd))
3360 return;
3361
3362 /*
3363 * Disable preemption for these calls to prevent an RCU grace
3364 * period. This syncs the hash iteration and freeing of items
3365 * on the hash. rcu_read_lock is too dangerous here.
3366 */
3367 preempt_disable_notrace();
3368 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3369 if (entry->ip == ip)
3370 entry->ops->func(ip, parent_ip, &entry->data);
3371 }
3372 preempt_enable_notrace();
3373 }
3374
3375 static struct ftrace_ops trace_probe_ops __read_mostly =
3376 {
3377 .func = function_trace_probe_call,
3378 .flags = FTRACE_OPS_FL_INITIALIZED,
3379 INIT_OPS_HASH(trace_probe_ops)
3380 };
3381
3382 static int ftrace_probe_registered;
3383
3384 static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
3385 {
3386 int ret;
3387 int i;
3388
3389 if (ftrace_probe_registered) {
3390 /* still need to update the function call sites */
3391 if (ftrace_enabled)
3392 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3393 old_hash);
3394 return;
3395 }
3396
3397 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3398 struct hlist_head *hhd = &ftrace_func_hash[i];
3399 if (hhd->first)
3400 break;
3401 }
3402 /* Nothing registered? */
3403 if (i == FTRACE_FUNC_HASHSIZE)
3404 return;
3405
3406 ret = ftrace_startup(&trace_probe_ops, 0);
3407
3408 ftrace_probe_registered = 1;
3409 }
3410
3411 static bool __disable_ftrace_function_probe(void)
3412 {
3413 int i;
3414
3415 if (!ftrace_probe_registered)
3416 return false;
3417
3418 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3419 struct hlist_head *hhd = &ftrace_func_hash[i];
3420 if (hhd->first)
3421 return false;
3422 }
3423
3424 /* no more funcs left */
3425 ftrace_shutdown(&trace_probe_ops, 0);
3426
3427 ftrace_probe_registered = 0;
3428 return true;
3429 }
3430
3431
3432 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3433 {
3434 if (entry->ops->free)
3435 entry->ops->free(entry->ops, entry->ip, &entry->data);
3436 kfree(entry);
3437 }
3438
3439 int
3440 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3441 void *data)
3442 {
3443 struct ftrace_ops_hash old_hash_ops;
3444 struct ftrace_func_probe *entry;
3445 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3446 struct ftrace_hash *old_hash = *orig_hash;
3447 struct ftrace_hash *hash;
3448 struct ftrace_page *pg;
3449 struct dyn_ftrace *rec;
3450 int type, len, not;
3451 unsigned long key;
3452 int count = 0;
3453 char *search;
3454 int ret;
3455
3456 type = filter_parse_regex(glob, strlen(glob), &search, ¬);
3457 len = strlen(search);
3458
3459 /* we do not support '!' for function probes */
3460 if (WARN_ON(not))
3461 return -EINVAL;
3462
3463 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3464
3465 old_hash_ops.filter_hash = old_hash;
3466 /* Probes only have filters */
3467 old_hash_ops.notrace_hash = NULL;
3468
3469 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3470 if (!hash) {
3471 count = -ENOMEM;
3472 goto out;
3473 }
3474
3475 if (unlikely(ftrace_disabled)) {
3476 count = -ENODEV;
3477 goto out;
3478 }
3479
3480 mutex_lock(&ftrace_lock);
3481
3482 do_for_each_ftrace_rec(pg, rec) {
3483
3484 if (!ftrace_match_record(rec, NULL, search, len, type))
3485 continue;
3486
3487 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3488 if (!entry) {
3489 /* If we did not process any, then return error */
3490 if (!count)
3491 count = -ENOMEM;
3492 goto out_unlock;
3493 }
3494
3495 count++;
3496
3497 entry->data = data;
3498
3499 /*
3500 * The caller might want to do something special
3501 * for each function we find. We call the callback
3502 * to give the caller an opportunity to do so.
3503 */
3504 if (ops->init) {
3505 if (ops->init(ops, rec->ip, &entry->data) < 0) {
3506 /* caller does not like this func */
3507 kfree(entry);
3508 continue;
3509 }
3510 }
3511
3512 ret = enter_record(hash, rec, 0);
3513 if (ret < 0) {
3514 kfree(entry);
3515 count = ret;
3516 goto out_unlock;
3517 }
3518
3519 entry->ops = ops;
3520 entry->ip = rec->ip;
3521
3522 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3523 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3524
3525 } while_for_each_ftrace_rec();
3526
3527 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3528
3529 __enable_ftrace_function_probe(&old_hash_ops);
3530
3531 if (!ret)
3532 free_ftrace_hash_rcu(old_hash);
3533 else
3534 count = ret;
3535
3536 out_unlock:
3537 mutex_unlock(&ftrace_lock);
3538 out:
3539 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3540 free_ftrace_hash(hash);
3541
3542 return count;
3543 }
3544
3545 enum {
3546 PROBE_TEST_FUNC = 1,
3547 PROBE_TEST_DATA = 2
3548 };
3549
3550 static void
3551 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3552 void *data, int flags)
3553 {
3554 struct ftrace_ops_hash old_hash_ops;
3555 struct ftrace_func_entry *rec_entry;
3556 struct ftrace_func_probe *entry;
3557 struct ftrace_func_probe *p;
3558 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3559 struct ftrace_hash *old_hash = *orig_hash;
3560 struct list_head free_list;
3561 struct ftrace_hash *hash;
3562 struct hlist_node *tmp;
3563 char str[KSYM_SYMBOL_LEN];
3564 int type = MATCH_FULL;
3565 int i, len = 0;
3566 char *search;
3567 int ret;
3568 bool disabled;
3569
3570 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3571 glob = NULL;
3572 else if (glob) {
3573 int not;
3574
3575 type = filter_parse_regex(glob, strlen(glob), &search, ¬);
3576 len = strlen(search);
3577
3578 /* we do not support '!' for function probes */
3579 if (WARN_ON(not))
3580 return;
3581 }
3582
3583 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3584
3585 old_hash_ops.filter_hash = old_hash;
3586 /* Probes only have filters */
3587 old_hash_ops.notrace_hash = NULL;
3588
3589 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3590 if (!hash)
3591 /* Hmm, should report this somehow */
3592 goto out_unlock;
3593
3594 INIT_LIST_HEAD(&free_list);
3595
3596 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3597 struct hlist_head *hhd = &ftrace_func_hash[i];
3598
3599 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3600
3601 /* break up if statements for readability */
3602 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3603 continue;
3604
3605 if ((flags & PROBE_TEST_DATA) && entry->data != data)
3606 continue;
3607
3608 /* do this last, since it is the most expensive */
3609 if (glob) {
3610 kallsyms_lookup(entry->ip, NULL, NULL,
3611 NULL, str);
3612 if (!ftrace_match(str, glob, len, type))
3613 continue;
3614 }
3615
3616 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3617 /* It is possible more than one entry had this ip */
3618 if (rec_entry)
3619 free_hash_entry(hash, rec_entry);
3620
3621 hlist_del_rcu(&entry->node);
3622 list_add(&entry->free_list, &free_list);
3623 }
3624 }
3625 mutex_lock(&ftrace_lock);
3626 disabled = __disable_ftrace_function_probe();
3627 /*
3628 * Remove after the disable is called. Otherwise, if the last
3629 * probe is removed, a null hash means *all enabled*.
3630 */
3631 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3632
3633 /* still need to update the function call sites */
3634 if (ftrace_enabled && !disabled)
3635 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3636 &old_hash_ops);
3637 synchronize_sched();
3638 if (!ret)
3639 free_ftrace_hash_rcu(old_hash);
3640
3641 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3642 list_del(&entry->free_list);
3643 ftrace_free_entry(entry);
3644 }
3645 mutex_unlock(&ftrace_lock);
3646
3647 out_unlock:
3648 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3649 free_ftrace_hash(hash);
3650 }
3651
3652 void
3653 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3654 void *data)
3655 {
3656 __unregister_ftrace_function_probe(glob, ops, data,
3657 PROBE_TEST_FUNC | PROBE_TEST_DATA);
3658 }
3659
3660 void
3661 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3662 {
3663 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3664 }
3665
3666 void unregister_ftrace_function_probe_all(char *glob)
3667 {
3668 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3669 }
3670
3671 static LIST_HEAD(ftrace_commands);
3672 static DEFINE_MUTEX(ftrace_cmd_mutex);
3673
3674 /*
3675 * Currently we only register ftrace commands from __init, so mark this
3676 * __init too.
3677 */
3678 __init int register_ftrace_command(struct ftrace_func_command *cmd)
3679 {
3680 struct ftrace_func_command *p;
3681 int ret = 0;
3682
3683 mutex_lock(&ftrace_cmd_mutex);
3684 list_for_each_entry(p, &ftrace_commands, list) {
3685 if (strcmp(cmd->name, p->name) == 0) {
3686 ret = -EBUSY;
3687 goto out_unlock;
3688 }
3689 }
3690 list_add(&cmd->list, &ftrace_commands);
3691 out_unlock:
3692 mutex_unlock(&ftrace_cmd_mutex);
3693
3694 return ret;
3695 }
3696
3697 /*
3698 * Currently we only unregister ftrace commands from __init, so mark
3699 * this __init too.
3700 */
3701 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3702 {
3703 struct ftrace_func_command *p, *n;
3704 int ret = -ENODEV;
3705
3706 mutex_lock(&ftrace_cmd_mutex);
3707 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3708 if (strcmp(cmd->name, p->name) == 0) {
3709 ret = 0;
3710 list_del_init(&p->list);
3711 goto out_unlock;
3712 }
3713 }
3714 out_unlock:
3715 mutex_unlock(&ftrace_cmd_mutex);
3716
3717 return ret;
3718 }
3719
3720 static int ftrace_process_regex(struct ftrace_hash *hash,
3721 char *buff, int len, int enable)
3722 {
3723 char *func, *command, *next = buff;
3724 struct ftrace_func_command *p;
3725 int ret = -EINVAL;
3726
3727 func = strsep(&next, ":");
3728
3729 if (!next) {
3730 ret = ftrace_match_records(hash, func, len);
3731 if (!ret)
3732 ret = -EINVAL;
3733 if (ret < 0)
3734 return ret;
3735 return 0;
3736 }
3737
3738 /* command found */
3739
3740 command = strsep(&next, ":");
3741
3742 mutex_lock(&ftrace_cmd_mutex);
3743 list_for_each_entry(p, &ftrace_commands, list) {
3744 if (strcmp(p->name, command) == 0) {
3745 ret = p->func(hash, func, command, next, enable);
3746 goto out_unlock;
3747 }
3748 }
3749 out_unlock:
3750 mutex_unlock(&ftrace_cmd_mutex);
3751
3752 return ret;
3753 }
3754
3755 static ssize_t
3756 ftrace_regex_write(struct file *file, const char __user *ubuf,
3757 size_t cnt, loff_t *ppos, int enable)
3758 {
3759 struct ftrace_iterator *iter;
3760 struct trace_parser *parser;
3761 ssize_t ret, read;
3762
3763 if (!cnt)
3764 return 0;
3765
3766 if (file->f_mode & FMODE_READ) {
3767 struct seq_file *m = file->private_data;
3768 iter = m->private;
3769 } else
3770 iter = file->private_data;
3771
3772 if (unlikely(ftrace_disabled))
3773 return -ENODEV;
3774
3775 /* iter->hash is a local copy, so we don't need regex_lock */
3776
3777 parser = &iter->parser;
3778 read = trace_get_user(parser, ubuf, cnt, ppos);
3779
3780 if (read >= 0 && trace_parser_loaded(parser) &&
3781 !trace_parser_cont(parser)) {
3782 ret = ftrace_process_regex(iter->hash, parser->buffer,
3783 parser->idx, enable);
3784 trace_parser_clear(parser);
3785 if (ret < 0)
3786 goto out;
3787 }
3788
3789 ret = read;
3790 out:
3791 return ret;
3792 }
3793
3794 ssize_t
3795 ftrace_filter_write(struct file *file, const char __user *ubuf,
3796 size_t cnt, loff_t *ppos)
3797 {
3798 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3799 }
3800
3801 ssize_t
3802 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3803 size_t cnt, loff_t *ppos)
3804 {
3805 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3806 }
3807
3808 static int
3809 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3810 {
3811 struct ftrace_func_entry *entry;
3812
3813 if (!ftrace_location(ip))
3814 return -EINVAL;
3815
3816 if (remove) {
3817 entry = ftrace_lookup_ip(hash, ip);
3818 if (!entry)
3819 return -ENOENT;
3820 free_hash_entry(hash, entry);
3821 return 0;
3822 }
3823
3824 return add_hash_entry(hash, ip);
3825 }
3826
3827 static void ftrace_ops_update_code(struct ftrace_ops *ops,
3828 struct ftrace_ops_hash *old_hash)
3829 {
3830 struct ftrace_ops *op;
3831
3832 if (!ftrace_enabled)
3833 return;
3834
3835 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3836 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3837 return;
3838 }
3839
3840 /*
3841 * If this is the shared global_ops filter, then we need to
3842 * check if another ops that shares it is enabled.
3843 * If so, we still need to run the modify code.
3844 */
3845 if (ops->func_hash != &global_ops.local_hash)
3846 return;
3847
3848 do_for_each_ftrace_op(op, ftrace_ops_list) {
3849 if (op->func_hash == &global_ops.local_hash &&
3850 op->flags & FTRACE_OPS_FL_ENABLED) {
3851 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
3852 /* Only need to do this once */
3853 return;
3854 }
3855 } while_for_each_ftrace_op(op);
3856 }
3857
3858 static int
3859 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3860 unsigned long ip, int remove, int reset, int enable)
3861 {
3862 struct ftrace_hash **orig_hash;
3863 struct ftrace_ops_hash old_hash_ops;
3864 struct ftrace_hash *old_hash;
3865 struct ftrace_hash *hash;
3866 int ret;
3867
3868 if (unlikely(ftrace_disabled))
3869 return -ENODEV;
3870
3871 mutex_lock(&ops->func_hash->regex_lock);
3872
3873 if (enable)
3874 orig_hash = &ops->func_hash->filter_hash;
3875 else
3876 orig_hash = &ops->func_hash->notrace_hash;
3877
3878 if (reset)
3879 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
3880 else
3881 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3882
3883 if (!hash) {
3884 ret = -ENOMEM;
3885 goto out_regex_unlock;
3886 }
3887
3888 if (buf && !ftrace_match_records(hash, buf, len)) {
3889 ret = -EINVAL;
3890 goto out_regex_unlock;
3891 }
3892 if (ip) {
3893 ret = ftrace_match_addr(hash, ip, remove);
3894 if (ret < 0)
3895 goto out_regex_unlock;
3896 }
3897
3898 mutex_lock(&ftrace_lock);
3899 old_hash = *orig_hash;
3900 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3901 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3902 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3903 if (!ret) {
3904 ftrace_ops_update_code(ops, &old_hash_ops);
3905 free_ftrace_hash_rcu(old_hash);
3906 }
3907 mutex_unlock(&ftrace_lock);
3908
3909 out_regex_unlock:
3910 mutex_unlock(&ops->func_hash->regex_lock);
3911
3912 free_ftrace_hash(hash);
3913 return ret;
3914 }
3915
3916 static int
3917 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3918 int reset, int enable)
3919 {
3920 return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
3921 }
3922
3923 /**
3924 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3925 * @ops - the ops to set the filter with
3926 * @ip - the address to add to or remove from the filter.
3927 * @remove - non zero to remove the ip from the filter
3928 * @reset - non zero to reset all filters before applying this filter.
3929 *
3930 * Filters denote which functions should be enabled when tracing is enabled
3931 * If @ip is zero, the filter update fails.
3932 */
3933 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3934 int remove, int reset)
3935 {
3936 ftrace_ops_init(ops);
3937 return ftrace_set_addr(ops, ip, remove, reset, 1);
3938 }
3939 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
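
/*
 * A minimal usage sketch (my_ops and my_callback are hypothetical; the
 * registration side is register_ftrace_function()):
 *
 *	static struct ftrace_ops my_ops = { .func = my_callback };
 *
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)some_func, 0, 0);
 *	register_ftrace_function(&my_ops);
 */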
3940
3941 static int
3942 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3943 int reset, int enable)
3944 {
3945 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3946 }
3947
3948 /**
3949 * ftrace_set_filter - set a function to filter on in ftrace
3950 * @ops - the ops to set the filter with
3951 * @buf - the string that holds the function filter text.
3952 * @len - the length of the string.
3953 * @reset - non zero to reset all filters before applying this filter.
3954 *
3955 * Filters denote which functions should be enabled when tracing is enabled.
3956 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3957 */
3958 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3959 int len, int reset)
3960 {
3961 ftrace_ops_init(ops);
3962 return ftrace_set_regex(ops, buf, len, reset, 1);
3963 }
3964 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3965
3966 /**
3967 * ftrace_set_notrace - set a function to not trace in ftrace
3968 * @ops - the ops to set the notrace filter with
3969 * @buf - the string that holds the function notrace text.
3970 * @len - the length of the string.
3971 * @reset - non zero to reset all filters before applying this filter.
3972 *
3973 * Notrace Filters denote which functions should not be enabled when tracing
3974 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3975 * for tracing.
3976 */
3977 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3978 int len, int reset)
3979 {
3980 ftrace_ops_init(ops);
3981 return ftrace_set_regex(ops, buf, len, reset, 0);
3982 }
3983 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
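/*
 * A minimal sketch of the two calls together ("my_ops" is a
 * hypothetical, already-initialized ftrace_ops; the glob syntax is
 * the one accepted by ftrace_match_records()):
 *
 *	ftrace_set_filter(&my_ops, "sched_*", strlen("sched_*"), 1);
 *	ftrace_set_notrace(&my_ops, "sched_clock*", strlen("sched_clock*"), 1);
 *
 * Once my_ops is registered, its func runs for functions matching
 * "sched_*" except those matching "sched_clock*".
 */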
3984 /**
3985 * ftrace_set_global_filter - set a function to filter on with global tracers
3986 * @buf - the string that holds the function filter text.
3987 * @len - the length of the string.
3988 * @reset - non zero to reset all filters before applying this filter.
3989 *
3990 * Filters denote which functions should be enabled when tracing is enabled.
3991 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3992 */
3993 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3994 {
3995 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3996 }
3997 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3998
3999 /**
4000 * ftrace_set_global_notrace - set a function to not trace with global tracers
4001 * @buf - the string that holds the function notrace text.
4002 * @len - the length of the string.
4003 * @reset - non zero to reset all filters before applying this filter.
4004 *
4005 * Notrace Filters denote which functions should not be enabled when tracing
4006 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4007 * for tracing.
4008 */
4009 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4010 {
4011 ftrace_set_regex(&global_ops, buf, len, reset, 0);
4012 }
4013 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4014
4015 /*
4016 * command line interface to allow users to set filters on boot up.
4017 */
4018 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
4019 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4020 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4021
4022 /* Used by function selftest to not test if filter is set */
4023 bool ftrace_filter_param __initdata;
4024
4025 static int __init set_ftrace_notrace(char *str)
4026 {
4027 ftrace_filter_param = true;
4028 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4029 return 1;
4030 }
4031 __setup("ftrace_notrace=", set_ftrace_notrace);
4032
4033 static int __init set_ftrace_filter(char *str)
4034 {
4035 ftrace_filter_param = true;
4036 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4037 return 1;
4038 }
4039 __setup("ftrace_filter=", set_ftrace_filter);
4040
4041 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4042 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4043 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4044 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
4045
4046 static int __init set_graph_function(char *str)
4047 {
4048 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4049 return 1;
4050 }
4051 __setup("ftrace_graph_filter=", set_graph_function);
4052
4053 static int __init set_graph_notrace_function(char *str)
4054 {
4055 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4056 return 1;
4057 }
4058 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
4059
4060 static void __init set_ftrace_early_graph(char *buf, int enable)
4061 {
4062 int ret;
4063 char *func;
4064 unsigned long *table = ftrace_graph_funcs;
4065 int *count = &ftrace_graph_count;
4066
4067 if (!enable) {
4068 table = ftrace_graph_notrace_funcs;
4069 count = &ftrace_graph_notrace_count;
4070 }
4071
4072 while (buf) {
4073 func = strsep(&buf, ",");
4074 /* we allow only one expression at a time */
4075 ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
4076 if (ret)
4077 printk(KERN_DEBUG "ftrace: function %s not "
4078 "traceable\n", func);
4079 }
4080 }
4081 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4082
4083 void __init
4084 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4085 {
4086 char *func;
4087
4088 ftrace_ops_init(ops);
4089
4090 while (buf) {
4091 func = strsep(&buf, ",");
4092 ftrace_set_regex(ops, func, strlen(func), 0, enable);
4093 }
4094 }
4095
4096 static void __init set_ftrace_early_filters(void)
4097 {
4098 if (ftrace_filter_buf[0])
4099 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4100 if (ftrace_notrace_buf[0])
4101 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4102 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4103 if (ftrace_graph_buf[0])
4104 set_ftrace_early_graph(ftrace_graph_buf, 1);
4105 if (ftrace_graph_notrace_buf[0])
4106 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4107 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4108 }
4109
4110 int ftrace_regex_release(struct inode *inode, struct file *file)
4111 {
4112 struct seq_file *m = (struct seq_file *)file->private_data;
4113 struct ftrace_ops_hash old_hash_ops;
4114 struct ftrace_iterator *iter;
4115 struct ftrace_hash **orig_hash;
4116 struct ftrace_hash *old_hash;
4117 struct trace_parser *parser;
4118 int filter_hash;
4119 int ret;
4120
4121 if (file->f_mode & FMODE_READ) {
4122 iter = m->private;
4123 seq_release(inode, file);
4124 } else
4125 iter = file->private_data;
4126
4127 parser = &iter->parser;
4128 if (trace_parser_loaded(parser)) {
4129 parser->buffer[parser->idx] = 0;
4130 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4131 }
4132
4133 trace_parser_put(parser);
4134
4135 mutex_lock(&iter->ops->func_hash->regex_lock);
4136
4137 if (file->f_mode & FMODE_WRITE) {
4138 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4139
4140 if (filter_hash)
4141 orig_hash = &iter->ops->func_hash->filter_hash;
4142 else
4143 orig_hash = &iter->ops->func_hash->notrace_hash;
4144
4145 mutex_lock(&ftrace_lock);
4146 old_hash = *orig_hash;
4147 old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
4148 old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
4149 ret = ftrace_hash_move(iter->ops, filter_hash,
4150 orig_hash, iter->hash);
4151 if (!ret) {
4152 ftrace_ops_update_code(iter->ops, &old_hash_ops);
4153 free_ftrace_hash_rcu(old_hash);
4154 }
4155 mutex_unlock(&ftrace_lock);
4156 }
4157
4158 mutex_unlock(&iter->ops->func_hash->regex_lock);
4159 free_ftrace_hash(iter->hash);
4160 kfree(iter);
4161
4162 return 0;
4163 }
4164
4165 static const struct file_operations ftrace_avail_fops = {
4166 .open = ftrace_avail_open,
4167 .read = seq_read,
4168 .llseek = seq_lseek,
4169 .release = seq_release_private,
4170 };
4171
4172 static const struct file_operations ftrace_enabled_fops = {
4173 .open = ftrace_enabled_open,
4174 .read = seq_read,
4175 .llseek = seq_lseek,
4176 .release = seq_release_private,
4177 };
4178
4179 static const struct file_operations ftrace_filter_fops = {
4180 .open = ftrace_filter_open,
4181 .read = seq_read,
4182 .write = ftrace_filter_write,
4183 .llseek = tracing_lseek,
4184 .release = ftrace_regex_release,
4185 };
4186
4187 static const struct file_operations ftrace_notrace_fops = {
4188 .open = ftrace_notrace_open,
4189 .read = seq_read,
4190 .write = ftrace_notrace_write,
4191 .llseek = tracing_lseek,
4192 .release = ftrace_regex_release,
4193 };
4194
4195 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4196
4197 static DEFINE_MUTEX(graph_lock);
4198
4199 int ftrace_graph_count;
4200 int ftrace_graph_notrace_count;
4201 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4202 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4203
4204 struct ftrace_graph_data {
4205 unsigned long *table;
4206 size_t size;
4207 int *count;
4208 const struct seq_operations *seq_ops;
4209 };
4210
4211 static void *
4212 __g_next(struct seq_file *m, loff_t *pos)
4213 {
4214 struct ftrace_graph_data *fgd = m->private;
4215
4216 if (*pos >= *fgd->count)
4217 return NULL;
4218 return &fgd->table[*pos];
4219 }
4220
4221 static void *
4222 g_next(struct seq_file *m, void *v, loff_t *pos)
4223 {
4224 (*pos)++;
4225 return __g_next(m, pos);
4226 }
4227
4228 static void *g_start(struct seq_file *m, loff_t *pos)
4229 {
4230 struct ftrace_graph_data *fgd = m->private;
4231
4232 mutex_lock(&graph_lock);
4233
4234 	/* Table is empty; tell g_show to print that all functions are enabled */
4235 if (!*fgd->count && !*pos)
4236 return (void *)1;
4237
4238 return __g_next(m, pos);
4239 }
4240
4241 static void g_stop(struct seq_file *m, void *p)
4242 {
4243 mutex_unlock(&graph_lock);
4244 }
4245
4246 static int g_show(struct seq_file *m, void *v)
4247 {
4248 unsigned long *ptr = v;
4249
4250 if (!ptr)
4251 return 0;
4252
4253 if (ptr == (unsigned long *)1) {
4254 struct ftrace_graph_data *fgd = m->private;
4255
4256 if (fgd->table == ftrace_graph_funcs)
4257 seq_printf(m, "#### all functions enabled ####\n");
4258 else
4259 seq_printf(m, "#### no functions disabled ####\n");
4260 return 0;
4261 }
4262
4263 seq_printf(m, "%ps\n", (void *)*ptr);
4264
4265 return 0;
4266 }
4267
4268 static const struct seq_operations ftrace_graph_seq_ops = {
4269 .start = g_start,
4270 .next = g_next,
4271 .stop = g_stop,
4272 .show = g_show,
4273 };
4274
4275 static int
4276 __ftrace_graph_open(struct inode *inode, struct file *file,
4277 struct ftrace_graph_data *fgd)
4278 {
4279 int ret = 0;
4280
4281 mutex_lock(&graph_lock);
4282 if ((file->f_mode & FMODE_WRITE) &&
4283 (file->f_flags & O_TRUNC)) {
4284 *fgd->count = 0;
4285 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
4286 }
4287 mutex_unlock(&graph_lock);
4288
4289 if (file->f_mode & FMODE_READ) {
4290 ret = seq_open(file, fgd->seq_ops);
4291 if (!ret) {
4292 struct seq_file *m = file->private_data;
4293 m->private = fgd;
4294 }
4295 } else
4296 file->private_data = fgd;
4297
4298 return ret;
4299 }
4300
4301 static int
4302 ftrace_graph_open(struct inode *inode, struct file *file)
4303 {
4304 struct ftrace_graph_data *fgd;
4305
4306 if (unlikely(ftrace_disabled))
4307 return -ENODEV;
4308
4309 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4310 if (fgd == NULL)
4311 return -ENOMEM;
4312
4313 fgd->table = ftrace_graph_funcs;
4314 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4315 fgd->count = &ftrace_graph_count;
4316 fgd->seq_ops = &ftrace_graph_seq_ops;
4317
4318 return __ftrace_graph_open(inode, file, fgd);
4319 }
4320
4321 static int
4322 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4323 {
4324 struct ftrace_graph_data *fgd;
4325
4326 if (unlikely(ftrace_disabled))
4327 return -ENODEV;
4328
4329 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4330 if (fgd == NULL)
4331 return -ENOMEM;
4332
4333 fgd->table = ftrace_graph_notrace_funcs;
4334 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4335 fgd->count = &ftrace_graph_notrace_count;
4336 fgd->seq_ops = &ftrace_graph_seq_ops;
4337
4338 return __ftrace_graph_open(inode, file, fgd);
4339 }
4340
4341 static int
4342 ftrace_graph_release(struct inode *inode, struct file *file)
4343 {
4344 if (file->f_mode & FMODE_READ) {
4345 struct seq_file *m = file->private_data;
4346
4347 kfree(m->private);
4348 seq_release(inode, file);
4349 } else {
4350 kfree(file->private_data);
4351 }
4352
4353 return 0;
4354 }
4355
4356 static int
4357 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4358 {
4359 struct dyn_ftrace *rec;
4360 struct ftrace_page *pg;
4361 int search_len;
4362 int fail = 1;
4363 int type, not;
4364 char *search;
4365 bool exists;
4366 int i;
4367
4368 /* decode regex */
4369 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4370 if (!not && *idx >= size)
4371 return -EBUSY;
4372
4373 search_len = strlen(search);
4374
4375 mutex_lock(&ftrace_lock);
4376
4377 if (unlikely(ftrace_disabled)) {
4378 mutex_unlock(&ftrace_lock);
4379 return -ENODEV;
4380 }
4381
4382 do_for_each_ftrace_rec(pg, rec) {
4383
4384 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4385 /* if it is in the array */
4386 exists = false;
4387 for (i = 0; i < *idx; i++) {
4388 if (array[i] == rec->ip) {
4389 exists = true;
4390 break;
4391 }
4392 }
4393
4394 if (!not) {
4395 fail = 0;
4396 if (!exists) {
4397 array[(*idx)++] = rec->ip;
4398 if (*idx >= size)
4399 goto out;
4400 }
4401 } else {
4402 if (exists) {
4403 array[i] = array[--(*idx)];
4404 array[*idx] = 0;
4405 fail = 0;
4406 }
4407 }
4408 }
4409 } while_for_each_ftrace_rec();
4410 out:
4411 mutex_unlock(&ftrace_lock);
4412
4413 if (fail)
4414 return -EINVAL;
4415
4416 return 0;
4417 }
4418
4419 static ssize_t
4420 ftrace_graph_write(struct file *file, const char __user *ubuf,
4421 size_t cnt, loff_t *ppos)
4422 {
4423 struct trace_parser parser;
4424 ssize_t read, ret = 0;
4425 struct ftrace_graph_data *fgd = file->private_data;
4426
4427 if (!cnt)
4428 return 0;
4429
4430 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4431 return -ENOMEM;
4432
4433 read = trace_get_user(&parser, ubuf, cnt, ppos);
4434
4435 if (read >= 0 && trace_parser_loaded((&parser))) {
4436 parser.buffer[parser.idx] = 0;
4437
4438 mutex_lock(&graph_lock);
4439
4440 /* we allow only one expression at a time */
4441 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4442 parser.buffer);
4443
4444 mutex_unlock(&graph_lock);
4445 }
4446
4447 if (!ret)
4448 ret = read;
4449
4450 trace_parser_put(&parser);
4451
4452 return ret;
4453 }
4454
4455 static const struct file_operations ftrace_graph_fops = {
4456 .open = ftrace_graph_open,
4457 .read = seq_read,
4458 .write = ftrace_graph_write,
4459 .llseek = tracing_lseek,
4460 .release = ftrace_graph_release,
4461 };
4462
4463 static const struct file_operations ftrace_graph_notrace_fops = {
4464 .open = ftrace_graph_notrace_open,
4465 .read = seq_read,
4466 .write = ftrace_graph_write,
4467 .llseek = tracing_lseek,
4468 .release = ftrace_graph_release,
4469 };
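/*
 * Illustrative tracefs session for the two graph files (paths assume
 * the usual tracefs mount point):
 *
 *	# echo do_IRQ > set_graph_function
 *	# echo kmem_cache_alloc >> set_graph_function
 *	# cat set_graph_function
 *	do_IRQ
 *	kmem_cache_alloc
 *
 * Opening with O_TRUNC (a plain ">") clears the table first, via
 * __ftrace_graph_open() above.
 */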
4470 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4471
4472 void ftrace_create_filter_files(struct ftrace_ops *ops,
4473 struct dentry *parent)
4474 {
4475
4476 trace_create_file("set_ftrace_filter", 0644, parent,
4477 ops, &ftrace_filter_fops);
4478
4479 trace_create_file("set_ftrace_notrace", 0644, parent,
4480 ops, &ftrace_notrace_fops);
4481 }
4482
4483 /*
4484  * The name "destroy_filter_files" is really a misnomer. It may,
4485  * in the future, actually delete the files, but for now it is
4486  * really intended to make sure the ops passed in are disabled
4487 * and that when this function returns, the caller is free to
4488 * free the ops.
4489 *
4490 * The "destroy" name is only to match the "create" name that this
4491 * should be paired with.
4492 */
4493 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4494 {
4495 mutex_lock(&ftrace_lock);
4496 if (ops->flags & FTRACE_OPS_FL_ENABLED)
4497 ftrace_shutdown(ops, 0);
4498 ops->flags |= FTRACE_OPS_FL_DELETED;
4499 mutex_unlock(&ftrace_lock);
4500 }
4501
4502 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
4503 {
4504
4505 trace_create_file("available_filter_functions", 0444,
4506 d_tracer, NULL, &ftrace_avail_fops);
4507
4508 trace_create_file("enabled_functions", 0444,
4509 d_tracer, NULL, &ftrace_enabled_fops);
4510
4511 ftrace_create_filter_files(&global_ops, d_tracer);
4512
4513 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4514 trace_create_file("set_graph_function", 0444, d_tracer,
4515 NULL,
4516 &ftrace_graph_fops);
4517 trace_create_file("set_graph_notrace", 0444, d_tracer,
4518 NULL,
4519 &ftrace_graph_notrace_fops);
4520 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4521
4522 return 0;
4523 }
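/*
 * Summary of the control files created above (modes as passed to
 * trace_create_file()):
 *
 *	available_filter_functions  0444  every traceable function
 *	enabled_functions           0444  functions with a callback attached
 *	set_ftrace_filter           0644  filter_hash of global_ops
 *	set_ftrace_notrace          0644  notrace_hash of global_ops
 *	set_graph_function          0444  graph filter (if configured)
 *	set_graph_notrace           0444  graph notrace (if configured)
 */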
4524
4525 static int ftrace_cmp_ips(const void *a, const void *b)
4526 {
4527 const unsigned long *ipa = a;
4528 const unsigned long *ipb = b;
4529
4530 if (*ipa > *ipb)
4531 return 1;
4532 if (*ipa < *ipb)
4533 return -1;
4534 return 0;
4535 }
4536
4537 static void ftrace_swap_ips(void *a, void *b, int size)
4538 {
4539 unsigned long *ipa = a;
4540 unsigned long *ipb = b;
4541 unsigned long t;
4542
4543 t = *ipa;
4544 *ipa = *ipb;
4545 *ipb = t;
4546 }
4547
4548 static int ftrace_process_locs(struct module *mod,
4549 unsigned long *start,
4550 unsigned long *end)
4551 {
4552 struct ftrace_page *start_pg;
4553 struct ftrace_page *pg;
4554 struct dyn_ftrace *rec;
4555 unsigned long count;
4556 unsigned long *p;
4557 unsigned long addr;
4558 unsigned long flags = 0; /* Shut up gcc */
4559 int ret = -ENOMEM;
4560
4561 count = end - start;
4562
4563 if (!count)
4564 return 0;
4565
4566 sort(start, count, sizeof(*start),
4567 ftrace_cmp_ips, ftrace_swap_ips);
4568
4569 start_pg = ftrace_allocate_pages(count);
4570 if (!start_pg)
4571 return -ENOMEM;
4572
4573 mutex_lock(&ftrace_lock);
4574
4575 /*
4576 	 * The core kernel and each module need their own pages, as
4577 * modules will free them when they are removed.
4578 * Force a new page to be allocated for modules.
4579 */
4580 if (!mod) {
4581 WARN_ON(ftrace_pages || ftrace_pages_start);
4582 /* First initialization */
4583 ftrace_pages = ftrace_pages_start = start_pg;
4584 } else {
4585 if (!ftrace_pages)
4586 goto out;
4587
4588 if (WARN_ON(ftrace_pages->next)) {
4589 /* Hmm, we have free pages? */
4590 while (ftrace_pages->next)
4591 ftrace_pages = ftrace_pages->next;
4592 }
4593
4594 ftrace_pages->next = start_pg;
4595 }
4596
4597 p = start;
4598 pg = start_pg;
4599 while (p < end) {
4600 addr = ftrace_call_adjust(*p++);
4601 /*
4602 * Some architecture linkers will pad between
4603 * the different mcount_loc sections of different
4604 * object files to satisfy alignments.
4605 * Skip any NULL pointers.
4606 */
4607 if (!addr)
4608 continue;
4609
4610 if (pg->index == pg->size) {
4611 /* We should have allocated enough */
4612 if (WARN_ON(!pg->next))
4613 break;
4614 pg = pg->next;
4615 }
4616
4617 rec = &pg->records[pg->index++];
4618 rec->ip = addr;
4619 }
4620
4621 /* We should have used all pages */
4622 WARN_ON(pg->next);
4623
4624 /* Assign the last page to ftrace_pages */
4625 ftrace_pages = pg;
4626
4627 /*
4628 * We only need to disable interrupts on start up
4629 * because we are modifying code that an interrupt
4630 * may execute, and the modification is not atomic.
4631 * But for modules, nothing runs the code we modify
4632 * until we are finished with it, and there's no
4633 * reason to cause large interrupt latencies while we do it.
4634 */
4635 if (!mod)
4636 local_irq_save(flags);
4637 ftrace_update_code(mod, start_pg);
4638 if (!mod)
4639 local_irq_restore(flags);
4640 ret = 0;
4641 out:
4642 mutex_unlock(&ftrace_lock);
4643
4644 return ret;
4645 }
4646
4647 #ifdef CONFIG_MODULES
4648
4649 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4650
4651 void ftrace_release_mod(struct module *mod)
4652 {
4653 struct dyn_ftrace *rec;
4654 struct ftrace_page **last_pg;
4655 struct ftrace_page *pg;
4656 int order;
4657
4658 mutex_lock(&ftrace_lock);
4659
4660 if (ftrace_disabled)
4661 goto out_unlock;
4662
4663 /*
4664 * Each module has its own ftrace_pages, remove
4665 * them from the list.
4666 */
4667 last_pg = &ftrace_pages_start;
4668 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4669 rec = &pg->records[0];
4670 if (within_module_core(rec->ip, mod)) {
4671 /*
4672 * As core pages are first, the first
4673 * page should never be a module page.
4674 */
4675 if (WARN_ON(pg == ftrace_pages_start))
4676 goto out_unlock;
4677
4678 /* Check if we are deleting the last page */
4679 if (pg == ftrace_pages)
4680 ftrace_pages = next_to_ftrace_page(last_pg);
4681
4682 *last_pg = pg->next;
4683 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4684 free_pages((unsigned long)pg->records, order);
4685 kfree(pg);
4686 } else
4687 last_pg = &pg->next;
4688 }
4689 out_unlock:
4690 mutex_unlock(&ftrace_lock);
4691 }
4692
4693 static void ftrace_init_module(struct module *mod,
4694 unsigned long *start, unsigned long *end)
4695 {
4696 if (ftrace_disabled || start == end)
4697 return;
4698 ftrace_process_locs(mod, start, end);
4699 }
4700
4701 void ftrace_module_init(struct module *mod)
4702 {
4703 ftrace_init_module(mod, mod->ftrace_callsites,
4704 mod->ftrace_callsites +
4705 mod->num_ftrace_callsites);
4706 }
4707
4708 static int ftrace_module_notify_exit(struct notifier_block *self,
4709 unsigned long val, void *data)
4710 {
4711 struct module *mod = data;
4712
4713 if (val == MODULE_STATE_GOING)
4714 ftrace_release_mod(mod);
4715
4716 return 0;
4717 }
4718 #else
4719 static int ftrace_module_notify_exit(struct notifier_block *self,
4720 unsigned long val, void *data)
4721 {
4722 return 0;
4723 }
4724 #endif /* CONFIG_MODULES */
4725
4726 struct notifier_block ftrace_module_exit_nb = {
4727 .notifier_call = ftrace_module_notify_exit,
4728 .priority = INT_MIN, /* Run after anything that can remove kprobes */
4729 };
4730
4731 void __init ftrace_init(void)
4732 {
4733 extern unsigned long __start_mcount_loc[];
4734 extern unsigned long __stop_mcount_loc[];
4735 unsigned long count, flags;
4736 int ret;
4737
4738 local_irq_save(flags);
4739 ret = ftrace_dyn_arch_init();
4740 local_irq_restore(flags);
4741 if (ret)
4742 goto failed;
4743
4744 count = __stop_mcount_loc - __start_mcount_loc;
4745 if (!count) {
4746 pr_info("ftrace: No functions to be traced?\n");
4747 goto failed;
4748 }
4749
4750 pr_info("ftrace: allocating %ld entries in %ld pages\n",
4751 count, count / ENTRIES_PER_PAGE + 1);
4752
4753 last_ftrace_enabled = ftrace_enabled = 1;
4754
4755 ret = ftrace_process_locs(NULL,
4756 __start_mcount_loc,
4757 __stop_mcount_loc);
4758
4759 ret = register_module_notifier(&ftrace_module_exit_nb);
4760 if (ret)
4761 pr_warning("Failed to register trace ftrace module exit notifier\n");
4762
4763 set_ftrace_early_filters();
4764
4765 return;
4766 failed:
4767 ftrace_disabled = 1;
4768 }
4769
4770 #else
4771
4772 static struct ftrace_ops global_ops = {
4773 .func = ftrace_stub,
4774 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4775 };
4776
4777 static int __init ftrace_nodyn_init(void)
4778 {
4779 ftrace_enabled = 1;
4780 return 0;
4781 }
4782 core_initcall(ftrace_nodyn_init);
4783
4784 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
4785 static inline void ftrace_startup_enable(int command) { }
4786 static inline void ftrace_startup_all(int command) { }
4787 /* Keep as macros so we do not need to define the commands */
4788 # define ftrace_startup(ops, command) \
4789 ({ \
4790 int ___ret = __register_ftrace_function(ops); \
4791 if (!___ret) \
4792 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4793 ___ret; \
4794 })
4795 # define ftrace_shutdown(ops, command) \
4796 ({ \
4797 int ___ret = __unregister_ftrace_function(ops); \
4798 if (!___ret) \
4799 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
4800 ___ret; \
4801 })
4802
4803 # define ftrace_startup_sysctl() do { } while (0)
4804 # define ftrace_shutdown_sysctl() do { } while (0)
4805
4806 static inline int
4807 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4808 {
4809 return 1;
4810 }
4811
4812 #endif /* CONFIG_DYNAMIC_FTRACE */
4813
4814 __init void ftrace_init_global_array_ops(struct trace_array *tr)
4815 {
4816 tr->ops = &global_ops;
4817 tr->ops->private = tr;
4818 }
4819
4820 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
4821 {
4822 /* If we filter on pids, update to use the pid function */
4823 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4824 if (WARN_ON(tr->ops->func != ftrace_stub))
4825 printk("ftrace ops had %pS for function\n",
4826 tr->ops->func);
4827 /* Only the top level instance does pid tracing */
4828 if (!list_empty(&ftrace_pids)) {
4829 set_ftrace_pid_function(func);
4830 func = ftrace_pid_func;
4831 }
4832 }
4833 tr->ops->func = func;
4834 tr->ops->private = tr;
4835 }
4836
4837 void ftrace_reset_array_ops(struct trace_array *tr)
4838 {
4839 tr->ops->func = ftrace_stub;
4840 }
4841
4842 static void
4843 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4844 struct ftrace_ops *op, struct pt_regs *regs)
4845 {
4846 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4847 return;
4848
4849 /*
4850 	 * Some of the ops may be dynamically allocated;
4851 * they must be freed after a synchronize_sched().
4852 */
4853 preempt_disable_notrace();
4854 trace_recursion_set(TRACE_CONTROL_BIT);
4855
4856 /*
4857 	 * Control funcs (perf) use RCU. Only trace if
4858 * RCU is currently active.
4859 */
4860 if (!rcu_is_watching())
4861 goto out;
4862
4863 do_for_each_ftrace_op(op, ftrace_control_list) {
4864 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4865 !ftrace_function_local_disabled(op) &&
4866 ftrace_ops_test(op, ip, regs))
4867 op->func(ip, parent_ip, op, regs);
4868 } while_for_each_ftrace_op(op);
4869 out:
4870 trace_recursion_clear(TRACE_CONTROL_BIT);
4871 preempt_enable_notrace();
4872 }
4873
4874 static struct ftrace_ops control_ops = {
4875 .func = ftrace_ops_control_func,
4876 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4877 INIT_OPS_HASH(control_ops)
4878 };
4879
4880 static inline void
4881 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4882 struct ftrace_ops *ignored, struct pt_regs *regs)
4883 {
4884 struct ftrace_ops *op;
4885 int bit;
4886
4887 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4888 if (bit < 0)
4889 return;
4890
4891 /*
4892 	 * Some of the ops may be dynamically allocated;
4893 * they must be freed after a synchronize_sched().
4894 */
4895 preempt_disable_notrace();
4896 do_for_each_ftrace_op(op, ftrace_ops_list) {
4897 if (ftrace_ops_test(op, ip, regs)) {
4898 if (FTRACE_WARN_ON(!op->func)) {
4899 pr_warn("op=%p %pS\n", op, op);
4900 goto out;
4901 }
4902 op->func(ip, parent_ip, op, regs);
4903 }
4904 } while_for_each_ftrace_op(op);
4905 out:
4906 preempt_enable_notrace();
4907 trace_clear_recursion(bit);
4908 }
4909
4910 /*
4911 * Some archs only support passing ip and parent_ip. Even though
4912 * the list function ignores the op parameter, we do not want any
4913 * C side effects, where a function is called without the caller
4914 * sending a third parameter.
4915  * Archs are expected to support both the regs and ftrace_ops at the
4916  * same time. If they support ftrace_ops, it is assumed they support regs.
4917  * If callbacks want to use regs, they must either check for regs
4918  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4919  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4920  * An architecture can pass partial regs with ftrace_ops and still
4921  * set ARCH_SUPPORTS_FTRACE_OPS.
4922 */
4923 #if ARCH_SUPPORTS_FTRACE_OPS
4924 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4925 struct ftrace_ops *op, struct pt_regs *regs)
4926 {
4927 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4928 }
4929 #else
4930 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4931 {
4932 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4933 }
4934 #endif
4935
4936 /*
4937 * If there's only one function registered but it does not support
4938 * recursion, this function will be called by the mcount trampoline.
4939 * This function will handle recursion protection.
4940 */
4941 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
4942 struct ftrace_ops *op, struct pt_regs *regs)
4943 {
4944 int bit;
4945
4946 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4947 if (bit < 0)
4948 return;
4949
4950 op->func(ip, parent_ip, op, regs);
4951
4952 trace_clear_recursion(bit);
4953 }
4954
4955 /**
4956 * ftrace_ops_get_func - get the function a trampoline should call
4957 * @ops: the ops to get the function for
4958 *
4959 * Normally the mcount trampoline will call the ops->func, but there
4960 * are times that it should not. For example, if the ops does not
4961 * have its own recursion protection, then it should call the
4962 * ftrace_ops_recurs_func() instead.
4963 *
4964 * Returns the function that the trampoline should call for @ops.
4965 */
4966 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
4967 {
4968 /*
4969 * If this is a dynamic ops or we force list func,
4970 * then it needs to call the list anyway.
4971 */
4972 if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
4973 return ftrace_ops_list_func;
4974
4975 /*
4976 * If the func handles its own recursion, call it directly.
4977 * Otherwise call the recursion protected function that
4978 * will call the ftrace ops function.
4979 */
4980 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
4981 return ftrace_ops_recurs_func;
4982
4983 return ops->func;
4984 }
4985
4986 static void clear_ftrace_swapper(void)
4987 {
4988 struct task_struct *p;
4989 int cpu;
4990
4991 get_online_cpus();
4992 for_each_online_cpu(cpu) {
4993 p = idle_task(cpu);
4994 clear_tsk_trace_trace(p);
4995 }
4996 put_online_cpus();
4997 }
4998
4999 static void set_ftrace_swapper(void)
5000 {
5001 struct task_struct *p;
5002 int cpu;
5003
5004 get_online_cpus();
5005 for_each_online_cpu(cpu) {
5006 p = idle_task(cpu);
5007 set_tsk_trace_trace(p);
5008 }
5009 put_online_cpus();
5010 }
5011
5012 static void clear_ftrace_pid(struct pid *pid)
5013 {
5014 struct task_struct *p;
5015
5016 rcu_read_lock();
5017 do_each_pid_task(pid, PIDTYPE_PID, p) {
5018 clear_tsk_trace_trace(p);
5019 } while_each_pid_task(pid, PIDTYPE_PID, p);
5020 rcu_read_unlock();
5021
5022 put_pid(pid);
5023 }
5024
5025 static void set_ftrace_pid(struct pid *pid)
5026 {
5027 struct task_struct *p;
5028
5029 rcu_read_lock();
5030 do_each_pid_task(pid, PIDTYPE_PID, p) {
5031 set_tsk_trace_trace(p);
5032 } while_each_pid_task(pid, PIDTYPE_PID, p);
5033 rcu_read_unlock();
5034 }
5035
5036 static void clear_ftrace_pid_task(struct pid *pid)
5037 {
5038 if (pid == ftrace_swapper_pid)
5039 clear_ftrace_swapper();
5040 else
5041 clear_ftrace_pid(pid);
5042 }
5043
5044 static void set_ftrace_pid_task(struct pid *pid)
5045 {
5046 if (pid == ftrace_swapper_pid)
5047 set_ftrace_swapper();
5048 else
5049 set_ftrace_pid(pid);
5050 }
5051
5052 static int ftrace_pid_add(int p)
5053 {
5054 struct pid *pid;
5055 struct ftrace_pid *fpid;
5056 int ret = -EINVAL;
5057
5058 mutex_lock(&ftrace_lock);
5059
5060 if (!p)
5061 pid = ftrace_swapper_pid;
5062 else
5063 pid = find_get_pid(p);
5064
5065 if (!pid)
5066 goto out;
5067
5068 ret = 0;
5069
5070 list_for_each_entry(fpid, &ftrace_pids, list)
5071 if (fpid->pid == pid)
5072 goto out_put;
5073
5074 ret = -ENOMEM;
5075
5076 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
5077 if (!fpid)
5078 goto out_put;
5079
5080 list_add(&fpid->list, &ftrace_pids);
5081 fpid->pid = pid;
5082
5083 set_ftrace_pid_task(pid);
5084
5085 ftrace_update_pid_func();
5086
5087 ftrace_startup_all(0);
5088
5089 mutex_unlock(&ftrace_lock);
5090 return 0;
5091
5092 out_put:
5093 if (pid != ftrace_swapper_pid)
5094 put_pid(pid);
5095
5096 out:
5097 mutex_unlock(&ftrace_lock);
5098 return ret;
5099 }
5100
5101 static void ftrace_pid_reset(void)
5102 {
5103 struct ftrace_pid *fpid, *safe;
5104
5105 mutex_lock(&ftrace_lock);
5106 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
5107 struct pid *pid = fpid->pid;
5108
5109 clear_ftrace_pid_task(pid);
5110
5111 list_del(&fpid->list);
5112 kfree(fpid);
5113 }
5114
5115 ftrace_update_pid_func();
5116 ftrace_startup_all(0);
5117
5118 mutex_unlock(&ftrace_lock);
5119 }
5120
5121 static void *fpid_start(struct seq_file *m, loff_t *pos)
5122 {
5123 mutex_lock(&ftrace_lock);
5124
5125 if (list_empty(&ftrace_pids) && (!*pos))
5126 return (void *) 1;
5127
5128 return seq_list_start(&ftrace_pids, *pos);
5129 }
5130
5131 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5132 {
5133 if (v == (void *)1)
5134 return NULL;
5135
5136 return seq_list_next(v, &ftrace_pids, pos);
5137 }
5138
5139 static void fpid_stop(struct seq_file *m, void *p)
5140 {
5141 mutex_unlock(&ftrace_lock);
5142 }
5143
5144 static int fpid_show(struct seq_file *m, void *v)
5145 {
5146 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
5147
5148 if (v == (void *)1) {
5149 seq_printf(m, "no pid\n");
5150 return 0;
5151 }
5152
5153 if (fpid->pid == ftrace_swapper_pid)
5154 seq_printf(m, "swapper tasks\n");
5155 else
5156 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5157
5158 return 0;
5159 }
5160
5161 static const struct seq_operations ftrace_pid_sops = {
5162 .start = fpid_start,
5163 .next = fpid_next,
5164 .stop = fpid_stop,
5165 .show = fpid_show,
5166 };
5167
5168 static int
5169 ftrace_pid_open(struct inode *inode, struct file *file)
5170 {
5171 int ret = 0;
5172
5173 if ((file->f_mode & FMODE_WRITE) &&
5174 (file->f_flags & O_TRUNC))
5175 ftrace_pid_reset();
5176
5177 if (file->f_mode & FMODE_READ)
5178 ret = seq_open(file, &ftrace_pid_sops);
5179
5180 return ret;
5181 }
5182
5183 static ssize_t
5184 ftrace_pid_write(struct file *filp, const char __user *ubuf,
5185 size_t cnt, loff_t *ppos)
5186 {
5187 char buf[64], *tmp;
5188 long val;
5189 int ret;
5190
5191 if (cnt >= sizeof(buf))
5192 return -EINVAL;
5193
5194 if (copy_from_user(&buf, ubuf, cnt))
5195 return -EFAULT;
5196
5197 buf[cnt] = 0;
5198
5199 /*
5200 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5201 	 * to clear the filter quietly.
5202 */
5203 tmp = strstrip(buf);
5204 if (strlen(tmp) == 0)
5205 return 1;
5206
5207 ret = kstrtol(tmp, 10, &val);
5208 if (ret < 0)
5209 return ret;
5210
5211 ret = ftrace_pid_add(val);
5212
5213 return ret ? ret : cnt;
5214 }
5215
5216 static int
5217 ftrace_pid_release(struct inode *inode, struct file *file)
5218 {
5219 if (file->f_mode & FMODE_READ)
5220 seq_release(inode, file);
5221
5222 return 0;
5223 }
5224
5225 static const struct file_operations ftrace_pid_fops = {
5226 .open = ftrace_pid_open,
5227 .write = ftrace_pid_write,
5228 .read = seq_read,
5229 .llseek = tracing_lseek,
5230 .release = ftrace_pid_release,
5231 };
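/*
 * Illustrative use of the set_ftrace_pid file registered below in
 * ftrace_init_tracefs():
 *
 *	# echo 1234 > set_ftrace_pid	(trace only PID 1234)
 *	# echo 0 > set_ftrace_pid	(trace only the per-cpu idle tasks)
 *	# echo > set_ftrace_pid		(clear the PID filter quietly)
 *	# cat set_ftrace_pid		(list the PIDs being traced)
 */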
5232
5233 static __init int ftrace_init_tracefs(void)
5234 {
5235 struct dentry *d_tracer;
5236
5237 d_tracer = tracing_init_dentry();
5238 if (IS_ERR(d_tracer))
5239 return 0;
5240
5241 ftrace_init_dyn_tracefs(d_tracer);
5242
5243 trace_create_file("set_ftrace_pid", 0644, d_tracer,
5244 NULL, &ftrace_pid_fops);
5245
5246 ftrace_profile_tracefs(d_tracer);
5247
5248 return 0;
5249 }
5250 fs_initcall(ftrace_init_tracefs);
5251
5252 /**
5253 * ftrace_kill - kill ftrace
5254 *
5255  * This function should be used by panic code. It stops ftrace
5256  * but in a not so nice way: ftrace_disabled is set and is never
5257  * cleared again, so tracing cannot be re-enabled afterwards.
5258 */
5259 void ftrace_kill(void)
5260 {
5261 ftrace_disabled = 1;
5262 ftrace_enabled = 0;
5263 clear_ftrace_function();
5264 }
5265
5266 /**
5267  * ftrace_is_dead - Test if ftrace is dead or not.
5268 */
5269 int ftrace_is_dead(void)
5270 {
5271 return ftrace_disabled;
5272 }
5273
5274 /**
5275 * register_ftrace_function - register a function for profiling
5276 * @ops - ops structure that holds the function for profiling.
5277 *
5278 * Register a function to be called by all functions in the
5279 * kernel.
5280 *
5281 * Note: @ops->func and all the functions it calls must be labeled
5282 * with "notrace", otherwise it will go into a
5283 * recursive loop.
5284 */
5285 int register_ftrace_function(struct ftrace_ops *ops)
5286 {
5287 int ret = -1;
5288
5289 ftrace_ops_init(ops);
5290
5291 mutex_lock(&ftrace_lock);
5292
5293 ret = ftrace_startup(ops, 0);
5294
5295 mutex_unlock(&ftrace_lock);
5296
5297 return ret;
5298 }
5299 EXPORT_SYMBOL_GPL(register_ftrace_function);
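/*
 * A minimal registration sketch ("my_func" and "my_ops" are
 * hypothetical). Set FTRACE_OPS_FL_RECURSION_SAFE only if the
 * callback protects against its own recursion; otherwise leave it
 * unset and ftrace_ops_get_func() above wraps the call in
 * ftrace_ops_recurs_func():
 *
 *	static void notrace my_func(unsigned long ip,
 *				    unsigned long parent_ip,
 *				    struct ftrace_ops *op,
 *				    struct pt_regs *regs)
 *	{
 *		(must not call anything traceable)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	(tracing runs)
 *	unregister_ftrace_function(&my_ops);
 */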
5300
5301 /**
5302 * unregister_ftrace_function - unregister a function for profiling.
5303 * @ops - ops structure that holds the function to unregister
5304 *
5305 * Unregister a function that was added to be called by ftrace profiling.
5306 */
5307 int unregister_ftrace_function(struct ftrace_ops *ops)
5308 {
5309 int ret;
5310
5311 mutex_lock(&ftrace_lock);
5312 ret = ftrace_shutdown(ops, 0);
5313 mutex_unlock(&ftrace_lock);
5314
5315 return ret;
5316 }
5317 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5318
5319 int
5320 ftrace_enable_sysctl(struct ctl_table *table, int write,
5321 void __user *buffer, size_t *lenp,
5322 loff_t *ppos)
5323 {
5324 int ret = -ENODEV;
5325
5326 mutex_lock(&ftrace_lock);
5327
5328 if (unlikely(ftrace_disabled))
5329 goto out;
5330
5331 ret = proc_dointvec(table, write, buffer, lenp, ppos);
5332
5333 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5334 goto out;
5335
5336 last_ftrace_enabled = !!ftrace_enabled;
5337
5338 if (ftrace_enabled) {
5339
5340 /* we are starting ftrace again */
5341 if (ftrace_ops_list != &ftrace_list_end)
5342 update_ftrace_function();
5343
5344 ftrace_startup_sysctl();
5345
5346 } else {
5347 /* stopping ftrace calls (just send to ftrace_stub) */
5348 ftrace_trace_function = ftrace_stub;
5349
5350 ftrace_shutdown_sysctl();
5351 }
5352
5353 out:
5354 mutex_unlock(&ftrace_lock);
5355 return ret;
5356 }
5357
5358 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5359
5360 static struct ftrace_ops graph_ops = {
5361 .func = ftrace_stub,
5362 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5363 FTRACE_OPS_FL_INITIALIZED |
5364 FTRACE_OPS_FL_STUB,
5365 #ifdef FTRACE_GRAPH_TRAMP_ADDR
5366 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
5367 #endif
5368 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5369 };
5370
5371 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5372 {
5373 return 0;
5374 }
5375
5376 /* The callbacks that hook a function */
5377 trace_func_graph_ret_t ftrace_graph_return =
5378 (trace_func_graph_ret_t)ftrace_stub;
5379 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5380 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
5381
5382 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
5383 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5384 {
5385 int i;
5386 int ret = 0;
5387 unsigned long flags;
5388 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5389 struct task_struct *g, *t;
5390
5391 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5392 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5393 * sizeof(struct ftrace_ret_stack),
5394 GFP_KERNEL);
5395 if (!ret_stack_list[i]) {
5396 start = 0;
5397 end = i;
5398 ret = -ENOMEM;
5399 goto free;
5400 }
5401 }
5402
5403 read_lock_irqsave(&tasklist_lock, flags);
5404 do_each_thread(g, t) {
5405 if (start == end) {
5406 ret = -EAGAIN;
5407 goto unlock;
5408 }
5409
5410 if (t->ret_stack == NULL) {
5411 atomic_set(&t->tracing_graph_pause, 0);
5412 atomic_set(&t->trace_overrun, 0);
5413 t->curr_ret_stack = -1;
5414 /* Make sure the tasks see the -1 first: */
5415 smp_wmb();
5416 t->ret_stack = ret_stack_list[start++];
5417 }
5418 } while_each_thread(g, t);
5419
5420 unlock:
5421 read_unlock_irqrestore(&tasklist_lock, flags);
5422 free:
5423 for (i = start; i < end; i++)
5424 kfree(ret_stack_list[i]);
5425 return ret;
5426 }
5427
5428 static void
5429 ftrace_graph_probe_sched_switch(void *ignore,
5430 struct task_struct *prev, struct task_struct *next)
5431 {
5432 unsigned long long timestamp;
5433 int index;
5434
5435 /*
5436 	 * Does the user want to count the time a function was asleep?
5437 * If so, do not update the time stamps.
5438 */
5439 if (trace_flags & TRACE_ITER_SLEEP_TIME)
5440 return;
5441
5442 timestamp = trace_clock_local();
5443
5444 prev->ftrace_timestamp = timestamp;
5445
5446 /* only process tasks that we timestamped */
5447 if (!next->ftrace_timestamp)
5448 return;
5449
5450 /*
5451 * Update all the counters in next to make up for the
5452 * time next was sleeping.
5453 */
5454 timestamp -= next->ftrace_timestamp;
5455
5456 for (index = next->curr_ret_stack; index >= 0; index--)
5457 next->ret_stack[index].calltime += timestamp;
5458 }
5459
5460 /* Allocate a return stack for each task */
5461 static int start_graph_tracing(void)
5462 {
5463 struct ftrace_ret_stack **ret_stack_list;
5464 int ret, cpu;
5465
5466 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5467 sizeof(struct ftrace_ret_stack *),
5468 GFP_KERNEL);
5469
5470 if (!ret_stack_list)
5471 return -ENOMEM;
5472
5473 /* The cpu_boot init_task->ret_stack will never be freed */
5474 for_each_online_cpu(cpu) {
5475 if (!idle_task(cpu)->ret_stack)
5476 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5477 }
5478
5479 do {
5480 ret = alloc_retstack_tasklist(ret_stack_list);
5481 } while (ret == -EAGAIN);
5482
5483 if (!ret) {
5484 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5485 if (ret)
5486 pr_info("ftrace_graph: Couldn't activate tracepoint"
5487 " probe to kernel_sched_switch\n");
5488 }
5489
5490 kfree(ret_stack_list);
5491 return ret;
5492 }
5493
5494 /*
5495 * Hibernation protection.
5496  * The state of the current task is too unstable during
5497 * suspend/restore to disk. We want to protect against that.
5498 */
5499 static int
5500 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5501 void *unused)
5502 {
5503 switch (state) {
5504 case PM_HIBERNATION_PREPARE:
5505 pause_graph_tracing();
5506 break;
5507
5508 case PM_POST_HIBERNATION:
5509 unpause_graph_tracing();
5510 break;
5511 }
5512 return NOTIFY_DONE;
5513 }
5514
5515 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5516 {
5517 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5518 return 0;
5519 return __ftrace_graph_entry(trace);
5520 }
5521
5522 /*
5523 * The function graph tracer should only trace the functions defined
5524 * by set_ftrace_filter and set_ftrace_notrace. If another function
5525 * tracer ops is registered, the graph tracer requires testing the
5526  * function against the global ops, and must not just trace any
5527  * function that any ftrace_ops has registered.
5528 */
5529 static void update_function_graph_func(void)
5530 {
5531 struct ftrace_ops *op;
5532 bool do_test = false;
5533
5534 /*
5535 * The graph and global ops share the same set of functions
5536 * to test. If any other ops is on the list, then
5537 	 * the graph tracing needs to test if it's the function
5538 * it should call.
5539 */
5540 do_for_each_ftrace_op(op, ftrace_ops_list) {
5541 if (op != &global_ops && op != &graph_ops &&
5542 op != &ftrace_list_end) {
5543 do_test = true;
5544 /* in double loop, break out with goto */
5545 goto out;
5546 }
5547 } while_for_each_ftrace_op(op);
5548 out:
5549 if (do_test)
5550 ftrace_graph_entry = ftrace_graph_entry_test;
5551 else
5552 ftrace_graph_entry = __ftrace_graph_entry;
5553 }
5554
5555 static struct notifier_block ftrace_suspend_notifier = {
5556 .notifier_call = ftrace_suspend_notifier_call,
5557 };
5558
5559 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5560 trace_func_graph_ent_t entryfunc)
5561 {
5562 int ret = 0;
5563
5564 mutex_lock(&ftrace_lock);
5565
5566 /* we currently allow only one tracer registered at a time */
5567 if (ftrace_graph_active) {
5568 ret = -EBUSY;
5569 goto out;
5570 }
5571
5572 register_pm_notifier(&ftrace_suspend_notifier);
5573
5574 ftrace_graph_active++;
5575 ret = start_graph_tracing();
5576 if (ret) {
5577 ftrace_graph_active--;
5578 goto out;
5579 }
5580
5581 ftrace_graph_return = retfunc;
5582
5583 /*
5584 * Update the indirect function to the entryfunc, and the
5585 * function that gets called to the entry_test first. Then
5586 * call the update fgraph entry function to determine if
5587 * the entryfunc should be called directly or not.
5588 */
5589 __ftrace_graph_entry = entryfunc;
5590 ftrace_graph_entry = ftrace_graph_entry_test;
5591 update_function_graph_func();
5592
5593 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5594
5595 out:
5596 mutex_unlock(&ftrace_lock);
5597 return ret;
5598 }
5599
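/*
 * Sketch of a graph-tracer client (hypothetical callback names; the
 * callback types are the ones declared above):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	(nonzero means: trace this function)
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		(called when the traced function returns)
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 *	unregister_ftrace_graph();
 */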
5600 void unregister_ftrace_graph(void)
5601 {
5602 mutex_lock(&ftrace_lock);
5603
5604 if (unlikely(!ftrace_graph_active))
5605 goto out;
5606
5607 ftrace_graph_active--;
5608 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5609 ftrace_graph_entry = ftrace_graph_entry_stub;
5610 __ftrace_graph_entry = ftrace_graph_entry_stub;
5611 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5612 unregister_pm_notifier(&ftrace_suspend_notifier);
5613 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5614
5615 out:
5616 mutex_unlock(&ftrace_lock);
5617 }
5618
5619 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5620
5621 static void
5622 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5623 {
5624 atomic_set(&t->tracing_graph_pause, 0);
5625 atomic_set(&t->trace_overrun, 0);
5626 t->ftrace_timestamp = 0;
5627 /* make curr_ret_stack visible before we add the ret_stack */
5628 smp_wmb();
5629 t->ret_stack = ret_stack;
5630 }
5631
5632 /*
5633 * Allocate a return stack for the idle task. May be the first
5634 * time through, or it may be done by CPU hotplug online.
5635 */
5636 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5637 {
5638 t->curr_ret_stack = -1;
5639 /*
5640 * The idle task has no parent, it either has its own
5641 * stack or no stack at all.
5642 */
5643 if (t->ret_stack)
5644 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5645
5646 if (ftrace_graph_active) {
5647 struct ftrace_ret_stack *ret_stack;
5648
5649 ret_stack = per_cpu(idle_ret_stack, cpu);
5650 if (!ret_stack) {
5651 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5652 * sizeof(struct ftrace_ret_stack),
5653 GFP_KERNEL);
5654 if (!ret_stack)
5655 return;
5656 per_cpu(idle_ret_stack, cpu) = ret_stack;
5657 }
5658 graph_init_task(t, ret_stack);
5659 }
5660 }
5661
5662 /* Allocate a return stack for newly created task */
5663 void ftrace_graph_init_task(struct task_struct *t)
5664 {
5665 /* Make sure we do not use the parent ret_stack */
5666 t->ret_stack = NULL;
5667 t->curr_ret_stack = -1;
5668
5669 if (ftrace_graph_active) {
5670 struct ftrace_ret_stack *ret_stack;
5671
5672 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5673 * sizeof(struct ftrace_ret_stack),
5674 GFP_KERNEL);
5675 if (!ret_stack)
5676 return;
5677 graph_init_task(t, ret_stack);
5678 }
5679 }
5680
5681 void ftrace_graph_exit_task(struct task_struct *t)
5682 {
5683 struct ftrace_ret_stack *ret_stack = t->ret_stack;
5684
5685 t->ret_stack = NULL;
5686 /* NULL must become visible to IRQs before we free it: */
5687 barrier();
5688
5689 kfree(ret_stack);
5690 }
5691 #endif
5692