/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
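
/*
 * Added note (not in the original file): these wrappers guard internal
 * consistency checks.  A hypothetical use in an update path could be
 *
 *	FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
 *
 * If a check ever fires, ftrace_kill() shuts tracing down completely;
 * continuing to patch kernel text from inconsistent state would be far
 * more dangerous than losing the tracer.
 */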

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a short lag before every CPU
 * stops calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
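
/*
 * Ordering sketch (added commentary): the writer above publishes a new
 * entry with
 *
 *	ops->next = ftrace_list;
 *	smp_wmb();
 *	ftrace_list = ops;
 *
 * while ftrace_list_func() walks the list with read_barrier_depends()
 * between dependent loads.  A concurrent reader therefore either misses
 * the new ops entirely or sees it with a valid ->next pointer; it can
 * never chase an uninitialized link.
 */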

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing the objcopy output of the text section. Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000
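
/*
 * Worked example (sizes are arch and config dependent, so only a
 * sketch): on a 64-bit build with PAGE_SIZE = 4096, a 16-byte
 * struct ftrace_page header and a struct dyn_ftrace of, say, 32 bytes,
 * ENTRIES_PER_PAGE is 127, so the NR_TO_INIT estimate of 10000 records
 * costs roughly 80 pages up front.
 */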

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
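
/*
 * Added note: freed records form a singly linked free list threaded
 * through rec->ip -- the instruction-pointer field doubles as the
 * "next free" pointer.  That is why ftrace_alloc_dyn_node() below
 * reloads ftrace_free_records from (void *)rec->ip and complains if
 * FTRACE_FL_FREE is missing on a record it pulled off that list.
 */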

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}


static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
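
/*
 * Decision table for the "filtering is on and we are enabling" branch
 * above (added commentary); fl holds the FILTER and ENABLED bits:
 *
 *	FILTER  ENABLED  action
 *	   1       1     nothing (already traced)
 *	   0       0     nothing (nothing to undo)
 *	   0       1     clear ENABLED -> site gets a nop
 *	   1       0     set ENABLED   -> site gets the call
 *
 * The final ENABLED bit alone selects ftrace_make_call() versus
 * ftrace_make_nop().
 */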

static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
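
/*
 * Why stop_machine() (added note): rewriting live kernel text while
 * another CPU might be executing it risks that CPU fetching a
 * half-modified instruction.  stop_machine() parks every other CPU in
 * a known spot, so __ftrace_modify_code() can patch all the call sites
 * with no CPU racing across the bytes being changed.
 */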

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);
		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
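
/*
 * Pattern examples for the match types above (illustrative):
 *
 *	"sys_open"	MATCH_FULL		exact symbol only
 *	"sys_*"		MATCH_FRONT_ONLY	symbols starting with "sys_"
 *	"*_init"	MATCH_END_ONLY		symbols ending with "_init"
 *	"*lock*"	MATCH_MIDDLE_ONLY	symbols containing "lock"
 *
 * A leading '!' keeps the same matching but clears the flag instead of
 * setting it, removing functions from the set.
 */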

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
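
/*
 * In-kernel usage sketch (hypothetical caller, not part of this file):
 *
 *	char buf[] = "schedule*";
 *	ftrace_set_filter(buf, strlen(buf), 1);
 *
 * resets any previous filter and then traces only functions matching
 * "schedule*".  The buffer must be writable: ftrace_match() truncates
 * it in place at the wildcard.
 */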

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
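
/*
 * Userspace view (illustrative; the debugfs mount point may vary):
 * with debugfs at /sys/kernel/debug, these file_operations back e.g.
 *
 *	cat  /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sys_*'   > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo schedule  > /sys/kernel/debug/tracing/set_ftrace_notrace
 *
 * Opening for write without O_APPEND resets the previous set first
 * (see ftrace_regex_open()).
 */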

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				break;
			}
		}
	}
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

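/*
 * Added note: __start_mcount_loc/__stop_mcount_loc bracket the
 * __mcount_loc section of vmlinux, into which the build records one
 * address per mcount call site; ftrace_init() below sizes its tables
 * from the difference between the two markers.
 */
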
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};
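
/*
 * Write semantics (summarizing ftrace_pid_write() above):
 *
 *	echo 1234 > set_ftrace_pid	trace only PID 1234
 *	echo 0    > set_ftrace_pid	trace only the per-cpu idle tasks
 *	echo -1   > set_ftrace_pid	turn PID filtering back off
 *
 * Zero is special-cased to ftrace_swapper_pid because the idle
 * ("swapper") tasks cannot be looked up with find_get_pid().
 */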

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is turned off immediately,
 * with no synchronization or cleanup, which is also what makes
 * it safe to call from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
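
/*
 * Registration sketch (hypothetical caller, shown only as commentary):
 *
 *	static void notrace my_trace(unsigned long ip,
 *				     unsigned long parent_ip)
 *	{
 *		...count or log the call; everything reachable
 *		   from here must also be notrace...
 *	}
 *
 *	static struct ftrace_ops my_ops = { .func = my_trace };
 *
 *	register_ftrace_function(&my_ops);
 */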

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
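
/*
 * Usage sketch (hypothetical caller): the graph tracer supplies one
 * callback for function entry and one for return, e.g.
 *
 *	static int notrace my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	...nonzero means "record this call";
 *				   the stub above returns 0...
 *	}
 *	static void notrace my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */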

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif