// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to look into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"

#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static bool kill_ftrace_graph;
int ftrace_graph_active;

/* Enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* Add a function return address to the trace stack on thread info.*/
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}
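
/*
 * Each shadow-stack entry pushed above records, at minimum, the original
 * return address (ret), the traced function (func) and the entry
 * timestamp (calltime). The frame pointer (fp) and the location of the
 * return address on the stack (retp) are only recorded when the
 * architecture defines HAVE_FUNCTION_GRAPH_FP_TEST or
 * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR. This is only a summary of the fields
 * used in this file, not the full struct ftrace_ret_stack definition.
 */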

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}
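
/*
 * For illustration: a simplified sketch of how an architecture's entry
 * hook typically uses function_graph_enter(). The hook (often named
 * prepare_ftrace_return()) lives in arch code, not in this file, and its
 * exact signature varies by architecture. On success it redirects the
 * caller's return address to the arch's return_to_handler trampoline,
 * which later calls ftrace_return_to_handler() below.
 *
 *	void prepare_ftrace_return(unsigned long self_addr,
 *				   unsigned long *parent,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long return_hooker = (unsigned long)&return_to_handler;
 *
 *		if (!function_graph_enter(*parent, self_addr,
 *					  frame_pointer, parent))
 *			*parent = return_hooker;
 *	}
 */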

/* Retrieve a function return address from the trace stack on thread info.*/
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
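
/*
 * For illustration: the architecture's return_to_handler trampoline
 * (arch assembly, not part of this file) behaves roughly as follows.
 * It saves the traced function's return value registers, asks
 * ftrace_return_to_handler() for the original return address, restores
 * the registers and jumps to that address. This is only a sketch; the
 * real code is assembly and differs per architecture.
 *
 *	return_to_handler:
 *		save return value registers
 *		original = ftrace_return_to_handler(frame_pointer);
 *		restore return value registers
 *		jump to original
 */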

/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from
 * @idx: Index down the shadow stack
 *
 * Return the ret_struct on the shadow stack of the @task at the
 * call graph at @idx starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	idx = task->curr_ret_stack - idx;

	if (idx >= 0 && idx <= task->curr_ret_stack)
		return &task->ret_stack[idx];

	return NULL;
}
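
/*
 * For illustration: a caller can walk a task's shadow stack from the
 * most recent call outward by increasing @idx until NULL is returned.
 * A minimal sketch (the printing is only an example, not code from
 * this file):
 *
 *	struct ftrace_ret_stack *entry;
 *	int i = 0;
 *
 *	while ((entry = ftrace_graph_get_ret_stack(task, i++)))
 *		pr_info("depth %d: %ps returns to %pS\n",
 *			i - 1, (void *)entry->func, (void *)entry->ret);
 */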

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
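
/*
 * For illustration: a stack unwinder would typically filter every return
 * address it finds through ftrace_graph_ret_addr(), carrying the state
 * variable across calls. A minimal sketch, assuming 'addr' and 'addr_p'
 * come from the unwinder's own frame walk:
 *
 *	int graph_idx = 0;
 *
 *	for each frame found by the unwinder {
 *		addr = ftrace_graph_ret_addr(task, &graph_idx, addr, addr_p);
 *		record(addr);
 *	}
 */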

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				   FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_PID |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
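
/*
 * As used in this file, the entry callback takes a struct ftrace_graph_ent
 * and returns nonzero if the function should be traced (a return of 0
 * makes function_graph_enter() pop the entry it just pushed), while the
 * return callback takes a struct ftrace_graph_ret. This note only
 * summarizes the usage above; see the typedefs in linux/ftrace.h for the
 * authoritative definitions.
 */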

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] =
			kmalloc_array(FTRACE_RETFUNC_DEPTH,
				      sizeof(struct ftrace_ret_stack),
				      GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock(&tasklist_lock);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops has registered.
 */
void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack =
				kmalloc_array(FTRACE_RETFUNC_DEPTH,
					      sizeof(struct ftrace_ret_stack),
					      GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
					  sizeof(struct ftrace_ret_stack),
					  GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
				       sizeof(struct ftrace_ret_stack *),
				       GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = gops->retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = gops->entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
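
/*
 * For illustration: a minimal sketch of how a tracer registers its
 * callbacks with this API. The callback bodies and names here are
 * placeholders, not code from this file; only one fgraph_ops can be
 * registered at a time.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: trace this function
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	// register_ftrace_graph(&my_gops);
 *	// ...
 *	// unregister_ftrace_graph(&my_gops);
 */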

void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}