// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static inline void unwind_init_from_regs(struct unwind_state *state,
					 struct pt_regs *regs)
{
	unwind_init_common(state, current);

	state->fp = regs->regs[29];
	state->pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
	unwind_init_common(state, current);

	state->fp = (unsigned long)__builtin_frame_address(1);
	state->pc = (unsigned long)__builtin_return_address(0);
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static inline void unwind_init_from_task(struct unwind_state *state,
					 struct task_struct *task)
{
	unwind_init_common(state, task);

	state->fp = thread_saved_fp(task);
	state->pc = thread_saved_pc(task);
}

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static bool on_accessible_stack(const struct task_struct *tsk,
				unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, size, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, size, info))
		return true;
	if (on_overflow_stack(sp, size, info))
		return true;
	if (on_sdei_stack(sp, size, info))
		return true;

	return false;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B. (An illustrative sketch of the
 * frame record layout follows this function.)
 */
static int notrace unwind_next(struct unwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->fp;
	struct stack_info info;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_common(state, &info, on_accessible_stack, NULL);
	if (err)
		return err;

	state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
		(state->pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		/*
		 * The function graph tracer has modified a return address
		 * (LR) in a stack frame to hook a function return, so
		 * replace it with the original value.
		 */
		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
						(void *)state->fp);
		if (WARN_ON_ONCE(state->pc == orig_pc))
			return -EINVAL;
		state->pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
NOKPROBE_SYMBOL(unwind_next);
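
/*
 * Illustrative sketch (not part of the original source): on arm64 a frame
 * record is a pair of 64-bit words located at the address held in x29 (fp),
 * roughly:
 *
 *	struct frame_record {
 *		u64 fp;		// location of the caller's frame record (B)
 *		u64 lr;		// return address into the caller
 *	};
 *
 * unwind_next_common() reads this pair to step from record A to record B; the
 * checks described above reject a step whose location could only belong to a
 * malformed chain (e.g. a cycle). The struct name is hypothetical and is shown
 * only to make the layout concrete.
 */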

static void notrace unwind(struct unwind_state *state,
			   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(unwind);

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(dump_backtrace);

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct unwind_state state;

	if (regs) {
		if (task != current)
			return;
		unwind_init_from_regs(&state, regs);
	} else if (task == current) {
		unwind_init_from_caller(&state);
	} else {
		unwind_init_from_task(&state, task);
	}

	unwind(&state, consume_entry, cookie);
}
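
/*
 * Illustrative sketch (not part of the original source): how a caller might
 * consume arch_stack_walk() through a stack_trace_consume_fn callback. The
 * names trace_buf, save_entry() and save_current_trace() are hypothetical and
 * exist only for illustration; in practice generic helpers such as
 * stack_trace_save() wrap arch_stack_walk() in much the same way.
 *
 *	struct trace_buf {
 *		unsigned long entries[16];
 *		unsigned int len;
 *	};
 *
 *	static bool save_entry(void *cookie, unsigned long pc)
 *	{
 *		struct trace_buf *buf = cookie;
 *
 *		if (buf->len >= ARRAY_SIZE(buf->entries))
 *			return false;	// buffer full: stop the unwind
 *		buf->entries[buf->len++] = pc;
 *		return true;		// keep unwinding
 *	}
 *
 *	static unsigned int save_current_trace(struct trace_buf *buf)
 *	{
 *		buf->len = 0;
 *		arch_stack_walk(save_entry, buf, current, NULL);
 *		return buf->len;
 *	}
 */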