// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
}

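/*
 * Recover the original return address when the unwound PC is a trampoline.
 * Both the function graph tracer and kretprobes replace the saved lr with
 * the address of a return trampoline, so map that trampoline address back
 * to the original return address to report a meaningful trace.
 */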
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		if (!orig_pc)
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(&state->common);
	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

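/*
 * Walk the stack, passing each state to consume_state() until it returns
 * false or no more frames remain. The starting PC is recovered up front in
 * case the unwind begins at an ftrace or kretprobe return trampoline.
 */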
static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

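/*
 * The EFI runtime stack is only accessible when unwinding the current task
 * while it is executing EFI runtime services.
 */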
#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

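/*
 * Walk the kernel stack(s) of @task, passing each unwind state to
 * @consume_state. When @regs is non-NULL the unwind starts from the regs
 * (and is only valid for the current task); otherwise it starts from the
 * caller (current task) or from the task's saved context (blocked task).
 */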
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
EXPORT_SYMBOL_GPL(arch_stack_walk);

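/*
 * Example usage (illustrative only, not part of this file): a minimal
 * consume_entry callback that prints each return address and never
 * terminates the walk early. The callback name print_entry is hypothetical.
 *
 *	static bool print_entry(void *cookie, unsigned long pc)
 *	{
 *		pr_info("%pS\n", (void *)pc);
 *		return true;
 *	}
 *
 *	arch_stack_walk(print_entry, NULL, current, NULL);
 */
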
struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp), void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(dump_backtrace);

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
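	/*
	 * The barrier() below presumably prevents the compiler from
	 * tail-calling dump_backtrace(), keeping show_stack() itself on
	 * the stack while the trace is generated.
	 */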
	barrier();
}

/*
 * The layout of a user space AArch64 frame record, matching the (x29, x30)
 * pair that the AAPCS64 frame chain stores at each frame pointer.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
		  stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t fp;	/* a (struct compat_frame_tail *) in compat mode */
	u32 sp;
	u32 lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
			 stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct compat_frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
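		/*
		 * Frame pointers must be at least 8-byte aligned here; the
		 * walk stops on a NULL or misaligned fp.
		 */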
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
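		/*
		 * AArch32 frame pointers only need 4-byte alignment; as
		 * above, the walk stops on a NULL or misaligned fp.
		 */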
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}