/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and to which it is
 *               no longer valid to unwind.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
 *               replacement lr value in the ftrace graph stack.
 */
struct stackframe {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
};
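
/*
 * Illustrative sketch (not part of the kernel sources): the consistency
 * checks that unwind_frame() derives from the accounting fields above,
 * reduced to pseudo-code. Within a single stack, each frame record must be
 * at a strictly higher address than the last; a transition to another stack
 * marks the previous stack type as done, so a corrupt frame chain can never
 * lead the unwinder back onto a stack it has already left.
 *
 *	if (!on_accessible_stack(tsk, fp, &info))
 *		return -EINVAL;
 *	if (test_bit(info.type, frame->stacks_done))
 *		return -EINVAL;		// never re-enter a finished stack
 *	if (info.type == frame->prev_type) {
 *		if (fp <= frame->prev_fp)
 *			return -EINVAL;	// records must grow upwards
 *	} else {
 *		set_bit(frame->prev_type, frame->stacks_done);
 *	}
 */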

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
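
/*
 * Illustrative sketch (not part of the kernel sources): a typical caller
 * pairs start_backtrace() (defined below) with walk_stackframe(), which
 * invokes the callback for each frame until the callback returns non-zero
 * or unwind_frame() fails. The callback and its printk format here are
 * hypothetical.
 *
 *	static int print_entry(struct stackframe *frame, void *data)
 *	{
 *		pr_info(" pc: %pS\n", (void *)frame->pc);
 *		return 0;
 *	}
 *
 *	struct stackframe frame;
 *
 *	start_backtrace(&frame, thread_saved_fp(tsk), thread_saved_pc(tsk));
 *	walk_stackframe(tsk, &frame, print_entry, NULL);
 */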

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
#endif

static inline bool on_irq_stack(unsigned long sp,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	if (!low)
		return false;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_IRQ;
	}

	return true;
}
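
/*
 * Illustrative sketch (not part of the kernel sources): the info
 * out-parameter reports the bounds of the matched stack, and may be NULL
 * when only a yes/no answer is needed.
 *
 *	struct stack_info info;
 *
 *	if (on_irq_stack(sp, &info))
 *		pr_debug("sp on IRQ stack [%lx..%lx)\n", info.low, info.high);
 */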

static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_TASK;
	}

	return true;
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_OVERFLOW;
	}

	return true;
}
#else
static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp,
				       struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, info))
		return true;
	if (on_overflow_stack(sp, info))
		return true;
	if (on_sdei_stack(sp, info))
		return true;

	return false;
}
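
/*
 * Illustrative sketch (not part of the kernel sources): validating an fp
 * before dereferencing it as a frame record, as unwind_frame() does. For
 * tsk != current, or in a preemptible context, only the task stack is
 * considered, since the per-cpu stacks may belong to an unrelated CPU.
 *
 *	struct stack_info info;
 *
 *	if (!on_accessible_stack(tsk, frame->fp, &info))
 *		return -EINVAL;
 *	fp = READ_ONCE_NOCHECK(*(unsigned long *)(frame->fp));
 */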

static inline void start_backtrace(struct stackframe *frame,
				   unsigned long fp, unsigned long pc)
{
	frame->fp = fp;
	frame->pc = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame->graph = 0;
#endif

	/*
	 * Prime the first unwind.
	 *
	 * In unwind_frame() we'll check that the FP points to a valid stack,
	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
	 * treated as a transition to whichever stack that happens to be. The
	 * prev_fp value won't be used, but we set it to 0 such that it is
	 * definitely not an accessible stack address.
	 */
	bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
	frame->prev_fp = 0;
	frame->prev_type = STACK_TYPE_UNKNOWN;
}
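
/*
 * Illustrative sketch (not part of the kernel sources): backtracing the
 * current task primes the unwind from the caller's own fp/pc, e.g.:
 *
 *	struct stackframe frame;
 *
 *	start_backtrace(&frame,
 *			(unsigned long)__builtin_frame_address(0),
 *			(unsigned long)my_backtrace_func);
 *	walk_stackframe(current, &frame, print_entry, NULL);
 *
 * where my_backtrace_func is the (hypothetical) function containing this
 * code, so its own address serves as the initial pc.
 */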

#endif /* __ASM_STACKTRACE_H */