/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Stack trace management functions
 *
 * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
 * Copyright (C) 2020 Loongson Technology Co., Ltd.
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>
#include <asm/unwind.h>

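/*
 * Register usage on LoongArch, as the unwinder sees it: regs[1] is $ra,
 * regs[3] is $sp, regs[22] is $fp, and csr_era holds the address the
 * current context will return to.
 */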
typedef bool (*stack_trace_consume_fn)(struct stack_trace *trace,
				       unsigned long addr);

static bool consume_entry(struct stack_trace *trace, unsigned long addr)
{
	if (trace->nr_entries >= trace->max_entries)
		return false;

	if (trace->skip > 0) {
		trace->skip--;
		return true;
	}

	trace->entries[trace->nr_entries++] = addr;
	return trace->nr_entries < trace->max_entries;
}

static bool consume_entry_nosched(struct stack_trace *trace,
				  unsigned long addr)
{
	/* Scheduler internals are transient noise; don't record them */
	if (in_sched_functions(addr))
		return true;
	return consume_entry(trace, addr);
}

static void save_context_stack(struct task_struct *tsk,
			       struct stack_trace *trace,
			       struct pt_regs *regs,
			       stack_trace_consume_fn fn)
{
	struct pt_regs dummyregs;
	struct unwind_state state;
	unsigned long addr;

	/*
	 * If no starting regs were supplied, synthesize them: use our own
	 * ra/fp for the running task, or the context saved at the last
	 * switch_to() for a sleeping one.
	 */
	if (!regs) {
		regs = &dummyregs;

		if (tsk == current) {
			regs->csr_era = (unsigned long)__builtin_return_address(0);
			regs->regs[3] = (unsigned long)__builtin_frame_address(0);
		} else {
			regs->csr_era = thread_saved_ra(tsk);
			regs->regs[3] = thread_saved_fp(tsk);
		}

		/* Clear $ra and $fp so the unwinder doesn't chase garbage */
		regs->regs[1] = 0;
		regs->regs[22] = 0;
	}

	for (unwind_start(&state, tsk, regs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !fn(trace, addr))
			return;
	}
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	stack_trace_consume_fn consume = consume_entry;

	WARN_ON(trace->nr_entries || !trace->max_entries);

	save_context_stack(current, trace, NULL, consume);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
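
/*
 * A minimal caller sketch (illustrative only; the names are hypothetical,
 * not part of this file):
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 0,
 *	};
 *
 *	save_stack_trace(&trace);
 *
 * Afterwards entries[0..trace.nr_entries-1] holds the kernel return
 * addresses of the current call chain.
 */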

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	stack_trace_consume_fn consume = consume_entry;

	WARN_ON(trace->nr_entries || !trace->max_entries);

	save_context_stack(current, trace, regs, consume);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

void save_stack_trace_tsk(struct task_struct *tsk,
			  struct stack_trace *trace)
{
	stack_trace_consume_fn consume = consume_entry_nosched;

	WARN_ON(trace->nr_entries || !trace->max_entries);

	save_context_stack(tsk, trace, NULL, consume);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

static __always_inline int
__save_stack_trace_reliable(struct stack_trace *trace,
			    struct task_struct *tsk)
{
	struct unwind_state state;
	struct pt_regs dummyregs;
	struct pt_regs *regs = &dummyregs;
	unsigned long addr;

	if (tsk == current) {
		regs->csr_era = (unsigned long)__builtin_return_address(0);
		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
	} else {
		regs->csr_era = thread_saved_ra(tsk);
		regs->regs[3] = thread_saved_fp(tsk);
	}

	/* As in save_context_stack(): don't let the unwinder see stale ra/fp */
	regs->regs[1] = 0;
	regs->regs[22] = 0;

	for (unwind_start(&state, tsk, regs);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(trace, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
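
/*
 * Sketch of the expected calling pattern (illustrative; the fallback policy
 * shown is hypothetical, not part of this file):
 *
 *	if (save_stack_trace_tsk_reliable(task, &trace))
 *		return -EBUSY;	/- trace unreliable, caller must fall back -/
 *
 * Consumers such as live patching rely on the all-or-nothing guarantee
 * documented above.
 */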
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{
	int ret;
	unsigned long err;
	unsigned long __user *user_frame_tail;

	user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stack_frame));
	if (!access_ok(user_frame_tail, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	err = __copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame));
	/* Reject the frame unless the chain moves strictly up the stack */
	if (err || (unsigned long)user_frame_tail >= frame->fp)
		ret = 0;
	pagefault_enable();

	return ret;
}
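
/*
 * The walker below assumes each user frame stores a struct stack_frame
 * record (previous fp plus return address) immediately below the frame
 * pointer, i.e. at fp - sizeof(struct stack_frame); copy_stack_frame()
 * reads that record and the loop follows frame.fp up through the stack.
 */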

static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	unsigned long fp = regs->regs[22];

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->csr_era;

	/* Walk user frame pointers for as long as they stay 16-byte aligned */
	while (trace->nr_entries < trace->max_entries && fp && !(fp & 0xf)) {
		struct stack_frame frame;

		frame.fp = 0;
		frame.ra = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if (!frame.ra)
			break;
		trace->entries[trace->nr_entries++] = frame.ra;
		fp = frame.fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace the user stack only if this is not a kernel thread
	 * (kernel threads have no mm).
	 */
	if (current->mm)
		__save_stack_trace_user(trace);

	/* Old-API convention: terminate the trace with ULONG_MAX if room */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
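
/*
 * Consumer sketch (illustrative only): print the captured user trace,
 * stopping at the ULONG_MAX terminator when one was appended:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < trace.nr_entries; i++) {
 *		if (trace.entries[i] == ULONG_MAX)
 *			break;
 *		pr_info("  %016lx\n", trace.entries[i]);
 *	}
 */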