1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Common arm64 stack unwinder code.
4 *
5 * To implement a new arm64 stack unwinder:
6 * 1) Include this header
7 *
8 * 2) Call into unwind_next_common() from your top level unwind
9 * function, passing it the validation and translation callbacks
10 * (though the latter can be NULL if no translation is required).
11 *
12 * See: arch/arm64/kernel/stacktrace.c for the reference implementation.
13 *
14 * Copyright (C) 2012 ARM Ltd.
15 */
16 #ifndef __ASM_STACKTRACE_COMMON_H
17 #define __ASM_STACKTRACE_COMMON_H
18
19 #include <linux/bitmap.h>
20 #include <linux/bitops.h>
21 #include <linux/kprobes.h>
22 #include <linux/types.h>
23
/*
 * The classes of stack an arm64 task can run on. The unwinder records which
 * of these it has finished with so that it never unwinds back onto them.
 */
enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	STACK_TYPE_HYP,
	__NR_STACK_TYPES
};
34
35 struct stack_info {
36 unsigned long low;
37 unsigned long high;
38 enum stack_type type;
39 };
40
41 /*
42 * A snapshot of a frame record or fp/lr register values, along with some
43 * accounting information necessary for robust unwinding.
44 *
45 * @fp: The fp value in the frame record (or the real fp)
46 * @pc: The lr value in the frame record (or the real lr)
47 *
48 * @stacks_done: Stacks which have been entirely unwound, for which it is no
49 * longer valid to unwind to.
50 *
51 * @prev_fp: The fp that pointed to this frame record, or a synthetic value
52 * of 0. This is used to ensure that within a stack, each
53 * subsequent frame record is at an increasing address.
54 * @prev_type: The type of stack this frame record was on, or a synthetic
55 * value of STACK_TYPE_UNKNOWN. This is used to detect a
56 * transition from one stack to another.
57 *
58 * @task: The task being unwound.
59 */
60 struct unwind_state {
61 unsigned long fp;
62 unsigned long pc;
63 DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
64 unsigned long prev_fp;
65 enum stack_type prev_type;
66 struct task_struct *task;
67 };
68
on_stack(unsigned long sp,unsigned long size,unsigned long low,unsigned long high,enum stack_type type,struct stack_info * info)69 static inline bool on_stack(unsigned long sp, unsigned long size,
70 unsigned long low, unsigned long high,
71 enum stack_type type, struct stack_info *info)
72 {
73 if (!low)
74 return false;
75
76 if (sp < low || sp + size < sp || sp + size > high)
77 return false;
78
79 if (info) {
80 info->low = low;
81 info->high = high;
82 info->type = type;
83 }
84 return true;
85 }
86
unwind_init_common(struct unwind_state * state,struct task_struct * task)87 static inline void unwind_init_common(struct unwind_state *state,
88 struct task_struct *task)
89 {
90 state->task = task;
91
92 /*
93 * Prime the first unwind.
94 *
95 * In unwind_next() we'll check that the FP points to a valid stack,
96 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
97 * treated as a transition to whichever stack that happens to be. The
98 * prev_fp value won't be used, but we set it to 0 such that it is
99 * definitely not an accessible stack address.
100 */
101 bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
102 state->prev_fp = 0;
103 state->prev_type = STACK_TYPE_UNKNOWN;
104 }
105
/*
 * stack_trace_translate_fp_fn() - Translates a non-kernel frame pointer to
 * a kernel address.
 *
 * @fp:   the frame pointer to be updated to its kernel address.
 * @type: the stack type associated with frame pointer @fp
 *
 * Return: true on success, with @fp updated to the corresponding kernel
 * virtual address; false otherwise.
 */
typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
					    enum stack_type type);
118
/*
 * on_accessible_stack_fn() - Check whether a stack range is on any of the
 * possible stacks.
 *
 * @tsk:  task whose stack is being unwound
 * @sp:   stack address being checked
 * @size: size of the stack range being checked
 * @info: stack unwinding context
 *
 * Return: true if the whole range lies on an accessible stack.
 */
typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info);
131
/*
 * unwind_next_common() - Advance @state to the next frame record, using
 * @accessible to validate the candidate record and @translate_fp (which may
 * be NULL) to map a non-kernel fp to a dereferenceable kernel address.
 *
 * Return: 0 on success, -EINVAL when the frame record is misaligned, off
 * every known stack, on a stack already fully unwound, untranslatable, or
 * not at a strictly higher address than the previous record on its stack.
 */
static inline int unwind_next_common(struct unwind_state *state,
				     struct stack_info *info,
				     on_accessible_stack_fn accessible,
				     stack_trace_translate_fp_fn translate_fp)
{
	struct task_struct *task = state->task;
	unsigned long fp = state->fp;
	unsigned long kern_fp = fp;

	/* Frame records must be 8-byte aligned. */
	if (fp & 0x7)
		return -EINVAL;

	/* The 16-byte record (fp + lr) must lie on an accessible stack. */
	if (!accessible(task, fp, 16, info))
		return -EINVAL;

	/* Never step back onto a stack we have already finished with. */
	if (test_bit(info->type, state->stacks_done))
		return -EINVAL;

	/*
	 * If fp is not from the current address space perform the necessary
	 * translation before dereferencing it to get the next fp.
	 */
	if (translate_fp && !translate_fp(&kern_fp, info->type))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 * HYP -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info->type == state->prev_type) {
		if (fp <= state->prev_fp)
			return -EINVAL;
	} else {
		/* Stack transition: retire the stack we are leaving. */
		__set_bit(state->prev_type, state->stacks_done);
	}

	/*
	 * Load the next (fp, lr) pair and record where this frame record
	 * lives; prev_fp/prev_type are only meaningful to the next
	 * unwind_next() invocation.
	 */
	state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
	state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
	state->prev_fp = fp;
	state->prev_type = info->type;

	return 0;
}
188
189 #endif /* __ASM_STACKTRACE_COMMON_H */
190