// SPDX-License-Identifier: GPL-2.0-only
/*
 * arm64 callchain support
 *
 * Copyright (C) 2015 ARM Limited
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

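/*
 * Layout of the AAPCS64 frame record that the AArch64 frame pointer (x29)
 * points at: the previous frame pointer followed by the saved return address.
 */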
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry_ctx *entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

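	/*
	 * With pointer authentication in use the saved LR carries a PAC in
	 * its upper bits; strip it so a canonical return address is stored.
	 */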
	lr = ptrauth_strip_insn_pac(buftail.lr);

	perf_callchain_store(entry, lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
	u32 sp;
	u32 lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
		      struct perf_callchain_entry_ctx *entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct compat_frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();

	if (guest_cbs && guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

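		/*
		 * x29 points directly at the frame record in AArch64 mode,
		 * so walk it as-is, but only follow frame pointers that
		 * keep 8-byte alignment.
		 */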
		while (entry->nr < entry->max_stack &&
		       tail && !((unsigned long)tail & 0x7))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

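		/*
		 * The compat frame record sits just below the reported fp
		 * (see the layout comment above compat_frame_tail), so step
		 * back one struct before walking; only 4-byte aligned
		 * pointers are followed.
		 */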
		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

		while ((entry->nr < entry->max_stack) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return, so we use
 * the PC.
 */
static bool callchain_trace(void *data, unsigned long pc)
{
	struct perf_callchain_entry_ctx *entry = data;

	perf_callchain_store(entry, pc);
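	/* Returning true tells walk_stackframe() to keep unwinding. */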
	return true;
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
	struct stackframe frame;

	if (guest_cbs && guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

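	/*
	 * Seed the unwinder from the exception registers: the frame pointer
	 * in x29 and the interrupted PC.
	 */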
	start_backtrace(&frame, regs->regs[29], regs->pc);
	walk_stackframe(current, &frame, callchain_trace, entry);
}

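/*
 * If the PMU interrupt arrived while a guest was running, the host pt_regs
 * do not describe the guest, so ask the registered guest callbacks
 * (e.g. KVM's) for the guest's instruction pointer instead.
 */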
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();

	if (guest_cbs && guest_cbs->is_in_guest())
		return guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

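/*
 * Report where the sample came from (guest vs. host, user vs. kernel) so
 * that perf can attribute it to the right context.
 */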
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
	int misc = 0;

	if (guest_cbs && guest_cbs->is_in_guest()) {
		if (guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}