// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#endif
#include <asm/pte-walk.h>


/*
 * Is sp valid as the address of the next kernel stack frame after prev_sp?
 * The next frame may be in a different stack area but should not go
 * back down in the same stack area.
 */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
	if (sp & 0xf)
		return 0;		/* must be 16-byte aligned */
	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;
	if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
		return 1;
	/*
	 * sp could decrease when we jump off an interrupt stack
	 * back to the regular process stack.
	 */
	if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
		return 1;
	return 0;
}
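
/*
 * Walk the kernel stack, recording the return address found in each
 * frame.  If a frame turns out to hold a saved pt_regs for an
 * interrupt taken in the kernel, restart from the register values
 * saved there so the trace continues across the interrupt.
 */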
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	unsigned long *fp;

	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, perf_instruction_pointer(regs));

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return;

	for (;;) {
		fp = (unsigned long *) sp;
		next_sp = fp[0];

		if (next_sp == sp + STACK_INT_FRAME_SIZE &&
		    validate_sp(sp, current, STACK_INT_FRAME_SIZE) &&
		    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			/*
			 * This looks like an interrupt frame for an
			 * interrupt that occurred in the kernel
			 */
			regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
			next_ip = regs->nip;
			lr = regs->link;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);

		} else {
			if (level == 0)
				next_ip = lr;
			else
				next_ip = fp[STACK_FRAME_LR_SAVE];

			/*
			 * We can't tell which of the first two addresses
			 * we get are valid, but we can filter out the
			 * obviously bogus ones here.  We replace them
			 * with 0 rather than removing them entirely so
			 * that userspace can tell which is which.
			 */
			if ((level == 1 && next_ip == lr) ||
			    (level <= 1 && !kernel_text_address(next_ip)))
				next_ip = 0;

			++level;
		}

		perf_callchain_store(entry, next_ip);
		if (!valid_next_sp(next_sp, sp))
			return;
		sp = next_sp;
	}
}

#ifdef CONFIG_PPC64
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	local_irq_save(flags);
	ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !pte_user(pte))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}
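
/*
 * Read one word of the user stack.  The fast path is an inatomic
 * userspace access with page faults disabled; if that faults, fall
 * back to reading the page tables via read_user_stack_slow().
 */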
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 8);
}

static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 4);
}
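
/*
 * A user stack pointer must be non-zero, 8-byte aligned, and must
 * leave room for a minimal frame below the top of the address space
 * (TASK_SIZE for 64-bit tasks, 4GB for 32-bit tasks).
 */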
static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
		return 0;
	return 1;
}

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};

static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
		puc == (unsigned long) &sf->uc;
}
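
/*
 * Walk the user stack of a 64-bit task, using the back-chain and LR
 * save word in each frame.  If a frame turns out to be a signal
 * frame, restart the walk from the NIP, LR and r1 values saved in
 * its mcontext so the trace continues into the interrupted code.
 */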
static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

static inline int current_is_64bit(void)
{
	/*
	 * We can't use test_thread_flag() here because we may be on an
	 * interrupt stack, and the thread flags don't get copied over
	 * from the thread_info on the main stack to the interrupt stack.
	 */
	return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
}

#else  /* CONFIG_PPC64 */
/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables.  Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	int rc;

	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	rc = __get_user_inatomic(*ret, ptr);
	pagefault_enable();

	return rc;
}

static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
					  struct pt_regs *regs)
{
}

static inline int current_is_64bit(void)
{
	return 0;
}

static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
		return 0;
	return 1;
}

#define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
#define sigcontext32		sigcontext
#define mcontext32		mcontext
#define ucontext32		ucontext
#define compat_siginfo_t	struct siginfo

#endif /* CONFIG_PPC64 */

/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32];
	struct sigcontext32	sctx;
	struct mcontext32	mctx;
	int			abigap[56];
};

/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32 + 16];
	compat_siginfo_t	info;
	struct ucontext32	uc;
	int			abigap[56];
};
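
/*
 * A return address is a sigreturn trampoline if it points either at
 * the trampoline instructions held in the signal frame itself (in
 * mc_pad) or at the matching trampoline in the vDSO.
 */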
static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
		return 1;
	if (vdso32_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
		return 1;
	return 0;
}

static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct rt_signal_frame_32,
				 uc.uc_mcontext.mc_pad))
		return 1;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
		return 1;
	return 0;
}

static int sane_signal_32_frame(unsigned int sp)
{
	struct signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->mctx;
}

static int sane_rt_signal_32_frame(unsigned int sp)
{
	struct rt_signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->uc.uc_mcontext;
}
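
/*
 * If the frame at sp looks like a 32-bit signal frame (RT or non-RT),
 * return a pointer to the saved GP registers in its mcontext;
 * otherwise return NULL.
 */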
static unsigned int __user *signal_frame_32_regs(unsigned int sp,
				unsigned int next_sp, unsigned int next_ip)
{
	struct mcontext32 __user *mctx = NULL;
	struct signal_frame_32 __user *sf;
	struct rt_signal_frame_32 __user *rt_sf;

	/*
	 * Note: the next_sp - sp >= signal frame size check
	 * is true when next_sp < sp, for example, when
	 * transitioning from an alternate signal stack to the
	 * normal stack.
	 */
	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
	    is_sigreturn_32_address(next_ip, sp) &&
	    sane_signal_32_frame(sp)) {
		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
		mctx = &sf->mctx;
	}

	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
	    is_rt_sigreturn_32_address(next_ip, sp) &&
	    sane_rt_signal_32_frame(sp)) {
		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
		mctx = &rt_sf->uc.uc_mcontext;
	}

	if (!mctx)
		return NULL;
	return mctx->mc_gregs;
}
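
/*
 * Walk the user stack of a 32-bit task, in the same way as
 * perf_callchain_user_64() but using 32-bit stack frames and the
 * compat signal frame layouts defined above.
 */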
static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned int sp, next_sp;
	unsigned int next_ip;
	unsigned int lr;
	long level = 0;
	unsigned int __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned int __user *) (unsigned long) sp;
		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
			return;

		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
		if (!uregs && level <= 1)
			uregs = signal_frame_32_regs(sp, next_sp, lr);
		if (uregs) {
			/*
			 * This looks like a signal frame, so restart
			 * the stack trace with the values in it.
			 */
			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
			    read_user_stack_32(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}
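
/*
 * Entry point for user-space callchain sampling: pick the 32-bit or
 * 64-bit unwinder depending on whether the current task is running
 * in 64-bit mode.
 */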
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	if (current_is_64bit())
		perf_callchain_user_64(entry, regs);
	else
		perf_callchain_user_32(entry, regs);
}