/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/hardirq.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <asm/vdso.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0  /* Backtrace still ongoing */
#define KBT_DONE	1  /* Backtrace cleanly completed */
#define KBT_RUNNING	2  /* Can't run backtrace on a running task */
#define KBT_LOOP	3  /* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (address == 0)
		return 0;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!kbt->is_current) {
		return 0;	/* can't read from other user address spaces */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator *kbt)
{
	char fault[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (sp % sizeof(long) != 0)
		return NULL;
	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (kbt->verbose) {     /* else we aren't going to use it */
		if (p->faultnum == INT_SWINT_1 ||
		    p->faultnum == INT_SWINT_1_SIGRETURN)
			snprintf(fault, sizeof(fault),
				 "syscall %ld", p->regs[TREG_SYSCALL_NR]);
		else
			snprintf(fault, sizeof(fault),
				 "interrupt %ld", p->faultnum);
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (user_mode(p) &&
		   p->sp < PAGE_OFFSET && p->sp != 0) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else {
		if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
			pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
			       p->pc, p->sp, p->ex1);
		return NULL;
	}
	if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
		return NULL;
	return p;
}

/* Is the iterator pointing to a sigreturn trampoline? */
static int is_sigreturn(struct KBacktraceIterator *kbt)
{
	return kbt->task->mm &&
		(kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
				(ulong)&__vdso_rt_sigreturn));
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator *kbt,
				      struct rt_sigframe *kframe)
{
	BacktraceIterator *b = &kbt->it;

	if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
	    b->sp % sizeof(long) == 0) {
		int retval;
		pagefault_disable();
		retval = __copy_from_user_inatomic(
			kframe, (void __user __force *)b->sp,
			sizeof(*kframe));
		pagefault_enable();
		if (retval != 0 ||
		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
			return NULL;
		if (kbt->verbose) {
			pr_err("  <received signal %d>\n",
			       kframe->info.si_signo);
		}
		return (struct pt_regs *)&kframe->uc.uc_mcontext;
	}
	return NULL;
}

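/*
 * If the current frame is a fault or signal handler, re-seed the
 * backtracer from that handler's saved pt_regs so the walk continues
 * into the interrupted context.  Returns nonzero on success.
 */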
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;
	struct rt_sigframe kframe;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt, &kframe);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a page different from what we recorded as
 * the top-of-kernel-stack the last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help with debugging,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	} else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	}
}

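/*
 * Set up a backtrace iterator for the given task (NULL means current).
 * With regs == NULL we walk a sleeping task from its switch_to() save
 * area; a task that is actually running elsewhere can't be walked, so
 * the iterator is marked KBT_RUNNING and yields no frames.
 */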
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range.
	 */
	is_current = (t == NULL || t == current);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->verbose = 0;   /* override in caller if desired */
	kbt->profile = 0;   /* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 1;
	if (is_current)
		validate_stack(regs);

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

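/* Return nonzero once the iterator has no more frames to offer. */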
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

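/*
 * Advance to the next frame, restarting across fault/signal contexts
 * as needed.  If pc and sp don't move, flag KBT_LOOP and give up.
 */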
void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);

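/*
 * Typical use of the iterator API, as in tile_show_stack() below (a
 * minimal sketch; process_frame() is a hypothetical consumer, not
 * part of this file):
 *
 *	struct KBacktraceIterator kbt;
 *
 *	KBacktraceIterator_init(&kbt, task, regs);
 *	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *		process_frame(kbt.it.pc, kbt.it.sp);
 */

/*
 * Describe an address for a stack dump: kernel PCs resolve through
 * kallsyms to "name+offset/size [module]"; user PCs resolve (only if
 * we hold the mmap_sem) to the backing file's basename, or "anon",
 * plus the vma's start address and size.
 */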
static void describe_addr(struct KBacktraceIterator *kbt,
			  unsigned long address,
			  int have_mmap_sem, char *buf, size_t bufsize)
{
	struct vm_area_struct *vma;
	size_t namelen, remaining;
	unsigned long size, offset, adjust;
	char *p, *modname;
	const char *name;
	int rc;

	/*
	 * Look one byte back for every caller frame (i.e. those that
	 * aren't a new context) so we look up symbol data for the
	 * call itself, not the following instruction, which may be on
	 * a different line (or in a different function).
	 */
	adjust = !kbt->new_context;
	address -= adjust;

	if (address >= PAGE_OFFSET) {
		/* Handle kernel symbols. */
		BUG_ON(bufsize < KSYM_NAME_LEN);
		name = kallsyms_lookup(address, &size, &offset,
				       &modname, buf);
		if (name == NULL) {
			buf[0] = '\0';
			return;
		}
		namelen = strlen(buf);
		remaining = (bufsize - 1) - namelen;
		p = buf + namelen;
		rc = snprintf(p, remaining, "+%#lx/%#lx ",
			      offset + adjust, size);
		if (modname && rc < remaining)
			snprintf(p + rc, remaining - rc, "[%s] ", modname);
		buf[bufsize-1] = '\0';
		return;
	}

	/* If we don't have the mmap_sem, we can't show any more info. */
	buf[0] = '\0';
	if (!have_mmap_sem)
		return;

	/* Find vma info. */
	vma = find_vma(kbt->task->mm, address);
	if (vma == NULL || address < vma->vm_start) {
		snprintf(buf, bufsize, "[unmapped address] ");
		return;
	}

	if (vma->vm_file) {
		p = file_path(vma->vm_file, buf, bufsize);
		if (IS_ERR(p))
			p = "?";
		name = kbasename(p);
	} else {
		name = "anon";
	}

	/* Generate a string description of the vma info. */
	namelen = strlen(name);
	remaining = (bufsize - 1) - namelen;
	memmove(buf, name, namelen);
	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
		 vma->vm_start, vma->vm_end - vma->vm_start);
}

/*
 * Avoid possible crash recursion during backtrace.  If it happens, it
 * makes it easy to lose the actual root cause of the failure, so we
 * put a simple guard on all the backtrace loops.
 */
static bool start_backtrace(void)
{
	if (current_thread_info()->in_backtrace) {
		pr_err("Backtrace requested while in backtrace!\n");
		return false;
	}
	current_thread_info()->in_backtrace = true;
	return true;
}

static void end_backtrace(void)
{
	current_thread_info()->in_backtrace = false;
}

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt)
{
	int i;
	int have_mmap_sem = 0;

	if (!start_backtrace())
		return;
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char namebuf[KSYM_NAME_LEN+100];
		unsigned long address = kbt->it.pc;

		/*
		 * Try to acquire the mmap_sem as we pass into userspace.
		 * If we're in an interrupt context, don't even try, since
		 * it's not safe to call e.g. d_path() from an interrupt,
		 * since it uses spin locks without disabling interrupts.
		 * Note we test "kbt->task == current", not "kbt->is_current",
		 * since we're checking that "current" will work in d_path().
		 */
		if (kbt->task == current && address < PAGE_OFFSET &&
		    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
			have_mmap_sem =
				down_read_trylock(&kbt->task->mm->mmap_sem);
		}

		describe_addr(kbt, address, have_mmap_sem,
			      namebuf, sizeof(namebuf));

		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (have_mmap_sem)
		up_read(&kbt->task->mm->mmap_sem);
	end_backtrace();
}
EXPORT_SYMBOL(tile_show_stack);

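/* Build a minimal pt_regs holding just the values the backtracer needs. */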
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* Deprecated function currently only used by kernel_double_fault(). */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct KBacktraceIterator kbt;
	struct pt_regs regs;

	regs_to_pt_regs(&regs, pc, lr, sp, r52);
	KBacktraceIterator_init(&kbt, NULL, &regs);
	tile_show_stack(&kbt);
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/*
 * Called from sched_show_task() with task != NULL, or dump_stack()
 * with task == NULL.  The esp argument is always NULL.
 */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;
	if (task == NULL || task == current) {
		KBacktraceIterator_init_current(&kbt);
		KBacktraceIterator_next(&kbt);  /* don't show first frame */
	} else {
		KBacktraceIterator_init(&kbt, task, NULL);
	}
	tile_show_stack(&kbt);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

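/*
 * Common helper for the save_stack_trace_*() entry points: walk the
 * chosen context and record PCs into trace->entries, honoring
 * trace->skip, stopping at user PCs unless "user" is set, and
 * terminating the list with ULONG_MAX if there is room.
 */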
static void save_stack_trace_common(struct task_struct *task,
				    struct pt_regs *regs,
				    bool user,
				    struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (!start_backtrace())
		goto done;
	if (regs != NULL) {
		KBacktraceIterator_init(&kbt, NULL, regs);
	} else if (task == NULL || task == current) {
		KBacktraceIterator_init_current(&kbt);
		skip++;  /* don't show KBacktraceIterator_init_current */
	} else {
		KBacktraceIterator_init(&kbt, task, NULL);
	}
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries ||
		    (!user && kbt.it.pc < PAGE_OFFSET))
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	end_backtrace();
done:
	if (i < trace->max_entries)
		trace->entries[i++] = ULONG_MAX;
	trace->nr_entries = i;
}

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	save_stack_trace_common(task, NULL, false, trace);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_common(NULL, NULL, false, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_stack_trace_common(NULL, regs, false, trace);
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/* Trace user stack if we are not a kernel thread. */
	if (current->mm)
		save_stack_trace_common(NULL, task_pt_regs(current),
					true, trace);
	else if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);