/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static spinlock_t unwind_lock;
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);

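/*
 * Binary search within a single table for the unwind entry whose
 * [region_start, region_end] range contains addr. Entries are kept
 * sorted by region_start, so the lookup is O(log n).
 */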
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

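/*
 * Locate the unwind entry covering addr: check the static kernel table
 * first, then any registered module tables. A successful module lookup
 * moves that table to the front of the list, since consecutive lookups
 * tend to hit the same module.
 */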
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}

	return e;
}

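/*
 * Fill in a table descriptor for the entries in [table_start, table_end)
 * and relocate each entry's region bounds from section-relative offsets
 * to absolute addresses by adding base_addr. Entries are expected to
 * arrive sorted; out-of-order neighbours only trigger a warning.
 */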
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
	     - ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}

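/*
 * Register an additional unwind table (typically for a module): sort its
 * entries by region_start, relocate them, and add the table descriptor
 * to the global list under unwind_lock. Returns NULL if the descriptor
 * cannot be allocated.
 */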
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}

/* Called from setup_arch to import the kernel unwind info */
int unwind_init(void)
{
	long start, stop;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	spin_lock_init(&unwind_lock);

	printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif

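/*
 * Handle unwinding through handle_interruption(): the interrupted
 * context's pt_regs lie at a fixed offset below the current frame, so
 * the previous sp and ip are taken directly from those saved registers
 * instead of being derived from the prologue. Returns 1 if the frame
 * was handled here.
 */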
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	extern void handle_interruption(int, struct pt_regs *);
	static unsigned long *hi = (unsigned long *)&handle_interruption;

	if (pc == get_func_addr(hi)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];

		return 1;
	}

	return 0;
}

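/*
 * Compute prev_sp/prev_ip for the frame described by info. When unwind
 * info exists for info->ip, scan the function prologue from region_start
 * up to the current ip, decoding the stack-adjusting instructions
 * (ldo/stwm/std,ma on sp) to recover the frame size and the stw/std
 * that saves rp. Without unwind info, fall back to a blind walk in
 * 64-byte steps, accepting a candidate return address only if it lies
 * within kernel text.
 */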
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;
		extern char _stext[], _etext[];

		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);

#ifdef CONFIG_KALLSYMS
		/* Handle some frequent special cases.... */
		{
			char symname[KSYM_NAME_LEN];
			char *modname;

			kallsyms_lookup(info->ip, NULL, NULL, &modname,
				symname);

			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

			if (strcmp(symname, "_switch_to_ret") == 0) {
				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
				dbg("_switch_to_ret @ %lx - setting "
				    "prev_sp=%lx prev_ip=%lx\n",
				    info->ip, info->prev_sp,
				    info->prev_ip);
				return;
			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
				   strcmp(symname, "syscall_exit") == 0) {
				info->prev_ip = info->prev_sp = 0;
				return;
			}
		}
#endif

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;
			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (info->prev_ip < (unsigned long)_stext ||
			 info->prev_ip > (unsigned long)_etext);

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc000) == 0x37de0000 ||
			    (insn & 0xffe00000) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x1 ? -1 << 13 : 0) |
					((insn & 0x3fff) >> 1);
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00008) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += (insn & 0x1 ? -1 << 13 : 0) |
					(((insn >> 4) & 0x3ff) << 3);
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

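/*
 * Seed an unwind_frame_info from a pt_regs snapshot: stack pointer from
 * gr30, instruction address from the front of the IAOQ, return pointer
 * from gr2, and the millicode return register from gr31.
 */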
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

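/*
 * Start an unwind of a sleeping task: copy its saved pt_regs and
 * substitute the kernel stack pointer (ksp) and kernel pc (kpc)
 * recorded at the last context switch, which is where the task is
 * actually blocked.
 */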
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
	unwind_frame_init(info, current, regs);
}

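/*
 * Step back one frame: compute the caller's sp/ip and promote them to
 * the current frame. Returns -1 when no valid caller frame is found,
 * which terminates the walk.
 */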
int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

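/*
 * Unwind until info->ip has one of its low two bits set. On PA-RISC
 * these IAOQ bits carry the privilege level, so a non-zero value means
 * the walk has reached a user-space frame (or the unwind failed).
 */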
int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}

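/*
 * return_address - report the text address of a calling frame 'level'
 * steps up the stack. A pt_regs snapshot is seeded from the live stack
 * pointer, the current pc and __builtin_return_address(0), and the
 * stack is then unwound frame by frame; 0 is returned if the unwind
 * fails or the address leaves kernel text.
 */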
unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;
	struct pt_regs r;
	unsigned long sp;

	/* initialize unwind info */
	asm volatile ("copy %%r30, %0" : "=r"(sp));
	memset(&r, 0, sizeof(struct pt_regs));
	r.iaoq[0] = (unsigned long) current_text_addr();
	r.gr[2] = (unsigned long) __builtin_return_address(0);
	r.gr[30] = sp;
	unwind_frame_init(&info, current, &r);

	/* unwind stack */
	++level;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!__kernel_text_address(info.ip)) {
			return 0;
		}
	} while (info.ip && level--);

	return info.ip;
}