1 /*
2  * Copyright (C) 1999-2004 Hewlett-Packard Co
3  *	David Mosberger-Tang <davidm@hpl.hp.com>
4  * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
5  * 	- Change pt_regs_off() to make it less dependent on pt_regs structure.
6  */
7 /*
8  * This file implements call frame unwind support for the Linux
9  * kernel.  Parsing and processing the unwind information is
10  * time-consuming, so this implementation translates the unwind
11  * descriptors into unwind scripts.  These scripts are very simple
12  * (basically a sequence of assignments) and efficient to execute.
13  * They are cached for later re-use.  Each script is specific for a
14  * given instruction pointer address and the set of predicate values
15  * that the script depends on (most unwind descriptors are
16  * unconditional and scripts often do not depend on predicates at
17  * all).  This code is based on the unwind conventions described in
18  * the "IA-64 Software Conventions and Runtime Architecture" manual.
19  *
20  * SMP conventions:
21  *	o updates to the global unwind data (in structure "unw") are serialized
22  *	  by the unw.lock spinlock
23  *	o each unwind script has its own read-write lock; a thread must acquire
24  *	  a read lock before executing a script and must acquire a write lock
25  *	  before modifying a script
26  *	o if both the unw.lock spinlock and a script's read-write lock must be
27  *	  acquired, then the read-write lock must be acquired first.
28  */
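/*
 * A rough usage sketch (the unw_* accessors referenced here are declared in
 * <asm/unwind.h>; error handling elided):
 *
 *	struct unw_frame_info info;
 *	unsigned long ip;
 *
 *	unw_init_from_blocked_task(&info, task);
 *	do {
 *		unw_get_ip(&info, &ip);
 *		if (ip == 0)
 *			break;
 *	} while (unw_unwind(&info) >= 0);
 */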
29 #include <linux/module.h>
30 #include <linux/bootmem.h>
31 #include <linux/elf.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 
36 #include <asm/unwind.h>
37 
38 #include <asm/delay.h>
39 #include <asm/page.h>
40 #include <asm/ptrace.h>
41 #include <asm/ptrace_offsets.h>
42 #include <asm/rse.h>
43 #include <asm/sections.h>
44 #include <asm/uaccess.h>
45 
46 #include "entry.h"
47 #include "unwind_i.h"
48 
49 #define UNW_LOG_CACHE_SIZE	7	/* each unw_script is ~256 bytes in size */
50 #define UNW_CACHE_SIZE		(1 << UNW_LOG_CACHE_SIZE)
51 
52 #define UNW_LOG_HASH_SIZE	(UNW_LOG_CACHE_SIZE + 1)
53 #define UNW_HASH_SIZE		(1 << UNW_LOG_HASH_SIZE)
54 
55 #define UNW_STATS	0	/* WARNING: this disables interrupts for long time-spans!! */
56 
57 #ifdef UNW_DEBUG
58   static unsigned int unw_debug_level = UNW_DEBUG;
59 #  define UNW_DEBUG_ON(n)	unw_debug_level >= n
60    /* Do not add a printk level; not all debug lines end in a newline */
61 #  define UNW_DPRINT(n, ...)  if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
62 #  undef inline
63 #  define inline
64 #else /* !UNW_DEBUG */
65 #  define UNW_DEBUG_ON(n)  0
66 #  define UNW_DPRINT(n, ...)
67 #endif /* UNW_DEBUG */
68 
69 #if UNW_STATS
70 # define STAT(x...)	x
71 #else
72 # define STAT(x...)
73 #endif
74 
75 #define alloc_reg_state()	kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
76 #define free_reg_state(usr)	kfree(usr)
77 #define alloc_labeled_state()	kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
78 #define free_labeled_state(usr)	kfree(usr)
79 
80 typedef unsigned long unw_word;
81 typedef unsigned char unw_hash_index_t;
82 
83 static struct {
84 	spinlock_t lock;			/* spinlock for unwind data */
85 
86 	/* list of unwind tables (one per load-module) */
87 	struct unw_table *tables;
88 
89 	unsigned long r0;			/* constant 0 for r0 */
90 
91 	/* table of registers that prologues can save (and order in which they're saved): */
92 	const unsigned char save_order[8];
93 
94 	/* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
95 	unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
96 
97 	unsigned short lru_head;		/* index of least-recently used script */
98 	unsigned short lru_tail;		/* index of most-recently used script */
99 
100 	/* index into unw_frame_info for preserved register i */
101 	unsigned short preg_index[UNW_NUM_REGS];
102 
103 	short pt_regs_offsets[32];
104 
105 	/* unwind table for the kernel: */
106 	struct unw_table kernel_table;
107 
108 	/* unwind table describing the gate page (kernel code that is mapped into user space): */
109 	size_t gate_table_size;
110 	unsigned long *gate_table;
111 
112 	/* hash table that maps instruction pointer to script index: */
113 	unsigned short hash[UNW_HASH_SIZE];
114 
115 	/* script cache: */
116 	struct unw_script cache[UNW_CACHE_SIZE];
117 
118 # ifdef UNW_DEBUG
119 	const char *preg_name[UNW_NUM_REGS];
120 # endif
121 # if UNW_STATS
122 	struct {
123 		struct {
124 			int lookups;
125 			int hinted_hits;
126 			int normal_hits;
127 			int collision_chain_traversals;
128 		} cache;
129 		struct {
130 			unsigned long build_time;
131 			unsigned long run_time;
132 			unsigned long parse_time;
133 			int builds;
134 			int news;
135 			int collisions;
136 			int runs;
137 		} script;
138 		struct {
139 			unsigned long init_time;
140 			unsigned long unwind_time;
141 			int inits;
142 			int unwinds;
143 		} api;
144 	} stat;
145 # endif
146 } unw = {
147 	.tables = &unw.kernel_table,
148 	.lock = __SPIN_LOCK_UNLOCKED(unw.lock),
149 	.save_order = {
150 		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
151 		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
152 	},
153 	.preg_index = {
154 		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_GR */
155 		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_MEM */
156 		offsetof(struct unw_frame_info, bsp_loc)/8,
157 		offsetof(struct unw_frame_info, bspstore_loc)/8,
158 		offsetof(struct unw_frame_info, pfs_loc)/8,
159 		offsetof(struct unw_frame_info, rnat_loc)/8,
160 		offsetof(struct unw_frame_info, psp)/8,
161 		offsetof(struct unw_frame_info, rp_loc)/8,
162 		offsetof(struct unw_frame_info, r4)/8,
163 		offsetof(struct unw_frame_info, r5)/8,
164 		offsetof(struct unw_frame_info, r6)/8,
165 		offsetof(struct unw_frame_info, r7)/8,
166 		offsetof(struct unw_frame_info, unat_loc)/8,
167 		offsetof(struct unw_frame_info, pr_loc)/8,
168 		offsetof(struct unw_frame_info, lc_loc)/8,
169 		offsetof(struct unw_frame_info, fpsr_loc)/8,
170 		offsetof(struct unw_frame_info, b1_loc)/8,
171 		offsetof(struct unw_frame_info, b2_loc)/8,
172 		offsetof(struct unw_frame_info, b3_loc)/8,
173 		offsetof(struct unw_frame_info, b4_loc)/8,
174 		offsetof(struct unw_frame_info, b5_loc)/8,
175 		offsetof(struct unw_frame_info, f2_loc)/8,
176 		offsetof(struct unw_frame_info, f3_loc)/8,
177 		offsetof(struct unw_frame_info, f4_loc)/8,
178 		offsetof(struct unw_frame_info, f5_loc)/8,
179 		offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
180 		offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
181 		offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
182 		offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
183 		offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
184 		offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
185 		offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
186 		offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
187 		offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
188 		offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
189 		offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
190 		offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
191 		offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
192 		offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
193 		offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
194 		offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
195 	},
196 	.pt_regs_offsets = {
197 		[0] = -1,
198 		offsetof(struct pt_regs,  r1),
199 		offsetof(struct pt_regs,  r2),
200 		offsetof(struct pt_regs,  r3),
201 		[4] = -1, [5] = -1, [6] = -1, [7] = -1,
202 		offsetof(struct pt_regs,  r8),
203 		offsetof(struct pt_regs,  r9),
204 		offsetof(struct pt_regs, r10),
205 		offsetof(struct pt_regs, r11),
206 		offsetof(struct pt_regs, r12),
207 		offsetof(struct pt_regs, r13),
208 		offsetof(struct pt_regs, r14),
209 		offsetof(struct pt_regs, r15),
210 		offsetof(struct pt_regs, r16),
211 		offsetof(struct pt_regs, r17),
212 		offsetof(struct pt_regs, r18),
213 		offsetof(struct pt_regs, r19),
214 		offsetof(struct pt_regs, r20),
215 		offsetof(struct pt_regs, r21),
216 		offsetof(struct pt_regs, r22),
217 		offsetof(struct pt_regs, r23),
218 		offsetof(struct pt_regs, r24),
219 		offsetof(struct pt_regs, r25),
220 		offsetof(struct pt_regs, r26),
221 		offsetof(struct pt_regs, r27),
222 		offsetof(struct pt_regs, r28),
223 		offsetof(struct pt_regs, r29),
224 		offsetof(struct pt_regs, r30),
225 		offsetof(struct pt_regs, r31),
226 	},
227 	.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
228 #ifdef UNW_DEBUG
229 	.preg_name = {
230 		"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
231 		"r4", "r5", "r6", "r7",
232 		"ar.unat", "pr", "ar.lc", "ar.fpsr",
233 		"b1", "b2", "b3", "b4", "b5",
234 		"f2", "f3", "f4", "f5",
235 		"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
236 		"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
237 	}
238 #endif
239 };
240 
241 static inline int
242 read_only (void *addr)
243 {
244 	return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
245 }
246 
247 /*
248  * Returns offset of rREG in struct pt_regs.
249  */
250 static inline unsigned long
251 pt_regs_off (unsigned long reg)
252 {
253 	short off = -1;
254 
255 	if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
256 		off = unw.pt_regs_offsets[reg];
257 
258 	if (off < 0) {
259 		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg);
260 		off = 0;
261 	}
262 	return (unsigned long) off;
263 }
264 
265 static inline struct pt_regs *
266 get_scratch_regs (struct unw_frame_info *info)
267 {
268 	if (!info->pt) {
269 		/* This should not happen with valid unwind info.  */
270 		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__);
271 		if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
272 			info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
273 		else
274 			info->pt = info->sp - 16;
275 	}
276 	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt);
277 	return (struct pt_regs *) info->pt;
278 }
279 
280 /* Unwind accessors.  */
281 
282 int
283 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
284 {
285 	unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
286 	struct unw_ireg *ireg;
287 	struct pt_regs *pt;
288 
289 	if ((unsigned) regnum - 1 >= 127) {
290 		if (regnum == 0 && !write) {
291 			*val = 0;	/* read r0 always returns 0 */
292 			*nat = 0;
293 			return 0;
294 		}
295 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
296 			   __func__, regnum);
297 		return -1;
298 	}
299 
300 	if (regnum < 32) {
301 		if (regnum >= 4 && regnum <= 7) {
302 			/* access a preserved register */
303 			ireg = &info->r4 + (regnum - 4);
304 			addr = ireg->loc;
305 			if (addr) {
306 				nat_addr = addr + ireg->nat.off;
307 				switch (ireg->nat.type) {
308 				      case UNW_NAT_VAL:
309 					/* simulate getf.sig/setf.sig */
310 					if (write) {
311 						if (*nat) {
312 							/* write NaTVal and be done with it */
313 							addr[0] = 0;
314 							addr[1] = 0x1fffe;
315 							return 0;
316 						}
317 						addr[1] = 0x1003e;
318 					} else {
319 						if (addr[0] == 0 && addr[1] == 0x1fffe) {
320 							/* return NaT and be done with it */
321 							*val = 0;
322 							*nat = 1;
323 							return 0;
324 						}
325 					}
326 					/* fall through */
327 				      case UNW_NAT_NONE:
328 					dummy_nat = 0;
329 					nat_addr = &dummy_nat;
330 					break;
331 
332 				      case UNW_NAT_MEMSTK:
333 					nat_mask = (1UL << ((long) addr & 0x1f8)/8);
334 					break;
335 
336 				      case UNW_NAT_REGSTK:
337 					nat_addr = ia64_rse_rnat_addr(addr);
338 					if ((unsigned long) addr < info->regstk.limit
339 					    || (unsigned long) addr >= info->regstk.top)
340 					{
341 						UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
342 							"[0x%lx-0x%lx)\n",
343 							__func__, (void *) addr,
344 							info->regstk.limit,
345 							info->regstk.top);
346 						return -1;
347 					}
348 					if ((unsigned long) nat_addr >= info->regstk.top)
349 						nat_addr = &info->sw->ar_rnat;
350 					nat_mask = (1UL << ia64_rse_slot_num(addr));
351 					break;
352 				}
353 			} else {
354 				addr = &info->sw->r4 + (regnum - 4);
355 				nat_addr = &info->sw->ar_unat;
356 				nat_mask = (1UL << ((long) addr & 0x1f8)/8);
357 			}
358 		} else {
359 			/* access a scratch register */
360 			pt = get_scratch_regs(info);
361 			addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
362 			if (info->pri_unat_loc)
363 				nat_addr = info->pri_unat_loc;
364 			else
365 				nat_addr = &info->sw->caller_unat;
366 			nat_mask = (1UL << ((long) addr & 0x1f8)/8);
367 		}
368 	} else {
369 		/* access a stacked register */
370 		addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
371 		nat_addr = ia64_rse_rnat_addr(addr);
372 		if ((unsigned long) addr < info->regstk.limit
373 		    || (unsigned long) addr >= info->regstk.top)
374 		{
375 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
376 				   "of rbs\n",  __func__);
377 			return -1;
378 		}
379 		if ((unsigned long) nat_addr >= info->regstk.top)
380 			nat_addr = &info->sw->ar_rnat;
381 		nat_mask = (1UL << ia64_rse_slot_num(addr));
382 	}
383 
384 	if (write) {
385 		if (read_only(addr)) {
386 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
387 				__func__);
388 		} else {
389 			*addr = *val;
390 			if (*nat)
391 				*nat_addr |= nat_mask;
392 			else
393 				*nat_addr &= ~nat_mask;
394 		}
395 	} else {
396 		if ((*nat_addr & nat_mask) == 0) {
397 			*val = *addr;
398 			*nat = 0;
399 		} else {
400 			*val = 0;	/* if register is a NaT, *addr may contain kernel data! */
401 			*nat = 1;
402 		}
403 	}
404 	return 0;
405 }
406 EXPORT_SYMBOL(unw_access_gr);
407 
408 int
409 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
410 {
411 	unsigned long *addr;
412 	struct pt_regs *pt;
413 
414 	switch (regnum) {
415 		/* scratch: */
416 	      case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
417 	      case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
418 	      case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
419 
420 		/* preserved: */
421 	      case 1: case 2: case 3: case 4: case 5:
422 		addr = *(&info->b1_loc + (regnum - 1));
423 		if (!addr)
424 			addr = &info->sw->b1 + (regnum - 1);
425 		break;
426 
427 	      default:
428 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
429 			   __func__, regnum);
430 		return -1;
431 	}
432 	if (write)
433 		if (read_only(addr)) {
434 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
435 				__func__);
436 		} else
437 			*addr = *val;
438 	else
439 		*val = *addr;
440 	return 0;
441 }
442 EXPORT_SYMBOL(unw_access_br);
443 
444 int
445 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
446 {
447 	struct ia64_fpreg *addr = NULL;
448 	struct pt_regs *pt;
449 
450 	if ((unsigned) (regnum - 2) >= 126) {
451 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
452 			   __func__, regnum);
453 		return -1;
454 	}
455 
456 	if (regnum <= 5) {
457 		addr = *(&info->f2_loc + (regnum - 2));
458 		if (!addr)
459 			addr = &info->sw->f2 + (regnum - 2);
460 	} else if (regnum <= 15) {
461 		if (regnum <= 11) {
462 			pt = get_scratch_regs(info);
463 			addr = &pt->f6  + (regnum - 6);
464 		}
465 		else
466 			addr = &info->sw->f12 + (regnum - 12);
467 	} else if (regnum <= 31) {
468 		addr = info->fr_loc[regnum - 16];
469 		if (!addr)
470 			addr = &info->sw->f16 + (regnum - 16);
471 	} else {
472 		struct task_struct *t = info->task;
473 
474 		if (write)
475 			ia64_sync_fph(t);
476 		else
477 			ia64_flush_fph(t);
478 		addr = t->thread.fph + (regnum - 32);
479 	}
480 
481 	if (write)
482 		if (read_only(addr)) {
483 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
484 				__func__);
485 		} else
486 			*addr = *val;
487 	else
488 		*val = *addr;
489 	return 0;
490 }
491 EXPORT_SYMBOL(unw_access_fr);
492 
493 int
494 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
495 {
496 	unsigned long *addr;
497 	struct pt_regs *pt;
498 
499 	switch (regnum) {
500 	      case UNW_AR_BSP:
501 		addr = info->bsp_loc;
502 		if (!addr)
503 			addr = &info->sw->ar_bspstore;
504 		break;
505 
506 	      case UNW_AR_BSPSTORE:
507 		addr = info->bspstore_loc;
508 		if (!addr)
509 			addr = &info->sw->ar_bspstore;
510 		break;
511 
512 	      case UNW_AR_PFS:
513 		addr = info->pfs_loc;
514 		if (!addr)
515 			addr = &info->sw->ar_pfs;
516 		break;
517 
518 	      case UNW_AR_RNAT:
519 		addr = info->rnat_loc;
520 		if (!addr)
521 			addr = &info->sw->ar_rnat;
522 		break;
523 
524 	      case UNW_AR_UNAT:
525 		addr = info->unat_loc;
526 		if (!addr)
527 			addr = &info->sw->caller_unat;
528 		break;
529 
530 	      case UNW_AR_LC:
531 		addr = info->lc_loc;
532 		if (!addr)
533 			addr = &info->sw->ar_lc;
534 		break;
535 
536 	      case UNW_AR_EC:
537 		if (!info->cfm_loc)
538 			return -1;
539 		if (write)
540 			*info->cfm_loc =
541 				(*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
542 		else
543 			*val = (*info->cfm_loc >> 52) & 0x3f;
544 		return 0;
545 
546 	      case UNW_AR_FPSR:
547 		addr = info->fpsr_loc;
548 		if (!addr)
549 			addr = &info->sw->ar_fpsr;
550 		break;
551 
552 	      case UNW_AR_RSC:
553 		pt = get_scratch_regs(info);
554 		addr = &pt->ar_rsc;
555 		break;
556 
557 	      case UNW_AR_CCV:
558 		pt = get_scratch_regs(info);
559 		addr = &pt->ar_ccv;
560 		break;
561 
562 	      case UNW_AR_CSD:
563 		pt = get_scratch_regs(info);
564 		addr = &pt->ar_csd;
565 		break;
566 
567 	      case UNW_AR_SSD:
568 		pt = get_scratch_regs(info);
569 		addr = &pt->ar_ssd;
570 		break;
571 
572 	      default:
573 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
574 			   __func__, regnum);
575 		return -1;
576 	}
577 
578 	if (write) {
579 		if (read_only(addr)) {
580 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
581 				__func__);
582 		} else
583 			*addr = *val;
584 	} else
585 		*val = *addr;
586 	return 0;
587 }
588 EXPORT_SYMBOL(unw_access_ar);
589 
590 int
591 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
592 {
593 	unsigned long *addr;
594 
595 	addr = info->pr_loc;
596 	if (!addr)
597 		addr = &info->sw->pr;
598 
599 	if (write) {
600 		if (read_only(addr)) {
601 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
602 				__func__);
603 		} else
604 			*addr = *val;
605 	} else
606 		*val = *addr;
607 	return 0;
608 }
609 EXPORT_SYMBOL(unw_access_pr);
610 
611 
612 /* Routines to manipulate the state stack.  */
613 
614 static inline void
615 push (struct unw_state_record *sr)
616 {
617 	struct unw_reg_state *rs;
618 
619 	rs = alloc_reg_state();
620 	if (!rs) {
621 		printk(KERN_ERR "unwind: cannot stack reg state!\n");
622 		return;
623 	}
624 	memcpy(rs, &sr->curr, sizeof(*rs));
625 	sr->curr.next = rs;
626 }
627 
628 static void
629 pop (struct unw_state_record *sr)
630 {
631 	struct unw_reg_state *rs = sr->curr.next;
632 
633 	if (!rs) {
634 		printk(KERN_ERR "unwind: stack underflow!\n");
635 		return;
636 	}
637 	memcpy(&sr->curr, rs, sizeof(*rs));
638 	free_reg_state(rs);
639 }
640 
641 /* Make a copy of the state stack.  Non-recursive to avoid stack overflows.  */
642 static struct unw_reg_state *
643 dup_state_stack (struct unw_reg_state *rs)
644 {
645 	struct unw_reg_state *copy, *prev = NULL, *first = NULL;
646 
647 	while (rs) {
648 		copy = alloc_reg_state();
649 		if (!copy) {
650 			printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
651 			return NULL;
652 		}
653 		memcpy(copy, rs, sizeof(*copy));
654 		if (first)
655 			prev->next = copy;
656 		else
657 			first = copy;
658 		rs = rs->next;
659 		prev = copy;
660 	}
661 	return first;
662 }
663 
664 /* Free all stacked register states (but not RS itself).  */
665 static void
666 free_state_stack (struct unw_reg_state *rs)
667 {
668 	struct unw_reg_state *p, *next;
669 
670 	for (p = rs->next; p != NULL; p = next) {
671 		next = p->next;
672 		free_reg_state(p);
673 	}
674 	rs->next = NULL;
675 }
676 
677 /* Unwind decoder routines */
678 
679 static enum unw_register_index __attribute_const__
680 decode_abreg (unsigned char abreg, int memory)
681 {
682 	switch (abreg) {
683 	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
684 	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
685 	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
686 	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
687 	      case 0x60: return UNW_REG_PR;
688 	      case 0x61: return UNW_REG_PSP;
689 	      case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
690 	      case 0x63: return UNW_REG_RP;
691 	      case 0x64: return UNW_REG_BSP;
692 	      case 0x65: return UNW_REG_BSPSTORE;
693 	      case 0x66: return UNW_REG_RNAT;
694 	      case 0x67: return UNW_REG_UNAT;
695 	      case 0x68: return UNW_REG_FPSR;
696 	      case 0x69: return UNW_REG_PFS;
697 	      case 0x6a: return UNW_REG_LC;
698 	      default:
699 		break;
700 	}
701 	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
702 	return UNW_REG_LC;
703 }
704 
705 static void
706 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
707 {
708 	reg->val = val;
709 	reg->where = where;
710 	if (reg->when == UNW_WHEN_NEVER)
711 		reg->when = when;
712 }
713 
714 static void
715 alloc_spill_area (unsigned long *offp, unsigned long regsize,
716 		  struct unw_reg_info *lo, struct unw_reg_info *hi)
717 {
718 	struct unw_reg_info *reg;
719 
720 	for (reg = hi; reg >= lo; --reg) {
721 		if (reg->where == UNW_WHERE_SPILL_HOME) {
722 			reg->where = UNW_WHERE_PSPREL;
723 			*offp -= regsize;
724 			reg->val = *offp;
725 		}
726 	}
727 }
728 
729 static inline void
730 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
731 {
732 	struct unw_reg_info *reg;
733 
734 	for (reg = *regp; reg <= lim; ++reg) {
735 		if (reg->where == UNW_WHERE_SPILL_HOME) {
736 			reg->when = t;
737 			*regp = reg + 1;
738 			return;
739 		}
740 	}
741 	UNW_DPRINT(0, "unwind.%s: excess spill!\n",  __func__);
742 }
743 
744 static inline void
745 finish_prologue (struct unw_state_record *sr)
746 {
747 	struct unw_reg_info *reg;
748 	unsigned long off;
749 	int i;
750 
751 	/*
752 	 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
753 	 * for Using Unwind Descriptors", rule 3):
754 	 */
755 	for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
756 		reg = sr->curr.reg + unw.save_order[i];
757 		if (reg->where == UNW_WHERE_GR_SAVE) {
758 			reg->where = UNW_WHERE_GR;
759 			reg->val = sr->gr_save_loc++;
760 		}
761 	}
762 
763 	/*
764 	 * Next, compute when the fp, general, and branch registers get
765 	 * saved.  This must come before alloc_spill_area() because
766 	 * we need to know which registers are spilled to their home
767 	 * locations.
768 	 */
769 	if (sr->imask) {
770 		unsigned char kind, mask = 0, *cp = sr->imask;
771 		int t;
772 		static const unsigned char limit[3] = {
773 			UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
774 		};
775 		struct unw_reg_info *(regs[3]);
776 
777 		regs[0] = sr->curr.reg + UNW_REG_F2;
778 		regs[1] = sr->curr.reg + UNW_REG_R4;
779 		regs[2] = sr->curr.reg + UNW_REG_B1;
780 
781 		for (t = 0; t < sr->region_len; ++t) {
782 			if ((t & 3) == 0)
783 				mask = *cp++;
784 			kind = (mask >> 2*(3-(t & 3))) & 3;
785 			if (kind > 0)
786 				spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
787 						sr->region_start + t);
788 		}
789 	}
790 	/*
791 	 * Next, lay out the memory stack spill area:
792 	 */
793 	if (sr->any_spills) {
794 		off = sr->spill_offset;
795 		alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
796 		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
797 		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
798 	}
799 }
800 
801 /*
802  * Region header descriptors.
803  */
804 
805 static void
806 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
807 	       struct unw_state_record *sr)
808 {
809 	int i, region_start;
810 
811 	if (!(sr->in_body || sr->first_region))
812 		finish_prologue(sr);
813 	sr->first_region = 0;
814 
815 	/* check if we're done: */
816 	if (sr->when_target < sr->region_start + sr->region_len) {
817 		sr->done = 1;
818 		return;
819 	}
820 
821 	region_start = sr->region_start + sr->region_len;
822 
823 	for (i = 0; i < sr->epilogue_count; ++i)
824 		pop(sr);
825 	sr->epilogue_count = 0;
826 	sr->epilogue_start = UNW_WHEN_NEVER;
827 
828 	sr->region_start = region_start;
829 	sr->region_len = rlen;
830 	sr->in_body = body;
831 
832 	if (!body) {
833 		push(sr);
834 
835 		for (i = 0; i < 4; ++i) {
836 			if (mask & 0x8)
837 				set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
838 					sr->region_start + sr->region_len - 1, grsave++);
839 			mask <<= 1;
840 		}
841 		sr->gr_save_loc = grsave;
842 		sr->any_spills = 0;
843 		sr->imask = NULL;
844 		sr->spill_offset = 0x10;	/* default to psp+16 */
845 	}
846 }
847 
848 /*
849  * Prologue descriptors.
850  */
851 
852 static inline void
853 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
854 {
855 	if (abi == 3 && context == 'i') {
856 		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
857 		UNW_DPRINT(3, "unwind.%s: interrupt frame\n",  __func__);
858 	}
859 	else
860 		UNW_DPRINT(0, "unwind.%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
861 				__func__, abi, context);
862 }
863 
864 static inline void
865 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
866 {
867 	int i;
868 
869 	for (i = 0; i < 5; ++i) {
870 		if (brmask & 1)
871 			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
872 				sr->region_start + sr->region_len - 1, gr++);
873 		brmask >>= 1;
874 	}
875 }
876 
877 static inline void
878 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
879 {
880 	int i;
881 
882 	for (i = 0; i < 5; ++i) {
883 		if (brmask & 1) {
884 			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
885 				sr->region_start + sr->region_len - 1, 0);
886 			sr->any_spills = 1;
887 		}
888 		brmask >>= 1;
889 	}
890 }
891 
892 static inline void
893 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
894 {
895 	int i;
896 
897 	for (i = 0; i < 4; ++i) {
898 		if ((grmask & 1) != 0) {
899 			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
900 				sr->region_start + sr->region_len - 1, 0);
901 			sr->any_spills = 1;
902 		}
903 		grmask >>= 1;
904 	}
905 	for (i = 0; i < 20; ++i) {
906 		if ((frmask & 1) != 0) {
907 			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
908 			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
909 				sr->region_start + sr->region_len - 1, 0);
910 			sr->any_spills = 1;
911 		}
912 		frmask >>= 1;
913 	}
914 }
915 
916 static inline void
917 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
918 {
919 	int i;
920 
921 	for (i = 0; i < 4; ++i) {
922 		if ((frmask & 1) != 0) {
923 			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
924 				sr->region_start + sr->region_len - 1, 0);
925 			sr->any_spills = 1;
926 		}
927 		frmask >>= 1;
928 	}
929 }
930 
931 static inline void
932 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
933 {
934 	int i;
935 
936 	for (i = 0; i < 4; ++i) {
937 		if ((grmask & 1) != 0)
938 			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
939 				sr->region_start + sr->region_len - 1, gr++);
940 		grmask >>= 1;
941 	}
942 }
943 
944 static inline void
945 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
946 {
947 	int i;
948 
949 	for (i = 0; i < 4; ++i) {
950 		if ((grmask & 1) != 0) {
951 			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
952 				sr->region_start + sr->region_len - 1, 0);
953 			sr->any_spills = 1;
954 		}
955 		grmask >>= 1;
956 	}
957 }
958 
959 static inline void
960 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
961 {
962 	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
963 		sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
964 }
965 
966 static inline void
967 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
968 {
969 	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
970 }
971 
972 static inline void
973 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
974 {
975 	set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
976 }
977 
978 static inline void
979 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
980 {
981 	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
982 		0x10 - 4*pspoff);
983 }
984 
985 static inline void
986 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
987 {
988 	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
989 		4*spoff);
990 }
991 
992 static inline void
993 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
994 {
995 	sr->return_link_reg = dst;
996 }
997 
998 static inline void
999 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1000 {
1001 	struct unw_reg_info *reg = sr->curr.reg + regnum;
1002 
1003 	if (reg->where == UNW_WHERE_NONE)
1004 		reg->where = UNW_WHERE_GR_SAVE;
1005 	reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1006 }
1007 
1008 static inline void
1009 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1010 {
1011 	sr->spill_offset = 0x10 - 4*pspoff;
1012 }
1013 
1014 static inline unsigned char *
1015 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1016 {
1017 	sr->imask = imaskp;
1018 	return imaskp + (2*sr->region_len + 7)/8;
1019 }
1020 
1021 /*
1022  * Body descriptors.
1023  */
1024 static inline void
1025 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1026 {
1027 	sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1028 	sr->epilogue_count = ecount + 1;
1029 }
1030 
1031 static inline void
1032 desc_copy_state (unw_word label, struct unw_state_record *sr)
1033 {
1034 	struct unw_labeled_state *ls;
1035 
1036 	for (ls = sr->labeled_states; ls; ls = ls->next) {
1037 		if (ls->label == label) {
1038 			free_state_stack(&sr->curr);
1039 			memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1040 			sr->curr.next = dup_state_stack(ls->saved_state.next);
1041 			return;
1042 		}
1043 	}
1044 	printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1045 }
1046 
1047 static inline void
1048 desc_label_state (unw_word label, struct unw_state_record *sr)
1049 {
1050 	struct unw_labeled_state *ls;
1051 
1052 	ls = alloc_labeled_state();
1053 	if (!ls) {
1054 		printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1055 		return;
1056 	}
1057 	ls->label = label;
1058 	memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1059 	ls->saved_state.next = dup_state_stack(sr->curr.next);
1060 
1061 	/* insert into list of labeled states: */
1062 	ls->next = sr->labeled_states;
1063 	sr->labeled_states = ls;
1064 }
1065 
1066 /*
1067  * General descriptors.
1068  */
1069 
1070 static inline int
1071 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1072 {
1073 	if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
1074 		return 0;
1075 	if (qp > 0) {
1076 		if ((sr->pr_val & (1UL << qp)) == 0)
1077 			return 0;
1078 		sr->pr_mask |= (1UL << qp);
1079 	}
1080 	return 1;
1081 }
1082 
1083 static inline void
1084 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1085 {
1086 	struct unw_reg_info *r;
1087 
1088 	if (!desc_is_active(qp, t, sr))
1089 		return;
1090 
1091 	r = sr->curr.reg + decode_abreg(abreg, 0);
1092 	r->where = UNW_WHERE_NONE;
1093 	r->when = UNW_WHEN_NEVER;
1094 	r->val = 0;
1095 }
1096 
1097 static inline void
1098 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1099 		     unsigned char ytreg, struct unw_state_record *sr)
1100 {
1101 	enum unw_where where = UNW_WHERE_GR;
1102 	struct unw_reg_info *r;
1103 
1104 	if (!desc_is_active(qp, t, sr))
1105 		return;
1106 
1107 	if (x)
1108 		where = UNW_WHERE_BR;
1109 	else if (ytreg & 0x80)
1110 		where = UNW_WHERE_FR;
1111 
1112 	r = sr->curr.reg + decode_abreg(abreg, 0);
1113 	r->where = where;
1114 	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1115 	r->val = (ytreg & 0x7f);
1116 }
1117 
1118 static inline void
1119 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1120 		     struct unw_state_record *sr)
1121 {
1122 	struct unw_reg_info *r;
1123 
1124 	if (!desc_is_active(qp, t, sr))
1125 		return;
1126 
1127 	r = sr->curr.reg + decode_abreg(abreg, 1);
1128 	r->where = UNW_WHERE_PSPREL;
1129 	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1130 	r->val = 0x10 - 4*pspoff;
1131 }
1132 
1133 static inline void
1134 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1135 		       struct unw_state_record *sr)
1136 {
1137 	struct unw_reg_info *r;
1138 
1139 	if (!desc_is_active(qp, t, sr))
1140 		return;
1141 
1142 	r = sr->curr.reg + decode_abreg(abreg, 1);
1143 	r->where = UNW_WHERE_SPREL;
1144 	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1145 	r->val = 4*spoff;
1146 }
1147 
1148 #define UNW_DEC_BAD_CODE(code)			printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1149 						       code);
1150 
1151 /*
1152  * region headers:
1153  */
1154 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg)	desc_prologue(0,r,m,gr,arg)
1155 #define UNW_DEC_PROLOGUE(fmt,b,r,arg)		desc_prologue(b,r,0,32,arg)
1156 /*
1157  * prologue descriptors:
1158  */
1159 #define UNW_DEC_ABI(fmt,a,c,arg)		desc_abi(a,c,arg)
1160 #define UNW_DEC_BR_GR(fmt,b,g,arg)		desc_br_gr(b,g,arg)
1161 #define UNW_DEC_BR_MEM(fmt,b,arg)		desc_br_mem(b,arg)
1162 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg)		desc_frgr_mem(g,f,arg)
1163 #define UNW_DEC_FR_MEM(fmt,f,arg)		desc_fr_mem(f,arg)
1164 #define UNW_DEC_GR_GR(fmt,m,g,arg)		desc_gr_gr(m,g,arg)
1165 #define UNW_DEC_GR_MEM(fmt,m,arg)		desc_gr_mem(m,arg)
1166 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg)	desc_mem_stack_f(t,s,arg)
1167 #define UNW_DEC_MEM_STACK_V(fmt,t,arg)		desc_mem_stack_v(t,arg)
1168 #define UNW_DEC_REG_GR(fmt,r,d,arg)		desc_reg_gr(r,d,arg)
1169 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg)		desc_reg_psprel(r,o,arg)
1170 #define UNW_DEC_REG_SPREL(fmt,r,o,arg)		desc_reg_sprel(r,o,arg)
1171 #define UNW_DEC_REG_WHEN(fmt,r,t,arg)		desc_reg_when(r,t,arg)
1172 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1173 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1174 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg)		desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1175 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg)	desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1176 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg)	desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1177 #define UNW_DEC_RP_BR(fmt,d,arg)		desc_rp_br(d,arg)
1178 #define UNW_DEC_SPILL_BASE(fmt,o,arg)		desc_spill_base(o,arg)
1179 #define UNW_DEC_SPILL_MASK(fmt,m,arg)		(m = desc_spill_mask(m,arg))
1180 /*
1181  * body descriptors:
1182  */
1183 #define UNW_DEC_EPILOGUE(fmt,t,c,arg)		desc_epilogue(t,c,arg)
1184 #define UNW_DEC_COPY_STATE(fmt,l,arg)		desc_copy_state(l,arg)
1185 #define UNW_DEC_LABEL_STATE(fmt,l,arg)		desc_label_state(l,arg)
1186 /*
1187  * general unwind descriptors:
1188  */
1189 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
1190 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg)	desc_spill_reg_p(0,t,a,x,y,arg)
1191 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
1192 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg)	desc_spill_psprel_p(0,t,a,o,arg)
1193 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
1194 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg)	desc_spill_sprel_p(0,t,a,o,arg)
1195 #define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
1196 #define UNW_DEC_RESTORE(f,t,a,arg)		desc_restore_p(0,t,a,arg)
1197 
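/*
 * The generic descriptor decoder included below parses the raw unwind
 * descriptor bytes and dispatches each record to the UNW_DEC_* hooks
 * defined above; build_script() drives it through unw_decode().
 */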
1198 #include "unwind_decoder.c"
1199 
1200 
1201 /* Unwind scripts. */
1202 
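/*
 * Cache organization, as implemented below: unw.hash[] maps hash(ip) to an
 * index into unw.cache[]; scripts that hash to the same bucket are chained
 * through script->coll_chain, and unw.lru_head/unw.lru_tail maintain the LRU
 * chain (script->lru_chain) from which script_new() recycles entries.
 */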
1203 static inline unw_hash_index_t
1204 hash (unsigned long ip)
1205 {
1206 	/* magic number = ((sqrt(5)-1)/2)*2^64 */
1207 	static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;
1208 
1209 	return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1210 }
1211 
1212 static inline long
1213 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1214 {
1215 	read_lock(&script->lock);
1216 	if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1217 		/* keep the read lock... */
1218 		return 1;
1219 	read_unlock(&script->lock);
1220 	return 0;
1221 }
1222 
1223 static inline struct unw_script *
1224 script_lookup (struct unw_frame_info *info)
1225 {
1226 	struct unw_script *script = unw.cache + info->hint;
1227 	unsigned short index;
1228 	unsigned long ip, pr;
1229 
1230 	if (UNW_DEBUG_ON(0))
1231 		return NULL;	/* Always regenerate scripts in debug mode */
1232 
1233 	STAT(++unw.stat.cache.lookups);
1234 
1235 	ip = info->ip;
1236 	pr = info->pr;
1237 
1238 	if (cache_match(script, ip, pr)) {
1239 		STAT(++unw.stat.cache.hinted_hits);
1240 		return script;
1241 	}
1242 
1243 	index = unw.hash[hash(ip)];
1244 	if (index >= UNW_CACHE_SIZE)
1245 		return NULL;
1246 
1247 	script = unw.cache + index;
1248 	while (1) {
1249 		if (cache_match(script, ip, pr)) {
1250 			/* update hint; no locking required as single-word writes are atomic */
1251 			STAT(++unw.stat.cache.normal_hits);
1252 			unw.cache[info->prev_script].hint = script - unw.cache;
1253 			return script;
1254 		}
1255 		if (script->coll_chain >= UNW_HASH_SIZE)
1256 			return NULL;
1257 		script = unw.cache + script->coll_chain;
1258 		STAT(++unw.stat.cache.collision_chain_traversals);
1259 	}
1260 }
1261 
1262 /*
1263  * On returning, a write lock for the SCRIPT is still being held.
1264  */
1265 static inline struct unw_script *
1266 script_new (unsigned long ip)
1267 {
1268 	struct unw_script *script, *prev, *tmp;
1269 	unw_hash_index_t index;
1270 	unsigned short head;
1271 
1272 	STAT(++unw.stat.script.news);
1273 
1274 	/*
1275 	 * Can't (easily) use cmpxchg() here because of ABA problem
1276 	 * that is intrinsic in cmpxchg()...
1277 	 */
1278 	head = unw.lru_head;
1279 	script = unw.cache + head;
1280 	unw.lru_head = script->lru_chain;
1281 
1282 	/*
1283 	 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1284 	 * script->lock.  Thus, if the write_trylock() fails, we simply bail out.  The
1285 	 * alternative would be to disable interrupts whenever we hold a read-lock, but
1286 	 * that seems silly.
1287 	 */
1288 	if (!write_trylock(&script->lock))
1289 		return NULL;
1290 
1291 	/* re-insert script at the tail of the LRU chain: */
1292 	unw.cache[unw.lru_tail].lru_chain = head;
1293 	unw.lru_tail = head;
1294 
1295 	/* remove the old script from the hash table (if it's there): */
1296 	if (script->ip) {
1297 		index = hash(script->ip);
1298 		tmp = unw.cache + unw.hash[index];
1299 		prev = NULL;
1300 		while (1) {
1301 			if (tmp == script) {
1302 				if (prev)
1303 					prev->coll_chain = tmp->coll_chain;
1304 				else
1305 					unw.hash[index] = tmp->coll_chain;
1306 				break;
1307 			} else
1308 				prev = tmp;
1309 			if (tmp->coll_chain >= UNW_CACHE_SIZE)
1310 			/* old script wasn't in the hash-table */
1311 				break;
1312 			tmp = unw.cache + tmp->coll_chain;
1313 		}
1314 	}
1315 
1316 	/* enter new script in the hash table */
1317 	index = hash(ip);
1318 	script->coll_chain = unw.hash[index];
1319 	unw.hash[index] = script - unw.cache;
1320 
1321 	script->ip = ip;	/* set new IP while we're holding the locks */
1322 
1323 	STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1324 
1325 	script->flags = 0;
1326 	script->hint = 0;
1327 	script->count = 0;
1328 	return script;
1329 }
1330 
1331 static void
1332 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1333 {
1334 	script->pr_mask = sr->pr_mask;
1335 	script->pr_val = sr->pr_val;
1336 	/*
1337 	 * We could down-grade our write-lock on script->lock here but
1338 	 * the rwlock API doesn't offer atomic lock downgrading, so
1339 	 * we'll just keep the write-lock and release it later when
1340 	 * we're done using the script.
1341 	 */
1342 }
1343 
1344 static inline void
1345 script_emit (struct unw_script *script, struct unw_insn insn)
1346 {
1347 	if (script->count >= UNW_MAX_SCRIPT_LEN) {
1348 		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1349 			__func__, UNW_MAX_SCRIPT_LEN);
1350 		return;
1351 	}
1352 	script->insn[script->count++] = insn;
1353 }
1354 
1355 static inline void
1356 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1357 {
1358 	struct unw_reg_info *r = sr->curr.reg + i;
1359 	enum unw_insn_opcode opc;
1360 	struct unw_insn insn;
1361 	unsigned long val = 0;
1362 
1363 	switch (r->where) {
1364 	      case UNW_WHERE_GR:
1365 		if (r->val >= 32) {
1366 			/* register got spilled to a stacked register */
1367 			opc = UNW_INSN_SETNAT_TYPE;
1368 			val = UNW_NAT_REGSTK;
1369 		} else
1370 			/* register got spilled to a scratch register */
1371 			opc = UNW_INSN_SETNAT_MEMSTK;
1372 		break;
1373 
1374 	      case UNW_WHERE_FR:
1375 		opc = UNW_INSN_SETNAT_TYPE;
1376 		val = UNW_NAT_VAL;
1377 		break;
1378 
1379 	      case UNW_WHERE_BR:
1380 		opc = UNW_INSN_SETNAT_TYPE;
1381 		val = UNW_NAT_NONE;
1382 		break;
1383 
1384 	      case UNW_WHERE_PSPREL:
1385 	      case UNW_WHERE_SPREL:
1386 		opc = UNW_INSN_SETNAT_MEMSTK;
1387 		break;
1388 
1389 	      default:
1390 		UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1391 			   __func__, r->where);
1392 		return;
1393 	}
1394 	insn.opc = opc;
1395 	insn.dst = unw.preg_index[i];
1396 	insn.val = val;
1397 	script_emit(script, insn);
1398 }
1399 
1400 static void
1401 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1402 {
1403 	struct unw_reg_info *r = sr->curr.reg + i;
1404 	enum unw_insn_opcode opc;
1405 	unsigned long val, rval;
1406 	struct unw_insn insn;
1407 	long need_nat_info;
1408 
1409 	if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1410 		return;
1411 
1412 	opc = UNW_INSN_MOVE;
1413 	val = rval = r->val;
1414 	need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1415 
1416 	switch (r->where) {
1417 	      case UNW_WHERE_GR:
1418 		if (rval >= 32) {
1419 			opc = UNW_INSN_MOVE_STACKED;
1420 			val = rval - 32;
1421 		} else if (rval >= 4 && rval <= 7) {
1422 			if (need_nat_info) {
1423 				opc = UNW_INSN_MOVE2;
1424 				need_nat_info = 0;
1425 			}
1426 			val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1427 		} else if (rval == 0) {
1428 			opc = UNW_INSN_MOVE_CONST;
1429 			val = 0;
1430 		} else {
1431 			/* register got spilled to a scratch register */
1432 			opc = UNW_INSN_MOVE_SCRATCH;
1433 			val = pt_regs_off(rval);
1434 		}
1435 		break;
1436 
1437 	      case UNW_WHERE_FR:
1438 		if (rval <= 5)
1439 			val = unw.preg_index[UNW_REG_F2  + (rval -  2)];
1440 		else if (rval >= 16 && rval <= 31)
1441 			val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1442 		else {
1443 			opc = UNW_INSN_MOVE_SCRATCH;
1444 			if (rval <= 11)
1445 				val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1446 			else
1447 				UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1448 					   __func__, rval);
1449 		}
1450 		break;
1451 
1452 	      case UNW_WHERE_BR:
1453 		if (rval >= 1 && rval <= 5)
1454 			val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1455 		else {
1456 			opc = UNW_INSN_MOVE_SCRATCH;
1457 			if (rval == 0)
1458 				val = offsetof(struct pt_regs, b0);
1459 			else if (rval == 6)
1460 				val = offsetof(struct pt_regs, b6);
1461 			else
1462 				val = offsetof(struct pt_regs, b7);
1463 		}
1464 		break;
1465 
1466 	      case UNW_WHERE_SPREL:
1467 		opc = UNW_INSN_ADD_SP;
1468 		break;
1469 
1470 	      case UNW_WHERE_PSPREL:
1471 		opc = UNW_INSN_ADD_PSP;
1472 		break;
1473 
1474 	      default:
1475 		UNW_DPRINT(0, "unwind.%s: register %u has unexpected `where' value of %u\n",
1476 			   __func__, i, r->where);
1477 		break;
1478 	}
1479 	insn.opc = opc;
1480 	insn.dst = unw.preg_index[i];
1481 	insn.val = val;
1482 	script_emit(script, insn);
1483 	if (need_nat_info)
1484 		emit_nat_info(sr, i, script);
1485 
1486 	if (i == UNW_REG_PSP) {
1487 		/*
1488 		 * info->psp must contain the _value_ of the previous
1489 		 * sp, not its save location.  We get this by
1490 		 * dereferencing the value we just stored in
1491 		 * info->psp:
1492 		 */
1493 		insn.opc = UNW_INSN_LOAD;
1494 		insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1495 		script_emit(script, insn);
1496 	}
1497 }
1498 
1499 static inline const struct unw_table_entry *
1500 lookup (struct unw_table *table, unsigned long rel_ip)
1501 {
1502 	const struct unw_table_entry *e = NULL;
1503 	unsigned long lo, hi, mid;
1504 
1505 	/* do a binary search for right entry: */
1506 	for (lo = 0, hi = table->length; lo < hi; ) {
1507 		mid = (lo + hi) / 2;
1508 		e = &table->array[mid];
1509 		if (rel_ip < e->start_offset)
1510 			hi = mid;
1511 		else if (rel_ip >= e->end_offset)
1512 			lo = mid + 1;
1513 		else
1514 			break;
1515 	}
1516 	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1517 		return NULL;
1518 	return e;
1519 }
1520 
1521 /*
1522  * Build an unwind script that unwinds from state OLD_STATE to the
1523  * entrypoint of the function that called OLD_STATE.
1524  */
1525 static inline struct unw_script *
1526 build_script (struct unw_frame_info *info)
1527 {
1528 	const struct unw_table_entry *e = NULL;
1529 	struct unw_script *script = NULL;
1530 	struct unw_labeled_state *ls, *next;
1531 	unsigned long ip = info->ip;
1532 	struct unw_state_record sr;
1533 	struct unw_table *table, *prev;
1534 	struct unw_reg_info *r;
1535 	struct unw_insn insn;
1536 	u8 *dp, *desc_end;
1537 	u64 hdr;
1538 	int i;
1539 	STAT(unsigned long start, parse_start;)
1540 
1541 	STAT(++unw.stat.script.builds; start = ia64_get_itc());
1542 
1543 	/* build state record */
1544 	memset(&sr, 0, sizeof(sr));
1545 	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1546 		r->when = UNW_WHEN_NEVER;
1547 	sr.pr_val = info->pr;
1548 
1549 	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
1550 	script = script_new(ip);
1551 	if (!script) {
1552 		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n",  __func__);
1553 		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1554 		return NULL;
1555 	}
1556 	unw.cache[info->prev_script].hint = script - unw.cache;
1557 
1558 	/* search the kernel's and the modules' unwind tables for IP: */
1559 
1560 	STAT(parse_start = ia64_get_itc());
1561 
1562 	prev = NULL;
1563 	for (table = unw.tables; table; table = table->next) {
1564 		if (ip >= table->start && ip < table->end) {
1565 			/*
1566 			 * Leave the kernel unwind table at the very front,
1567 			 * lest moving it breaks some assumption elsewhere.
1568 			 * Otherwise, move the matching table to the second
1569 			 * position in the list so that traversals can benefit
1570 			 * from commonality in backtrace paths.
1571 			 */
1572 			if (prev && prev != unw.tables) {
1573 				/* unw is safe - we're already spinlocked */
1574 				prev->next = table->next;
1575 				table->next = unw.tables->next;
1576 				unw.tables->next = table;
1577 			}
1578 			e = lookup(table, ip - table->segment_base);
1579 			break;
1580 		}
1581 		prev = table;
1582 	}
1583 	if (!e) {
1584 		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs)  */
1585 		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1586 			__func__, ip, unw.cache[info->prev_script].ip);
1587 		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1588 		sr.curr.reg[UNW_REG_RP].when = -1;
1589 		sr.curr.reg[UNW_REG_RP].val = 0;
1590 		compile_reg(&sr, UNW_REG_RP, script);
1591 		script_finalize(script, &sr);
1592 		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1593 		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1594 		return script;
1595 	}
1596 
1597 	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1598 			  + (ip & 0xfUL));
1599 	hdr = *(u64 *) (table->segment_base + e->info_offset);
1600 	dp =   (u8 *)  (table->segment_base + e->info_offset + 8);
1601 	desc_end = dp + 8*UNW_LENGTH(hdr);
1602 
1603 	while (!sr.done && dp < desc_end)
1604 		dp = unw_decode(dp, sr.in_body, &sr);
1605 
1606 	if (sr.when_target > sr.epilogue_start) {
1607 		/*
1608 		 * sp has been restored and all values on the memory stack below
1609 		 * psp also have been restored.
1610 		 */
1611 		sr.curr.reg[UNW_REG_PSP].val = 0;
1612 		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1613 		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1614 		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1615 			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1616 			    || r->where == UNW_WHERE_SPREL)
1617 			{
1618 				r->val = 0;
1619 				r->where = UNW_WHERE_NONE;
1620 				r->when = UNW_WHEN_NEVER;
1621 			}
1622 	}
1623 
1624 	script->flags = sr.flags;
1625 
1626 	/*
1627 	 * If RP didn't get saved, generate entry for the return link
1628 	 * register.
1629 	 */
1630 	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1631 		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1632 		sr.curr.reg[UNW_REG_RP].when = -1;
1633 		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1634 		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1635 			   __func__, ip, sr.curr.reg[UNW_REG_RP].where,
1636 			   sr.curr.reg[UNW_REG_RP].val);
1637 	}
1638 
1639 #ifdef UNW_DEBUG
1640 	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1641 		__func__, table->segment_base + e->start_offset, sr.when_target);
1642 	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1643 		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1644 			UNW_DPRINT(1, "  %s <- ", unw.preg_name[r - sr.curr.reg]);
1645 			switch (r->where) {
1646 			      case UNW_WHERE_GR:     UNW_DPRINT(1, "r%lu", r->val); break;
1647 			      case UNW_WHERE_FR:     UNW_DPRINT(1, "f%lu", r->val); break;
1648 			      case UNW_WHERE_BR:     UNW_DPRINT(1, "b%lu", r->val); break;
1649 			      case UNW_WHERE_SPREL:  UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1650 			      case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1651 			      case UNW_WHERE_NONE:
1652 				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1653 				break;
1654 
1655 			      default:
1656 				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1657 				break;
1658 			}
1659 			UNW_DPRINT(1, "\t\t%d\n", r->when);
1660 		}
1661 	}
1662 #endif
1663 
1664 	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1665 
1666 	/* translate state record into unwinder instructions: */
1667 
1668 	/*
1669 	 * First, set psp if we're dealing with a fixed-size frame;
1670 	 * subsequent instructions may depend on this value.
1671 	 */
1672 	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1673 	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1674 	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
1675 		/* new psp is sp plus frame size */
1676 		insn.opc = UNW_INSN_ADD;
1677 		insn.dst = offsetof(struct unw_frame_info, psp)/8;
1678 		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
1679 		script_emit(script, insn);
1680 	}
1681 
1682 	/* determine where the primary UNaT is: */
1683 	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1684 		i = UNW_REG_PRI_UNAT_MEM;
1685 	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1686 		i = UNW_REG_PRI_UNAT_GR;
1687 	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1688 		i = UNW_REG_PRI_UNAT_MEM;
1689 	else
1690 		i = UNW_REG_PRI_UNAT_GR;
1691 
1692 	compile_reg(&sr, i, script);
1693 
1694 	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1695 		compile_reg(&sr, i, script);
1696 
1697 	/* free labeled register states & stack: */
1698 
1699 	STAT(parse_start = ia64_get_itc());
1700 	for (ls = sr.labeled_states; ls; ls = next) {
1701 		next = ls->next;
1702 		free_state_stack(&ls->saved_state);
1703 		free_labeled_state(ls);
1704 	}
1705 	free_state_stack(&sr.curr);
1706 	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1707 
1708 	script_finalize(script, &sr);
1709 	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1710 	return script;
1711 }
1712 
1713 /*
1714  * Run the unwind script SCRIPT and update STATE so that it reflects
1715  * the machine state that existed upon entry to the function that this
1716  * frame represents.
1717  */
1718 static inline void
1719 run_script (struct unw_script *script, struct unw_frame_info *state)
1720 {
1721 	struct unw_insn *ip, *limit, next_insn;
1722 	unsigned long opc, dst, val, off;
1723 	unsigned long *s = (unsigned long *) state;
1724 	STAT(unsigned long start;)
1725 
1726 	STAT(++unw.stat.script.runs; start = ia64_get_itc());
1727 	state->flags = script->flags;
1728 	ip = script->insn;
1729 	limit = script->insn + script->count;
1730 	next_insn = *ip;
1731 
1732 	while (ip++ < limit) {
1733 		opc = next_insn.opc;
1734 		dst = next_insn.dst;
1735 		val = next_insn.val;
1736 		next_insn = *ip;
1737 
1738 	  redo:
1739 		switch (opc) {
1740 		      case UNW_INSN_ADD:
1741 			s[dst] += val;
1742 			break;
1743 
1744 		      case UNW_INSN_MOVE2:
1745 			if (!s[val])
1746 				goto lazy_init;
1747 			s[dst+1] = s[val+1];
1748 			s[dst] = s[val];
1749 			break;
1750 
1751 		      case UNW_INSN_MOVE:
1752 			if (!s[val])
1753 				goto lazy_init;
1754 			s[dst] = s[val];
1755 			break;
1756 
1757 		      case UNW_INSN_MOVE_SCRATCH:
1758 			if (state->pt) {
1759 				s[dst] = (unsigned long) get_scratch_regs(state) + val;
1760 			} else {
1761 				s[dst] = 0;
1762 				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1763 					   __func__, dst, val);
1764 			}
1765 			break;
1766 
1767 		      case UNW_INSN_MOVE_CONST:
1768 			if (val == 0)
1769 				s[dst] = (unsigned long) &unw.r0;
1770 			else {
1771 				s[dst] = 0;
1772 				UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1773 					   __func__, val);
1774 			}
1775 			break;
1776 
1777 
1778 		      case UNW_INSN_MOVE_STACKED:
1779 			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1780 								    val);
1781 			break;
1782 
1783 		      case UNW_INSN_ADD_PSP:
1784 			s[dst] = state->psp + val;
1785 			break;
1786 
1787 		      case UNW_INSN_ADD_SP:
1788 			s[dst] = state->sp + val;
1789 			break;
1790 
1791 		      case UNW_INSN_SETNAT_MEMSTK:
1792 			if (!state->pri_unat_loc)
1793 				state->pri_unat_loc = &state->sw->caller_unat;
1794 			/* register offset is a multiple of 8, so the 3 least-significant bits (type) are 0 */
1795 			s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1796 			break;
1797 
1798 		      case UNW_INSN_SETNAT_TYPE:
1799 			s[dst+1] = val;
1800 			break;
1801 
1802 		      case UNW_INSN_LOAD:
1803 #ifdef UNW_DEBUG
1804 			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1805 			    || s[val] < TASK_SIZE)
1806 			{
1807 				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1808 					   __func__, s[val]);
1809 				break;
1810 			}
1811 #endif
1812 			s[dst] = *(unsigned long *) s[val];
1813 			break;
1814 		}
1815 	}
1816 	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1817 	return;
1818 
1819   lazy_init:
1820 	off = unw.sw_off[val];
1821 	s[val] = (unsigned long) state->sw + off;
1822 	if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1823 		/*
1824 		 * We're initializing a general register: init NaT info, too.  Note that
1825 		 * the offset is a multiple of 8 which gives us the 3 bits needed for
1826 		 * the type field.
1827 		 */
1828 		s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1829 	goto redo;
1830 }
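
/*
 * Illustrative sketch (hypothetical helper, not part of the unwinder):
 * run_script() treats the frame-info structure as an array of 8-byte
 * slots indexed by insn.dst/insn.val.  A two-instruction script that
 * sets psp = sp + 32 and then points rp_loc at [psp + 16] would have
 * roughly this effect:
 */
#if 0	/* example only */
static void example_apply_two_insns (struct unw_frame_info *info)
{
	unsigned long *s = (unsigned long *) info;

	/* UNW_INSN_ADD_SP: slot <- sp + val */
	s[offsetof(struct unw_frame_info, psp)/8] = info->sp + 32;
	/* UNW_INSN_ADD_PSP: slot <- psp + val (uses the psp set above) */
	s[offsetof(struct unw_frame_info, rp_loc)/8] = info->psp + 16;
}
#endif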
1831 
1832 static int
1833 find_save_locs (struct unw_frame_info *info)
1834 {
1835 	int have_write_lock = 0;
1836 	struct unw_script *scr;
1837 	unsigned long flags = 0;
1838 
1839 	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1840 		/* don't let obviously bad addresses pollute the cache */
1841 		/* FIXME: should really be level 0 but it occurs too often. KAO */
1842 		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
1843 		info->rp_loc = NULL;
1844 		return -1;
1845 	}
1846 
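	/*
	 * script_lookup() returns with the script's read lock held; on a
	 * miss, build_script() is called under unw.lock and returns with
	 * the new script's write lock held, hence the asymmetric unlocking
	 * after run_script() below.
	 */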
1847 	scr = script_lookup(info);
1848 	if (!scr) {
1849 		spin_lock_irqsave(&unw.lock, flags);
1850 		scr = build_script(info);
1851 		if (!scr) {
1852 			spin_unlock_irqrestore(&unw.lock, flags);
1853 			UNW_DPRINT(0,
1854 				   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1855 				   __func__, info->ip);
1856 			return -1;
1857 		}
1858 		have_write_lock = 1;
1859 	}
1860 	info->hint = scr->hint;
1861 	info->prev_script = scr - unw.cache;
1862 
1863 	run_script(scr, info);
1864 
1865 	if (have_write_lock) {
1866 		write_unlock(&scr->lock);
1867 		spin_unlock_irqrestore(&unw.lock, flags);
1868 	} else
1869 		read_unlock(&scr->lock);
1870 	return 0;
1871 }
1872 
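/*
 * Check that a save location computed by the unwinder actually lies
 * within the task's register backing store or memory stack.
 */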
1873 static int
1874 unw_valid(const struct unw_frame_info *info, unsigned long *p)
1875 {
1876 	unsigned long loc = (unsigned long)p;
1877 	return (loc >= info->regstk.limit && loc < info->regstk.top) ||
1878 	       (loc >= info->memstk.top && loc < info->memstk.limit);
1879 }
1880 
1881 int
1882 unw_unwind (struct unw_frame_info *info)
1883 {
1884 	unsigned long prev_ip, prev_sp, prev_bsp;
1885 	unsigned long ip, pr, num_regs;
1886 	STAT(unsigned long start, flags;)
1887 	int retval;
1888 
1889 	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1890 
1891 	prev_ip = info->ip;
1892 	prev_sp = info->sp;
1893 	prev_bsp = info->bsp;
1894 
1895 	/* validate the return IP pointer */
1896 	if (!unw_valid(info, info->rp_loc)) {
1897 		/* FIXME: should really be level 0 but it occurs too often. KAO */
1898 		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1899 			   __func__, info->ip);
1900 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1901 		return -1;
1902 	}
1903 	/* restore the ip */
1904 	ip = info->ip = *info->rp_loc;
1905 	if (ip < GATE_ADDR) {
1906 		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
1907 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1908 		return -1;
1909 	}
1910 
1911 	/* validate the previous stack frame pointer */
1912 	if (!unw_valid(info, info->pfs_loc)) {
1913 		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
1914 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1915 		return -1;
1916 	}
1917 	/* restore the cfm: */
1918 	info->cfm_loc = info->pfs_loc;
1919 
1920 	/* restore the bsp: */
1921 	pr = info->pr;
1922 	num_regs = 0;
1923 	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1924 		info->pt = info->sp + 16;
1925 		if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
1926 			num_regs = *info->cfm_loc & 0x7f;		/* size of frame */
1927 		info->pfs_loc =
1928 			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1929 		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
1930 	} else
1931 		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
1932 	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1933 	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1934 		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1935 			__func__, info->bsp, info->regstk.limit, info->regstk.top);
1936 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1937 		return -1;
1938 	}
1939 
1940 	/* restore the sp: */
1941 	info->sp = info->psp;
1942 	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1943 		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1944 			__func__, info->sp, info->memstk.top, info->memstk.limit);
1945 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1946 		return -1;
1947 	}
1948 
1949 	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1950 		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1951 			   __func__, ip);
1952 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1953 		return -1;
1954 	}
1955 
1956 	/* as we unwind, the saved ar.unat becomes the primary unat: */
1957 	info->pri_unat_loc = info->unat_loc;
1958 
1959 	/* finally, restore the predicates: */
1960 	unw_get_pr(info, &info->pr);
1961 
1962 	retval = find_save_locs(info);
1963 	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1964 	return retval;
1965 }
1966 EXPORT_SYMBOL(unw_unwind);
1967 
1968 int
1969 unw_unwind_to_user (struct unw_frame_info *info)
1970 {
1971 	unsigned long ip, sp, pr = info->pr;
1972 
1973 	do {
1974 		unw_get_sp(info, &sp);
1975 		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1976 		    < IA64_PT_REGS_SIZE) {
1977 			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1978 				   __func__);
1979 			break;
1980 		}
1981 		if (unw_is_intr_frame(info) &&
1982 		    (pr & (1UL << PRED_USER_STACK)))
1983 			return 0;
1984 		if (unw_get_pr (info, &pr) < 0) {
1985 			unw_get_rp(info, &ip);
1986 			UNW_DPRINT(0, "unwind.%s: failed to read "
1987 				   "predicate register (ip=0x%lx)\n",
1988 				__func__, ip);
1989 			return -1;
1990 		}
1991 	} while (unw_unwind(info) >= 0);
1992 	unw_get_ip(info, &ip);
1993 	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1994 		   __func__, ip);
1995 	return -1;
1996 }
1997 EXPORT_SYMBOL(unw_unwind_to_user);
1998 
1999 static void
2000 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2001 		 struct switch_stack *sw, unsigned long stktop)
2002 {
2003 	unsigned long rbslimit, rbstop, stklimit;
2004 	STAT(unsigned long start, flags;)
2005 
2006 	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
2007 
2008 	/*
2009 	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
2010 	 * don't want to do that because it would be slow as each preserved register would
2011 	 * have to be processed.  Instead, what we do here is zero out the frame info and
2012 	 * start the unwind process at the function that created the switch_stack frame.
2013 	 * When a preserved value in switch_stack needs to be accessed, run_script() will
2014 	 * initialize the appropriate pointer on demand.
2015 	 */
2016 	memset(info, 0, sizeof(*info));
2017 
2018 	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
2019 	stklimit = (unsigned long) t + IA64_STK_OFFSET;
2020 
2021 	rbstop   = sw->ar_bspstore;
2022 	if (rbstop > stklimit || rbstop < rbslimit)
2023 		rbstop = rbslimit;
2024 
2025 	if (stktop <= rbstop)
2026 		stktop = rbstop;
2027 	if (stktop > stklimit)
2028 		stktop = stklimit;
2029 
2030 	info->regstk.limit = rbslimit;
2031 	info->regstk.top   = rbstop;
2032 	info->memstk.limit = stklimit;
2033 	info->memstk.top   = stktop;
2034 	info->task = t;
2035 	info->sw  = sw;
2036 	info->sp = info->psp = stktop;
2037 	info->pr = sw->pr;
2038 	UNW_DPRINT(3, "unwind.%s:\n"
2039 		   "  task   0x%lx\n"
2040 		   "  rbs = [0x%lx-0x%lx)\n"
2041 		   "  stk = [0x%lx-0x%lx)\n"
2042 		   "  pr     0x%lx\n"
2043 		   "  sw     0x%lx\n"
2044 		   "  sp     0x%lx\n",
2045 		   __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2046 		   info->pr, (unsigned long) info->sw, info->sp);
2047 	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2048 }
2049 
2050 void
2051 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2052 {
2053 	unsigned long sol;
2054 
2055 	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2056 	info->cfm_loc = &sw->ar_pfs;
2057 	sol = (*info->cfm_loc >> 7) & 0x7f;
2058 	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2059 	info->ip = sw->b0;
2060 	UNW_DPRINT(3, "unwind.%s:\n"
2061 		   "  bsp    0x%lx\n"
2062 		   "  sol    0x%lx\n"
2063 		   "  ip     0x%lx\n",
2064 		   __func__, info->bsp, sol, info->ip);
2065 	find_save_locs(info);
2066 }
2067 
2068 EXPORT_SYMBOL(unw_init_frame_info);
2069 
2070 void
2071 unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2072 {
2073 	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2074 
2075 	UNW_DPRINT(1, "unwind.%s\n", __func__);
2076 	unw_init_frame_info(info, t, sw);
2077 }
2078 EXPORT_SYMBOL(unw_init_from_blocked_task);
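
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * typical in-kernel consumer of the API above walks a blocked task's
 * kernel stack like this.
 */
#if 0	/* example only */
static void example_dump_task_stack (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	do {
		unw_get_ip(&info, &ip);
		if (ip == 0)
			break;
		printk(KERN_DEBUG " [<%016lx>]\n", ip);
	} while (unw_unwind(&info) >= 0);
}
#endif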
2079 
2080 static void
2081 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2082 		   unsigned long gp, const void *table_start, const void *table_end)
2083 {
2084 	const struct unw_table_entry *start = table_start, *end = table_end;
2085 
2086 	table->name = name;
2087 	table->segment_base = segment_base;
2088 	table->gp = gp;
2089 	table->start = segment_base + start[0].start_offset;
2090 	table->end = segment_base + end[-1].end_offset;
2091 	table->array = start;
2092 	table->length = end - start;
2093 }
2094 
2095 void *
2096 unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2097 		      const void *table_start, const void *table_end)
2098 {
2099 	const struct unw_table_entry *start = table_start, *end = table_end;
2100 	struct unw_table *table;
2101 	unsigned long flags;
2102 
2103 	if (end - start <= 0) {
2104 		UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2105 			   __func__);
2106 		return NULL;
2107 	}
2108 
2109 	table = kmalloc(sizeof(*table), GFP_USER);
2110 	if (!table)
2111 		return NULL;
2112 
2113 	init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2114 
2115 	spin_lock_irqsave(&unw.lock, flags);
2116 	{
2117 		/* keep kernel unwind table at the front (it's searched most commonly): */
2118 		table->next = unw.tables->next;
2119 		unw.tables->next = table;
2120 	}
2121 	spin_unlock_irqrestore(&unw.lock, flags);
2122 
2123 	return table;
2124 }
2125 
2126 void
2127 unw_remove_unwind_table (void *handle)
2128 {
2129 	struct unw_table *table, *prev;
2130 	struct unw_script *tmp;
2131 	unsigned long flags;
2132 	long index;
2133 
2134 	if (!handle) {
2135 		UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2136 			   __func__);
2137 		return;
2138 	}
2139 
2140 	table = handle;
2141 	if (table == &unw.kernel_table) {
2142 		UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2143 			   "no-can-do!\n", __func__);
2144 		return;
2145 	}
2146 
2147 	spin_lock_irqsave(&unw.lock, flags);
2148 	{
2149 		/* first, delete the table: */
2150 
2151 		for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2152 			if (prev->next == table)
2153 				break;
2154 		if (!prev) {
2155 			UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2156 				   __func__, (void *) table);
2157 			spin_unlock_irqrestore(&unw.lock, flags);
2158 			return;
2159 		}
2160 		prev->next = table->next;
2161 	}
2162 	spin_unlock_irqrestore(&unw.lock, flags);
2163 
2164 	/* next, remove hash table entries for this table */
2165 
2166 	for (index = 0; index < UNW_HASH_SIZE; ++index) {
2167 		tmp = unw.cache + unw.hash[index];
2168 		if (unw.hash[index] >= UNW_CACHE_SIZE
2169 		    || tmp->ip < table->start || tmp->ip >= table->end)
2170 			continue;
2171 
2172 		write_lock(&tmp->lock);
2173 		{
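			/*
			 * Re-check under the script's write lock: the entry
			 * may have changed since the unlocked test above.
			 */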
2174 			if (tmp->ip >= table->start && tmp->ip < table->end) {
2175 				unw.hash[index] = tmp->coll_chain;
2176 				tmp->ip = 0;
2177 			}
2178 		}
2179 		write_unlock(&tmp->lock);
2180 	}
2181 
2182 	kfree(table);
2183 }
2184 
2185 static int __init
2186 create_gate_table (void)
2187 {
2188 	const struct unw_table_entry *entry, *start, *end;
2189 	unsigned long *lp, segbase = GATE_ADDR;
2190 	size_t info_size, size;
2191 	char *info;
2192 	Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2193 	int i;
2194 
2195 	for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2196 		if (phdr->p_type == PT_IA_64_UNWIND) {
2197 			punw = phdr;
2198 			break;
2199 		}
2200 
2201 	if (!punw) {
2202 		printk("%s: failed to find gate DSO's unwind table!\n", __func__);
2203 		return 0;
2204 	}
2205 
2206 	start = (const struct unw_table_entry *) punw->p_vaddr;
2207 	end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2208 	size  = 0;
2209 
2210 	unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2211 
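	/*
	 * Per function: three 8-byte table words (start, end, info offset),
	 * one 8-byte unwind-info header, and 8 bytes for each descriptor
	 * word (UNW_LENGTH() yields the word count from the header).
	 */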
2212 	for (entry = start; entry < end; ++entry)
2213 		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2214 	size += 8;	/* reserve space for "end of table" marker */
2215 
2216 	unw.gate_table = kmalloc(size, GFP_KERNEL);
2217 	if (!unw.gate_table) {
2218 		unw.gate_table_size = 0;
2219 		printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__);
2220 		return 0;
2221 	}
2222 	unw.gate_table_size = size;
2223 
2224 	lp = unw.gate_table;
2225 	info = (char *) unw.gate_table + size;
2226 
2227 	for (entry = start; entry < end; ++entry, lp += 3) {
2228 		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2229 		info -= info_size;
2230 		memcpy(info, (char *) segbase + entry->info_offset, info_size);
2231 
2232 		lp[0] = segbase + entry->start_offset;		/* start */
2233 		lp[1] = segbase + entry->end_offset;		/* end */
2234 		lp[2] = info - (char *) unw.gate_table;		/* info */
2235 	}
2236 	*lp = 0;	/* end-of-table marker */
2237 	return 0;
2238 }
2239 
2240 __initcall(create_gate_table);
2241 
2242 void __init
2243 unw_init (void)
2244 {
2245 	extern char __gp[];
2246 	extern void unw_hash_index_t_is_too_narrow (void);
2247 	long i, off;
2248 
2249 	if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2250 		unw_hash_index_t_is_too_narrow();
2251 
2252 	unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
2253 	unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2254 	unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2255 	unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2256 	unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
2257 	unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2258 	unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2259 	unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2260 	for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2261 		unw.sw_off[unw.preg_index[i]] = off;
2262 	for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2263 		unw.sw_off[unw.preg_index[i]] = off;
2264 	for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2265 		unw.sw_off[unw.preg_index[i]] = off;
2266 	for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2267 		unw.sw_off[unw.preg_index[i]] = off;
2268 
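	/*
	 * Thread the cache entries into a single LRU chain: entry 0 starts
	 * out as the tail, entry UNW_CACHE_SIZE-1 as the head, and all
	 * collision chains start out empty.
	 */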
2269 	for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2270 		if (i > 0)
2271 			unw.cache[i].lru_chain = (i - 1);
2272 		unw.cache[i].coll_chain = -1;
2273 		rwlock_init(&unw.cache[i].lock);
2274 	}
2275 	unw.lru_head = UNW_CACHE_SIZE - 1;
2276 	unw.lru_tail = 0;
2277 
2278 	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2279 			  __start_unwind, __end_unwind);
2280 }
2281 
2282 /*
2283  * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2284  *
2285  *	This system call has been deprecated.  The new and improved way to get
2286  *	at the kernel's unwind info is via the gate DSO.  The address of the
2287  *	ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2288  *
2289  * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2290  *
2291  * This system call copies the unwind data into the buffer pointed to by BUF and returns
2292  * the size of the unwind data.  If BUF_SIZE is smaller than the size of the unwind data
2293  * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2294  * unwind data.
2295  *
2296  * The first portion of the unwind data contains an unwind table; the rest contains the
2297  * associated unwind info (in no particular order).  The unwind table consists of a table
2298  * of entries of the form:
2299  *
2300  *	u64 start;	(64-bit address of start of function)
2301  *	u64 end;	(64-bit address of end of function)
2302  *	u64 info;	(BUF-relative offset to unwind info)
2303  *
2304  * The end of the unwind table is indicated by an entry with a START address of zero.
2305  *
2306  * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2307  * on the format of the unwind info.
2308  *
2309  * ERRORS
2310  *	EFAULT	BUF points outside your accessible address space.
2311  */
2312 asmlinkage long
2313 sys_getunwind (void __user *buf, size_t buf_size)
2314 {
2315 	if (buf && buf_size >= unw.gate_table_size)
2316 		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2317 			return -EFAULT;
2318 	return unw.gate_table_size;
2319 }
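
/*
 * Illustrative user-level sketch (hypothetical, not part of this file;
 * assumes the usual libc headers and the ia64 __NR_getunwind number): a
 * consumer of the deprecated getunwind() call would size, fetch, and walk
 * the returned table roughly as follows.  The entry layout matches the
 * comment above: three u64 words per function, a zero start terminates.
 */
#if 0	/* example only; user-space code */
	long size = syscall(__NR_getunwind, NULL, 0);	/* query size */
	unsigned long *tab = malloc(size);
	unsigned long *lp;

	syscall(__NR_getunwind, tab, size);
	for (lp = tab; lp[0] != 0; lp += 3) {
		unsigned long start = lp[0], end = lp[1];
		char *info = (char *) tab + lp[2];	/* BUF-relative offset */
		printf("%lx-%lx info at %p\n", start, end, info);
		/* ... decode the unwind info per the SW Conventions manual ... */
	}
#endif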
2320