• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* libunwind - a platform-independent unwind library
2    Copyright (C) 2001-2005 Hewlett-Packard Co
3 	Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4 
5 This file is part of libunwind.
6 
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14 
15 The above copyright notice and this permission notice shall be
16 included in all copies or substantial portions of the Software.
17 
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */
25 
26 #include "offsets.h"
27 #include "regs.h"
28 #include "unwind_i.h"
29 
/* Opcodes of the "unwind script" virtual machine.  Each script
   instruction updates one save-location slot (s[dst]) of the cursor
   state, or the previous-stack-pointer (psp) itself; the scripts are
   compiled by build_script() and interpreted by run_script().  */
enum ia64_script_insn_opcode
  {
    IA64_INSN_INC_PSP,		/* psp += val */
    IA64_INSN_LOAD_PSP,		/* psp = *psp_loc */
    IA64_INSN_ADD_PSP,		/* s[dst] = (s.psp + val) */
    IA64_INSN_ADD_PSP_NAT,	/* like above, but with NaT info */
    IA64_INSN_ADD_SP,		/* s[dst] = (s.sp + val) */
    IA64_INSN_ADD_SP_NAT,	/* like above, but with NaT info */
    IA64_INSN_MOVE,		/* s[dst] = s[val] */
    IA64_INSN_MOVE_NAT,		/* like above, but with NaT info */
    IA64_INSN_MOVE_NO_NAT,	/* like above, but clear NaT info */
    IA64_INSN_MOVE_STACKED,	/* s[dst] = rse_skip(*s.bsp_loc, val) */
    IA64_INSN_MOVE_STACKED_NAT,	/* like above, but with NaT info */
    IA64_INSN_MOVE_SCRATCH,	/* s[dst] = scratch reg "val" */
    IA64_INSN_MOVE_SCRATCH_NAT,	/* like above, but with NaT info */
    IA64_INSN_MOVE_SCRATCH_NO_NAT /* like above, but clear NaT info */
  };
47 
#ifdef HAVE___THREAD
/* Per-thread script cache, selected by get_script_cache() when the
   caching policy is UNW_CACHE_PER_THREAD.  The busy-flag/lock is
   statically initialized to the released state so the first
   acquisition succeeds.  */
static __thread struct ia64_script_cache ia64_per_thread_cache =
  {
#ifdef HAVE_ATOMIC_OPS_H
    .busy = AO_TS_INITIALIZER
#else
    .lock = PTHREAD_MUTEX_INITIALIZER
#endif
  };
#endif
58 
59 static inline unw_hash_index_t CONST_ATTR
hash(unw_word_t ip)60 hash (unw_word_t ip)
61 {
62   /* based on (sqrt(5)/2-1)*2^64 */
63 # define magic	((unw_word_t) 0x9e3779b97f4a7c16ULL)
64 
65   return (ip >> 4) * magic >> (64 - IA64_LOG_UNW_HASH_SIZE);
66 }
67 
68 static inline long
cache_match(struct ia64_script * script,unw_word_t ip,unw_word_t pr)69 cache_match (struct ia64_script *script, unw_word_t ip, unw_word_t pr)
70 {
71   if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
72     return 1;
73   return 0;
74 }
75 
76 static inline void
flush_script_cache(struct ia64_script_cache * cache)77 flush_script_cache (struct ia64_script_cache *cache)
78 {
79   int i;
80 
81   cache->lru_head = IA64_UNW_CACHE_SIZE - 1;
82   cache->lru_tail = 0;
83 
84   for (i = 0; i < IA64_UNW_CACHE_SIZE; ++i)
85     {
86       if (i > 0)
87 	cache->buckets[i].lru_chain = (i - 1);
88       cache->buckets[i].coll_chain = -1;
89       cache->buckets[i].ip = 0;
90     }
91   for (i = 0; i<IA64_UNW_HASH_SIZE; ++i)
92     cache->hash[i] = -1;
93 }
94 
/* Acquire exclusive access to the script cache selected by the
   caching policy of address-space AS.  Returns NULL when caching is
   disabled or when the cache is busy/contended.  The signal/interrupt
   mask saved by the locking primitive (where applicable) is stored in
   *SAVED_MASKP and must be handed back to put_script_cache().  */
static inline struct ia64_script_cache *
get_script_cache (unw_addr_space_t as, intrmask_t *saved_maskp)
{
  struct ia64_script_cache *cache = &as->global_cache;
  unw_caching_policy_t caching = as->caching_policy;

  if (caching == UNW_CACHE_NONE)
    return NULL;

#ifdef HAVE_ATOMIC_H
  /* Try-lock: fail rather than spin if somebody else holds the cache.  */
  if (!spin_trylock_irqsave (&cache->busy, *saved_maskp))
    return NULL;
#else
# ifdef HAVE___THREAD
  /* Per-thread policy: switch to this thread's private cache.  */
  if (as->caching_policy == UNW_CACHE_PER_THREAD)
    cache = &ia64_per_thread_cache;
# endif
# ifdef HAVE_ATOMIC_OPS_H
  /* Non-blocking test-and-set of the busy flag.  */
  if (AO_test_and_set (&cache->busy) == AO_TS_SET)
    return NULL;
# else
  /* Mutex path: only the global cache is shared between threads and
     hence needs locking.  */
  if (likely (caching == UNW_CACHE_GLOBAL))
    {
      Debug (16, "acquiring lock\n");
      lock_acquire (&cache->lock, *saved_maskp);
    }
# endif
#endif

  /* A generation mismatch means the address space's cached info was
     invalidated since this cache was last used; flush before use.  */
  if (atomic_read (&as->cache_generation) != atomic_read (&cache->generation))
    {
      flush_script_cache (cache);
      cache->generation = as->cache_generation;
    }
  return cache;
}
131 
/* Release the script cache previously acquired via get_script_cache(),
   restoring the signal/interrupt mask saved at acquisition time where
   the locking primitive uses one.  */
static inline void
put_script_cache (unw_addr_space_t as, struct ia64_script_cache *cache,
		  intrmask_t *saved_maskp)
{
  assert (as->caching_policy != UNW_CACHE_NONE);

  Debug (16, "unmasking signals/interrupts and releasing lock\n");
#ifdef HAVE_ATOMIC_H
  spin_unlock_irqrestore (&cache->busy, *saved_maskp);
#else
# ifdef HAVE_ATOMIC_OPS_H
  AO_CLEAR (&cache->busy);
# else
  /* Mirror of get_script_cache(): only the global cache takes the
     mutex.  */
  if (likely (as->caching_policy == UNW_CACHE_GLOBAL))
    lock_release (&cache->lock, *saved_maskp);
# endif
#endif
}
150 
151 static struct ia64_script *
script_lookup(struct ia64_script_cache * cache,struct cursor * c)152 script_lookup (struct ia64_script_cache *cache, struct cursor *c)
153 {
154   struct ia64_script *script = cache->buckets + c->hint;
155   unsigned short index;
156   unw_word_t ip, pr;
157 
158   ip = c->ip;
159   pr = c->pr;
160 
161   if (cache_match (script, ip, pr))
162     return script;
163 
164   index = cache->hash[hash (ip)];
165   if (index >= IA64_UNW_CACHE_SIZE)
166     return 0;
167 
168   script = cache->buckets + index;
169   while (1)
170     {
171       if (cache_match (script, ip, pr))
172 	{
173 	  /* update hint; no locking needed: single-word writes are atomic */
174 	  c->hint = cache->buckets[c->prev_script].hint =
175 	    (script - cache->buckets);
176 	  return script;
177 	}
178       if (script->coll_chain >= IA64_UNW_HASH_SIZE)
179 	return 0;
180       script = cache->buckets + script->coll_chain;
181     }
182 }
183 
184 static inline void
script_init(struct ia64_script * script,unw_word_t ip)185 script_init (struct ia64_script *script, unw_word_t ip)
186 {
187   script->ip = ip;
188   script->hint = 0;
189   script->count = 0;
190   script->abi_marker = 0;
191 }
192 
/* Evict the least-recently-used cache entry and re-initialize it for
   the procedure at IP: the victim is moved to the most-recently-used
   end of the LRU chain, unlinked from the hash chain of its old ip
   (if any), and linked into the hash chain for IP.  */
static inline struct ia64_script *
script_new (struct ia64_script_cache *cache, unw_word_t ip)
{
  struct ia64_script *script, *prev, *tmp;
  unw_hash_index_t index;
  unsigned short head;

  /* take the victim off the head of the LRU chain: */
  head = cache->lru_head;
  script = cache->buckets + head;
  cache->lru_head = script->lru_chain;

  /* re-insert script at the tail of the LRU chain: */
  cache->buckets[cache->lru_tail].lru_chain = head;
  cache->lru_tail = head;

  /* remove the old script from the hash table (if it's there): */
  if (script->ip)
    {
      index = hash (script->ip);
      tmp = cache->buckets + cache->hash[index];
      prev = 0;
      while (1)
	{
	  if (tmp == script)
	    {
	      /* unlink: patch the predecessor's link, or the
		 hash-table head when the victim is first in chain */
	      if (prev)
		prev->coll_chain = tmp->coll_chain;
	      else
		cache->hash[index] = tmp->coll_chain;
	      break;
	    }
	  else
	    prev = tmp;
	  if (tmp->coll_chain >= IA64_UNW_CACHE_SIZE)
	    /* old script wasn't in the hash-table */
	    break;
	  tmp = cache->buckets + tmp->coll_chain;
	}
    }

  /* enter new script in the hash table */
  index = hash (ip);
  script->coll_chain = cache->hash[index];
  cache->hash[index] = script - cache->buckets;

  script_init (script, ip);
  return script;
}
241 
242 static inline void
script_finalize(struct ia64_script * script,struct cursor * c,struct ia64_state_record * sr)243 script_finalize (struct ia64_script *script, struct cursor *c,
244 		 struct ia64_state_record *sr)
245 {
246   script->pr_mask = sr->pr_mask;
247   script->pr_val = sr->pr_val;
248   script->pi = c->pi;
249 }
250 
251 static inline void
script_emit(struct ia64_script * script,struct ia64_script_insn insn)252 script_emit (struct ia64_script *script, struct ia64_script_insn insn)
253 {
254   if (script->count >= IA64_MAX_SCRIPT_LEN)
255     {
256       Dprintf ("%s: script exceeds maximum size of %u instructions!\n",
257 	       __FUNCTION__, IA64_MAX_SCRIPT_LEN);
258       return;
259     }
260   script->insn[script->count++] = insn;
261 }
262 
/* Compile the script instruction(s) that restore register I, whose
   save-state is described by R, and append them to SCRIPT.  Does
   nothing when the register was not saved (or was saved at/after the
   target instruction).  The opcode is chosen from where the register
   was saved (GR/FR/BR/sp-relative/psp-relative) and whether I is a
   preserved general register (r4-r7), which needs NaT-bit tracking.  */
static void
compile_reg (struct ia64_state_record *sr, int i, struct ia64_reg_info *r,
	     struct ia64_script *script)
{
  enum ia64_script_insn_opcode opc;
  unsigned long val, rval;
  struct ia64_script_insn insn;
  long is_preserved_gr;

  if (r->where == IA64_WHERE_NONE || r->when >= sr->when_target)
    return;

  opc = IA64_INSN_MOVE;
  val = rval = r->val;
  /* r4-r7 carry NaT bits that must be restored along with the value.  */
  is_preserved_gr = (i >= IA64_REG_R4 && i <= IA64_REG_R7);

  if (r->where == IA64_WHERE_GR)
    {
      /* Handle most common case first... */
      if (rval >= 32)
	{
	  /* register got spilled to a stacked register */
	  if (is_preserved_gr)
	    opc = IA64_INSN_MOVE_STACKED_NAT;
	  else
	    opc = IA64_INSN_MOVE_STACKED;
	  val = rval;
	}
      else if (rval >= 4 && rval <= 7)
	{
	  /* register got spilled to a preserved register */
	  val = IA64_REG_R4 + (rval - 4);
	  if (is_preserved_gr)
	    opc = IA64_INSN_MOVE_NAT;
	}
      else
	{
	  /* register got spilled to a scratch register */
	  if (is_preserved_gr)
	    opc = IA64_INSN_MOVE_SCRATCH_NAT;
	  else
	    opc = IA64_INSN_MOVE_SCRATCH;
	  val = UNW_IA64_GR + rval;
	}
    }
  else
    {
      switch (r->where)
	{
	case IA64_WHERE_FR:
	  /* Note: There is no need to handle NaT-bit info here
	     (independent of is_preserved_gr), because for
	     floating-point registers NaTs are represented as NaTVal,
	     so the NaT-info never needs to be consulted.  */
	  if (rval >= 2 && rval <= 5)
	    val = IA64_REG_F2 + (rval - 2);
	  else if (rval >= 16 && rval <= 31)
	    val = IA64_REG_F16 + (rval - 16);
	  else
	    {
	      opc = IA64_INSN_MOVE_SCRATCH;
	      val = UNW_IA64_FR + rval;
	    }
	  break;

	case IA64_WHERE_BR:
	  if (rval >= 1 && rval <= 5)
	    {
	      val = IA64_REG_B1 + (rval - 1);
	      if (is_preserved_gr)
		opc = IA64_INSN_MOVE_NO_NAT;
	    }
	  else
	    {
	      opc = IA64_INSN_MOVE_SCRATCH;
	      if (is_preserved_gr)
		opc = IA64_INSN_MOVE_SCRATCH_NO_NAT;
	      val = UNW_IA64_BR + rval;
	    }
	  break;

	case IA64_WHERE_SPREL:
	  if (is_preserved_gr)
	    opc = IA64_INSN_ADD_SP_NAT;
	  else
	    {
	      opc = IA64_INSN_ADD_SP;
	      /* tag FP save-locations so run_script() builds an
		 FP-typed address */
	      if (i >= IA64_REG_F2 && i <= IA64_REG_F31)
		val |= IA64_LOC_TYPE_FP;
	    }
	  break;

	case IA64_WHERE_PSPREL:
	  if (is_preserved_gr)
	    opc = IA64_INSN_ADD_PSP_NAT;
	  else
	    {
	      opc = IA64_INSN_ADD_PSP;
	      if (i >= IA64_REG_F2 && i <= IA64_REG_F31)
		val |= IA64_LOC_TYPE_FP;
	    }
	  break;

	default:
	  Dprintf ("%s: register %u has unexpected `where' value of %u\n",
		   __FUNCTION__, i, r->where);
	  break;
	}
    }
  insn.opc = opc;
  insn.dst = i;
  insn.val = val;
  script_emit (script, insn);

  if (i == IA64_REG_PSP)
    {
      /* c->psp must contain the _value_ of the previous sp, not its
	 save-location.  We get this by dereferencing the value we
	 just stored in loc[IA64_REG_PSP]: */
      insn.opc = IA64_INSN_LOAD_PSP;
      script_emit (script, insn);
    }
}
386 
387 /* Sort the registers which got saved in decreasing order of WHEN
388    value.  This is needed to ensure that the save-locations are
389    updated in the proper order.  For example, suppose r4 gets spilled
390    to memory and then r5 gets saved in r4.  In this case, we need to
391    update the save location of r5 before the one of r4.  */
392 
393 static inline int
sort_regs(struct ia64_state_record * sr,int regorder[])394 sort_regs (struct ia64_state_record *sr, int regorder[])
395 {
396   int r, i, j, max, max_reg, max_when, num_regs = 0;
397 
398   assert (IA64_REG_BSP == 3);
399 
400   for (r = IA64_REG_BSP; r < IA64_NUM_PREGS; ++r)
401     {
402       if (sr->curr.reg[r].where == IA64_WHERE_NONE
403 	  || sr->curr.reg[r].when >= sr->when_target)
404 	continue;
405 
406       regorder[num_regs++] = r;
407     }
408 
409   /* Simple insertion-sort.  Involves about N^2/2 comparisons and N
410      exchanges.  N is often small (say, 2-5) so a fancier sorting
411      algorithm may not be worthwhile.  */
412 
413   for (i = max = 0; i < num_regs - 1; ++i)
414     {
415       max_reg = regorder[max];
416       max_when = sr->curr.reg[max_reg].when;
417 
418       for (j = i + 1; j < num_regs; ++j)
419 	if (sr->curr.reg[regorder[j]].when > max_when)
420 	  {
421 	    max = j;
422 	    max_reg = regorder[j];
423 	    max_when = sr->curr.reg[max_reg].when;
424 	  }
425       if (i != max)
426 	{
427 	  regorder[max] = regorder[i];
428 	  regorder[i] = max_reg;
429 	}
430     }
431   return num_regs;
432 }
433 
/* Build an unwind script that unwinds from state OLD_STATE to the
   entrypoint of the function that called OLD_STATE.  Returns 0 on
   success or a negative unwind error code.  */

static inline int
build_script (struct cursor *c, struct ia64_script *script)
{
  int num_regs, i, ret, regorder[IA64_NUM_PREGS - 3];
  struct ia64_reg_info *pri_unat;
  struct ia64_state_record sr;
  struct ia64_script_insn insn;

  /* Parse the unwind info for the current frame into a state record.  */
  ret = ia64_create_state_record (c, &sr);
  if (ret < 0)
    return ret;

  /* First, compile the update for IA64_REG_PSP.  This is important
     because later save-locations may depend on its correct (updated)
     value.  Fixed-size frames are handled specially and variable-size
     frames get handled via the normal compile_reg().  */

  if (sr.when_target > sr.curr.reg[IA64_REG_PSP].when
      && (sr.curr.reg[IA64_REG_PSP].where == IA64_WHERE_NONE)
      && sr.curr.reg[IA64_REG_PSP].val != 0)
    {
      /* new psp is psp plus frame size */
      insn.opc = IA64_INSN_INC_PSP;
      insn.val = sr.curr.reg[IA64_REG_PSP].val;	/* frame size */
      script_emit (script, insn);
    }
  else
    compile_reg (&sr, IA64_REG_PSP, sr.curr.reg + IA64_REG_PSP, script);

  /* Second, compile the update for the primary UNaT, if any: */

  if (sr.when_target >= sr.curr.reg[IA64_REG_PRI_UNAT_GR].when
      || sr.when_target >= sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when)
    {
      /* Pick whichever of the two primary-UNaT save-locations is
	 valid at the target; when both are, take the most recent.  */
      if (sr.when_target < sr.curr.reg[IA64_REG_PRI_UNAT_GR].when)
	/* (primary) NaT bits were saved to memory only */
	pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_MEM;
      else if (sr.when_target < sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when)
	/* (primary) NaT bits were saved to a register only */
	pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_GR;
      else if (sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when >
	       sr.curr.reg[IA64_REG_PRI_UNAT_GR].when)
	/* (primary) NaT bits were last saved to memory */
	pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_MEM;
      else
	/* (primary) NaT bits were last saved to a register */
	pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_GR;

      /* Note: we always store the final primary-UNaT location in UNAT_MEM.  */
      compile_reg (&sr, IA64_REG_PRI_UNAT_MEM, pri_unat, script);
    }

  /* Third, compile the other registers in decreasing order of WHEN
     values (see sort_regs() for why the order matters).  */

  num_regs = sort_regs (&sr, regorder);
  for (i = 0; i < num_regs; ++i)
    compile_reg (&sr, regorder[i], sr.curr.reg + regorder[i], script);

  script->abi_marker = sr.abi_marker;
  script_finalize (script, c, &sr);

  ia64_free_state_record (&sr);
  return 0;
}
501 
502 static inline void
set_nat_info(struct cursor * c,unsigned long dst,ia64_loc_t nat_loc,uint8_t bitnr)503 set_nat_info (struct cursor *c, unsigned long dst,
504 	      ia64_loc_t nat_loc, uint8_t bitnr)
505 {
506   assert (dst >= IA64_REG_R4 && dst <= IA64_REG_R7);
507 
508   c->loc[dst - IA64_REG_R4 + IA64_REG_NAT4] = nat_loc;
509   c->nat_bitnr[dst - IA64_REG_R4] = bitnr;
510 }
511 
/* Apply the unwinding actions represented by OPS and update SR to
   reflect the state that existed upon entry to the function that this
   unwinder represents.  Returns 0 on success or a negative unwind
   error code.  */

static inline int
run_script (struct ia64_script *script, struct cursor *c)
{
  struct ia64_script_insn *ip, *limit, next_insn;
  ia64_loc_t loc, nat_loc;
  unsigned long opc, dst;
  uint8_t nat_bitnr;
  unw_word_t val;
  int ret;

  c->pi = script->pi;
  ip = script->insn;
  limit = script->insn + script->count;
  /* Software-pipelined fetch: NEXT_INSN holds the instruction being
     executed while IP already points one ahead.  NOTE(review): on the
     final iteration the prefetch reads the slot at insn[count]; when
     count == IA64_MAX_SCRIPT_LEN that is one past the array -- confirm
     the array has slack or that count stays below the maximum.  */
  next_insn = *ip;
  c->abi_marker = script->abi_marker;

  while (ip++ < limit)
    {
      opc = next_insn.opc;
      dst = next_insn.dst;
      val = next_insn.val;
      next_insn = *ip;

      /* This is by far the most common operation: */
      if (likely (opc == IA64_INSN_MOVE_STACKED))
	{
	  if ((ret = ia64_get_stacked (c, val, &loc, NULL)) < 0)
	    return ret;
	}
      else
	switch (opc)
	  {
	  case IA64_INSN_INC_PSP:
	    c->psp += val;
	    continue;	/* updates psp only; no save-location to store */

	  case IA64_INSN_LOAD_PSP:
	    if ((ret = ia64_get (c, c->loc[IA64_REG_PSP], &c->psp)) < 0)
	      return ret;
	    continue;	/* updates psp only; no save-location to store */

	  case IA64_INSN_ADD_PSP:
	    loc = IA64_LOC_ADDR (c->psp + val, (val & IA64_LOC_TYPE_FP));
	    break;

	  case IA64_INSN_ADD_SP:
	    loc = IA64_LOC_ADDR (c->sp + val, (val & IA64_LOC_TYPE_FP));
	    break;

	  case IA64_INSN_MOVE_NO_NAT:
	    set_nat_info (c, dst, IA64_NULL_LOC, 0);
	    /* fall through */
	  case IA64_INSN_MOVE:
	    loc = c->loc[val];
	    break;

	  case IA64_INSN_MOVE_SCRATCH_NO_NAT:
	    set_nat_info (c, dst, IA64_NULL_LOC, 0);
	    /* fall through */
	  case IA64_INSN_MOVE_SCRATCH:
	    loc = ia64_scratch_loc (c, val, NULL);
	    break;

	  case IA64_INSN_ADD_PSP_NAT:
	    loc = IA64_LOC_ADDR (c->psp + val, 0);
	    assert (!IA64_IS_REG_LOC (loc));
	    set_nat_info (c, dst,
			  c->loc[IA64_REG_PRI_UNAT_MEM],
			  ia64_unat_slot_num (IA64_GET_ADDR (loc)));
	    break;

	  case IA64_INSN_ADD_SP_NAT:
	    loc = IA64_LOC_ADDR (c->sp + val, 0);
	    assert (!IA64_IS_REG_LOC (loc));
	    set_nat_info (c, dst,
			  c->loc[IA64_REG_PRI_UNAT_MEM],
			  ia64_unat_slot_num (IA64_GET_ADDR (loc)));
	    break;

	  case IA64_INSN_MOVE_NAT:
	    loc = c->loc[val];
	    /* propagate the source's NaT location/bit to DST */
	    set_nat_info (c, dst,
			  c->loc[val - IA64_REG_R4 + IA64_REG_NAT4],
			  c->nat_bitnr[val - IA64_REG_R4]);
	    break;

	  case IA64_INSN_MOVE_STACKED_NAT:
	    if ((ret = ia64_get_stacked (c, val, &loc, &nat_loc)) < 0)
	      return ret;
	    assert (!IA64_IS_REG_LOC (loc));
	    set_nat_info (c, dst, nat_loc, rse_slot_num (IA64_GET_ADDR (loc)));
	    break;

	  case IA64_INSN_MOVE_SCRATCH_NAT:
	    loc = ia64_scratch_loc (c, val, NULL);
	    nat_loc = ia64_scratch_loc (c, val + (UNW_IA64_NAT - UNW_IA64_GR),
					&nat_bitnr);
	    set_nat_info (c, dst, nat_loc, nat_bitnr);
	    break;
	  }
      /* common tail: store the computed save-location */
      c->loc[dst] = loc;
    }
  return 0;
}
618 
619 static int
uncached_find_save_locs(struct cursor * c)620 uncached_find_save_locs (struct cursor *c)
621 {
622   struct ia64_script script;
623   int ret = 0;
624 
625   if ((ret = ia64_fetch_proc_info (c, c->ip, 1)) < 0)
626     return ret;
627 
628   script_init (&script, c->ip);
629   if ((ret = build_script (c, &script)) < 0)
630     {
631       if (ret != -UNW_ESTOPUNWIND)
632 	Dprintf ("%s: failed to build unwind script for ip %lx\n",
633 		 __FUNCTION__, (long) c->ip);
634       return ret;
635     }
636   return run_script (&script, c);
637 }
638 
/* Find the save-locations for the frame the cursor C points to, using
   (and filling) the script cache when caching is enabled.  Falls back
   to an uncached lookup when caching is disabled or the cache is
   contended.  */
HIDDEN int
ia64_find_save_locs (struct cursor *c)
{
  struct ia64_script_cache *cache = NULL;
  struct ia64_script *script = NULL;
  intrmask_t saved_mask;
  int ret = 0;

  if (c->as->caching_policy == UNW_CACHE_NONE)
    return uncached_find_save_locs (c);

  cache = get_script_cache (c->as, &saved_mask);
  if (!cache)
    {
      Debug (1, "contention on script-cache; doing uncached lookup\n");
      return uncached_find_save_locs (c);
    }
  {
    script = script_lookup (cache, c);
    Debug (8, "ip %lx %s in script cache\n", (long) c->ip,
	   script ? "hit" : "missed");

    /* An entry created by ia64_cache_proc_info() has count == 0 but
       already carries valid proc-info; fetch proc-info only when we
       have neither a compiled script nor cached proc-info.  */
    if (!script || (script->count == 0 && !script->pi.unwind_info))
      {
	if ((ret = ia64_fetch_proc_info (c, c->ip, 1)) < 0)
	  goto out;
      }

    if (!script)
      {
	script = script_new (cache, c->ip);
	if (!script)
	  {
	    Dprintf ("%s: failed to create unwind script\n", __FUNCTION__);
	    ret = -UNW_EUNSPEC;
	    goto out;
	  }
      }
    /* Remember this script as the likely successor of the previous
       frame's script (script_lookup() hint fast-path).  */
    cache->buckets[c->prev_script].hint = script - cache->buckets;

    if (script->count == 0)
      ret = build_script (c, script);

    /* NOTE(review): if build_script() just failed, count may still be
       0 and this assert trips before the error is propagated below --
       confirm that aborting in debug builds is intended here.  */
    assert (script->count > 0);

    c->hint = script->hint;
    c->prev_script = script - cache->buckets;

    if (ret < 0)
      {
	if (ret != -UNW_ESTOPUNWIND)
	  Dprintf ("%s: failed to locate/build unwind script for ip %lx\n",
		   __FUNCTION__, (long) c->ip);
	goto out;
      }

    ret = run_script (script, c);
  }
 out:
  put_script_cache (c->as, cache, &saved_mask);
  return ret;
}
701 
/* Validate cached unwind information for address-space AS, flushing
   whatever is stale.  Checks the local address space's info first
   (when built with remote support disabled this is the only check),
   then the dynamic unwind info.  */
HIDDEN void
ia64_validate_cache (unw_addr_space_t as, void *arg)
{
#ifndef UNW_REMOTE_ONLY
  /* ia64_local_validate_cache() returning 1 means the cache was found
     stale and handled; nothing further to do.  */
  if (as == unw_local_addr_space && ia64_local_validate_cache (as, arg) == 1)
    return;
#endif

#ifndef UNW_LOCAL_ONLY
  /* local info is up-to-date, check dynamic info.  */
  unwi_dyn_validate_cache (as, arg);
#endif
}
715 
/* Pre-seed the script cache with an entry for the procedure at c->ip
   that carries the already-fetched proc-info (c->pi), so later
   lookups can skip re-fetching it.  Quietly returns 0 when the cache
   is busy or already has an entry.  */
HIDDEN int
ia64_cache_proc_info (struct cursor *c)
{
  struct ia64_script_cache *cache;
  struct ia64_script *script;
  intrmask_t saved_mask;
  int ret = 0;

  cache = get_script_cache (c->as, &saved_mask);
  if (!cache)
    return ret;	/* cache is busy */

  /* Re-check to see if a cache entry has been added in the meantime: */
  script = script_lookup (cache, c);
  if (script)
    goto out;

  script = script_new (cache, c->ip);
  if (!script)
    {
      Dprintf ("%s: failed to create unwind script\n", __FUNCTION__);
      ret = -UNW_EUNSPEC;
      goto out;
    }

  /* The new entry has no compiled instructions yet (count == 0); it
     only carries the proc-info.  */
  script->pi = c->pi;

 out:
  put_script_cache (c->as, cache, &saved_mask);
  return ret;
}
747 
748 HIDDEN int
ia64_get_cached_proc_info(struct cursor * c)749 ia64_get_cached_proc_info (struct cursor *c)
750 {
751   struct ia64_script_cache *cache;
752   struct ia64_script *script;
753   intrmask_t saved_mask;
754 
755   cache = get_script_cache (c->as, &saved_mask);
756   if (!cache)
757     return -UNW_ENOINFO;	/* cache is busy */
758   {
759     script = script_lookup (cache, c);
760     if (script)
761       c->pi = script->pi;
762   }
763   put_script_cache (c->as, cache, &saved_mask);
764   return script ? 0 : -UNW_ENOINFO;
765 }
766