1 /* libunwind - a platform-independent unwind library
2 Copyright (C) 2001-2005 Hewlett-Packard Co
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of libunwind.
6
7 Permission is hereby granted, free of charge, to any person obtaining
8 a copy of this software and associated documentation files (the
9 "Software"), to deal in the Software without restriction, including
10 without limitation the rights to use, copy, modify, merge, publish,
11 distribute, sublicense, and/or sell copies of the Software, and to
12 permit persons to whom the Software is furnished to do so, subject to
13 the following conditions:
14
15 The above copyright notice and this permission notice shall be
16 included in all copies or substantial portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
25
26 #include "offsets.h"
27 #include "regs.h"
28 #include "unwind_i.h"
29 #include <stdatomic.h>
30
/* Opcodes of the interpreted unwind-script instructions.  Each
   instruction updates one save-location slot of the cursor (or the
   psp value itself); they are emitted by compile_reg()/build_script()
   and executed by run_script().  */
enum ia64_script_insn_opcode
  {
    IA64_INSN_INC_PSP,          /* psp += val */
    IA64_INSN_LOAD_PSP,         /* psp = *psp_loc */
    IA64_INSN_ADD_PSP,          /* s[dst] = (s.psp + val) */
    IA64_INSN_ADD_PSP_NAT,      /* like above, but with NaT info */
    IA64_INSN_ADD_SP,           /* s[dst] = (s.sp + val) */
    IA64_INSN_ADD_SP_NAT,       /* like above, but with NaT info */
    IA64_INSN_MOVE,             /* s[dst] = s[val] */
    IA64_INSN_MOVE_NAT,         /* like above, but with NaT info */
    IA64_INSN_MOVE_NO_NAT,      /* like above, but clear NaT info */
    IA64_INSN_MOVE_STACKED,     /* s[dst] = rse_skip(*s.bsp_loc, val) */
    IA64_INSN_MOVE_STACKED_NAT, /* like above, but with NaT info */
    IA64_INSN_MOVE_SCRATCH,     /* s[dst] = scratch reg "val" */
    IA64_INSN_MOVE_SCRATCH_NAT, /* like above, but with NaT info */
    IA64_INSN_MOVE_SCRATCH_NO_NAT /* like above, but clear NaT info */
  };
48
/* Per-thread script cache, used when the caching policy is
   UNW_CACHE_PER_THREAD.  Being thread-local, it needs no locking;
   the busy flag only exists because it shares the cache type with
   the global cache.  */
#if defined(HAVE___CACHE_PER_THREAD) && HAVE___CACHE_PER_THREAD
static _Thread_local struct ia64_script_cache ia64_per_thread_cache =
  {
    .busy = ATOMIC_FLAG_INIT
  };
#endif
55
56 static inline unw_hash_index_t CONST_ATTR
hash(unw_word_t ip)57 hash (unw_word_t ip)
58 {
59 /* based on (sqrt(5)/2-1)*2^64 */
60 # define magic ((unw_word_t) 0x9e3779b97f4a7c16ULL)
61
62 return (ip >> 4) * magic >> (64 - IA64_LOG_UNW_HASH_SIZE);
63 }
64
65 static inline long
cache_match(struct ia64_script * script,unw_word_t ip,unw_word_t pr)66 cache_match (struct ia64_script *script, unw_word_t ip, unw_word_t pr)
67 {
68 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
69 return 1;
70 return 0;
71 }
72
73 static inline void
flush_script_cache(struct ia64_script_cache * cache)74 flush_script_cache (struct ia64_script_cache *cache)
75 {
76 int i;
77
78 cache->lru_head = IA64_UNW_CACHE_SIZE - 1;
79 cache->lru_tail = 0;
80
81 for (i = 0; i < IA64_UNW_CACHE_SIZE; ++i)
82 {
83 if (i > 0)
84 cache->buckets[i].lru_chain = (i - 1);
85 cache->buckets[i].coll_chain = -1;
86 cache->buckets[i].ip = 0;
87 }
88 for (i = 0; i<IA64_UNW_HASH_SIZE; ++i)
89 cache->hash[i] = -1;
90 }
91
/* Acquire the script cache for address space AS according to its
   caching policy.  Returns NULL when caching is disabled or the
   global cache is currently busy.  On success the caller owns the
   cache and must release it with put_script_cache().  SAVED_MASKP is
   unused; it survives from the old signal-masking lock protocol.  */
static inline struct ia64_script_cache *
get_script_cache (unw_addr_space_t as, intrmask_t *saved_maskp)
{
  struct ia64_script_cache *cache = &as->global_cache;
  unw_caching_policy_t caching = as->caching_policy;

  if (caching == UNW_CACHE_NONE)
    return NULL;

# if defined(HAVE___CACHE_PER_THREAD) && HAVE___CACHE_PER_THREAD
  /* The per-thread cache needs no locking.  */
  if (as->caching_policy == UNW_CACHE_PER_THREAD)
    cache = &ia64_per_thread_cache;
  else
# endif
  /* Try-lock only: bail out instead of blocking so that an unwind
     from a signal handler can never deadlock on the cache.  */
  if (atomic_flag_test_and_set(&cache->busy))
    return NULL;

  /* If the address space flushed its caches since we last used this
     one (generation counters differ), rebuild from scratch.  */
  if (atomic_load (&as->cache_generation) != atomic_load (&cache->generation))
    {
      flush_script_cache (cache);
      atomic_store(&cache->generation, atomic_load (&as->cache_generation));
    }
  return cache;
}
116
117 static inline void
put_script_cache(unw_addr_space_t as,struct ia64_script_cache * cache,intrmask_t * saved_maskp)118 put_script_cache (unw_addr_space_t as, struct ia64_script_cache *cache,
119 intrmask_t *saved_maskp)
120 {
121 assert (as->caching_policy != UNW_CACHE_NONE);
122
123 Debug (16, "unmasking signals/interrupts and releasing lock\n");
124 atomic_flag_clear(&cache->busy);
125 }
126
/* Look up a cached unwind script for the IP/predicate values of
   cursor C.  Returns NULL (0) on a miss.  The caller must own CACHE
   (see get_script_cache()).  */
static struct ia64_script *
script_lookup (struct ia64_script_cache *cache, struct cursor *c)
{
  struct ia64_script *script = cache->buckets + c->hint;
  unsigned short index;
  unw_word_t ip, pr;

  ip = c->ip;
  pr = c->pr;

  /* Fast path: the hint left behind by the previous lookup.  */
  if (cache_match (script, ip, pr))
    return script;

  /* Slow path: walk the hash bucket's collision chain.  An empty
     hash slot holds -1, which as an unsigned short is >= the cache
     size, so the range check also catches "no entry".  */
  index = cache->hash[hash (ip)];
  if (index >= IA64_UNW_CACHE_SIZE)
    return 0;

  script = cache->buckets + index;
  while (1)
    {
      if (cache_match (script, ip, pr))
        {
          /* update hint; no locking needed: single-word writes are atomic */
          c->hint = cache->buckets[c->prev_script].hint =
            (script - cache->buckets);
          return script;
        }
      /* End of chain is marked with -1 (0xffff as unsigned short).
         NOTE(review): the bound used here is IA64_UNW_HASH_SIZE even
         though coll_chain holds bucket indices (< IA64_UNW_CACHE_SIZE,
         as checked in script_new()); this is only correct if the hash
         table is at least as large as the cache -- verify.  */
      if (script->coll_chain >= IA64_UNW_HASH_SIZE)
        return 0;
      script = cache->buckets + script->coll_chain;
    }
}
159
160 static inline void
script_init(struct ia64_script * script,unw_word_t ip)161 script_init (struct ia64_script *script, unw_word_t ip)
162 {
163 script->ip = ip;
164 script->hint = 0;
165 script->count = 0;
166 script->abi_marker = 0;
167 }
168
/* Recycle the least-recently-used cache bucket for the procedure at
   IP: unlink it from its old hash chain (if any), move it to the
   tail of the LRU chain, insert it into IP's hash chain and
   initialize it.  Never fails -- some bucket is always evicted.  */
static inline struct ia64_script *
script_new (struct ia64_script_cache *cache, unw_word_t ip)
{
  struct ia64_script *script, *prev, *tmp;
  unw_hash_index_t index;
  unsigned short head;

  /* Take the bucket at the head of the LRU chain (the coldest one).  */
  head = cache->lru_head;
  script = cache->buckets + head;
  cache->lru_head = script->lru_chain;

  /* re-insert script at the tail of the LRU chain: */
  cache->buckets[cache->lru_tail].lru_chain = head;
  cache->lru_tail = head;

  /* remove the old script from the hash table (if it's there): */
  if (script->ip)
    {
      index = hash (script->ip);
      tmp = cache->buckets + cache->hash[index];
      prev = 0;
      while (1)
        {
          if (tmp == script)
            {
              /* Unlink: from the predecessor's coll_chain, or from
                 the hash slot itself if SCRIPT heads the chain.  */
              if (prev)
                prev->coll_chain = tmp->coll_chain;
              else
                cache->hash[index] = tmp->coll_chain;
              break;
            }
          else
            prev = tmp;
          if (tmp->coll_chain >= IA64_UNW_CACHE_SIZE)
            /* old script wasn't in the hash-table */
            break;
          tmp = cache->buckets + tmp->coll_chain;
        }
    }

  /* enter new script in the hash table */
  index = hash (ip);
  script->coll_chain = cache->hash[index];
  cache->hash[index] = script - cache->buckets;

  script_init (script, ip);
  return script;
}
217
218 static inline void
script_finalize(struct ia64_script * script,struct cursor * c,struct ia64_state_record * sr)219 script_finalize (struct ia64_script *script, struct cursor *c,
220 struct ia64_state_record *sr)
221 {
222 script->pr_mask = sr->pr_mask;
223 script->pr_val = sr->pr_val;
224 script->pi = c->pi;
225 }
226
227 static inline void
script_emit(struct ia64_script * script,struct ia64_script_insn insn)228 script_emit (struct ia64_script *script, struct ia64_script_insn insn)
229 {
230 if (script->count >= IA64_MAX_SCRIPT_LEN)
231 {
232 Dprintf ("%s: script exceeds maximum size of %u instructions!\n",
233 __FUNCTION__, IA64_MAX_SCRIPT_LEN);
234 return;
235 }
236 script->insn[script->count++] = insn;
237 }
238
/* Translate the save/restore information for preserved register I
   (whose state is in R) into one or two script instructions appended
   to SCRIPT.  Does nothing if the register was not saved before the
   target instruction.  */
static void
compile_reg (struct ia64_state_record *sr, int i, struct ia64_reg_info *r,
             struct ia64_script *script)
{
  enum ia64_script_insn_opcode opc;
  unsigned long val, rval;
  struct ia64_script_insn insn;
  long is_preserved_gr;

  if (r->where == IA64_WHERE_NONE || r->when >= sr->when_target)
    return;

  opc = IA64_INSN_MOVE;
  val = rval = r->val;
  /* Only the preserved general registers r4-r7 carry NaT bits that
     must be tracked separately.  */
  is_preserved_gr = (i >= IA64_REG_R4 && i <= IA64_REG_R7);

  if (r->where == IA64_WHERE_GR)
    {
      /* Handle most common case first... */
      if (rval >= 32)
        {
          /* register got spilled to a stacked register */
          if (is_preserved_gr)
            opc = IA64_INSN_MOVE_STACKED_NAT;
          else
            opc = IA64_INSN_MOVE_STACKED;
          val = rval;
        }
      else if (rval >= 4 && rval <= 7)
        {
          /* register got spilled to a preserved register */
          val = IA64_REG_R4 + (rval - 4);
          if (is_preserved_gr)
            opc = IA64_INSN_MOVE_NAT;
        }
      else
        {
          /* register got spilled to a scratch register */
          if (is_preserved_gr)
            opc = IA64_INSN_MOVE_SCRATCH_NAT;
          else
            opc = IA64_INSN_MOVE_SCRATCH;
          val = UNW_IA64_GR + rval;
        }
    }
  else
    {
      switch (r->where)
        {
        case IA64_WHERE_FR:
          /* Note: There is no need to handle NaT-bit info here
             (independent of is_preserved_gr), because for floating-point
             NaTs are represented as NaTVal, so the NaT-info never
             needs to be consulted.  */
          if (rval >= 2 && rval <= 5)
            val = IA64_REG_F2 + (rval - 2);
          else if (rval >= 16 && rval <= 31)
            val = IA64_REG_F16 + (rval - 16);
          else
            {
              opc = IA64_INSN_MOVE_SCRATCH;
              val = UNW_IA64_FR + rval;
            }
          break;

        case IA64_WHERE_BR:
          if (rval >= 1 && rval <= 5)
            {
              val = IA64_REG_B1 + (rval - 1);
              if (is_preserved_gr)
                opc = IA64_INSN_MOVE_NO_NAT;
            }
          else
            {
              opc = IA64_INSN_MOVE_SCRATCH;
              if (is_preserved_gr)
                opc = IA64_INSN_MOVE_SCRATCH_NO_NAT;
              val = UNW_IA64_BR + rval;
            }
          break;

        case IA64_WHERE_SPREL:
          /* Saved at an sp-relative memory address.  */
          if (is_preserved_gr)
            opc = IA64_INSN_ADD_SP_NAT;
          else
            {
              opc = IA64_INSN_ADD_SP;
              /* Tag floating-point save locations so accesses use the
                 right width.  */
              if (i >= IA64_REG_F2 && i <= IA64_REG_F31)
                val |= IA64_LOC_TYPE_FP;
            }
          break;

        case IA64_WHERE_PSPREL:
          /* Saved at a psp-relative memory address.  */
          if (is_preserved_gr)
            opc = IA64_INSN_ADD_PSP_NAT;
          else
            {
              opc = IA64_INSN_ADD_PSP;
              if (i >= IA64_REG_F2 && i <= IA64_REG_F31)
                val |= IA64_LOC_TYPE_FP;
            }
          break;

        default:
          Dprintf ("%s: register %u has unexpected `where' value of %u\n",
                   __FUNCTION__, i, r->where);
          break;
        }
    }
  insn.opc = opc;
  insn.dst = i;
  insn.val = val;
  script_emit (script, insn);

  if (i == IA64_REG_PSP)
    {
      /* c->psp must contain the _value_ of the previous sp, not its
         save-location.  We get this by dereferencing the value we
         just stored in loc[IA64_REG_PSP]: */
      insn.opc = IA64_INSN_LOAD_PSP;
      script_emit (script, insn);
    }
}
362
363 /* Sort the registers which got saved in decreasing order of WHEN
364 value. This is needed to ensure that the save-locations are
365 updated in the proper order. For example, suppose r4 gets spilled
366 to memory and then r5 gets saved in r4. In this case, we need to
367 update the save location of r5 before the one of r4. */
368
369 static inline int
sort_regs(struct ia64_state_record * sr,int regorder[])370 sort_regs (struct ia64_state_record *sr, int regorder[])
371 {
372 int r, i, j, max, max_reg, max_when, num_regs = 0;
373
374 assert (IA64_REG_BSP == 3);
375
376 for (r = IA64_REG_BSP; r < IA64_NUM_PREGS; ++r)
377 {
378 if (sr->curr.reg[r].where == IA64_WHERE_NONE
379 || sr->curr.reg[r].when >= sr->when_target)
380 continue;
381
382 regorder[num_regs++] = r;
383 }
384
385 /* Simple insertion-sort. Involves about N^2/2 comparisons and N
386 exchanges. N is often small (say, 2-5) so a fancier sorting
387 algorithm may not be worthwhile. */
388
389 for (i = max = 0; i < num_regs - 1; ++i)
390 {
391 max_reg = regorder[max];
392 max_when = sr->curr.reg[max_reg].when;
393
394 for (j = i + 1; j < num_regs; ++j)
395 if (sr->curr.reg[regorder[j]].when > max_when)
396 {
397 max = j;
398 max_reg = regorder[j];
399 max_when = sr->curr.reg[max_reg].when;
400 }
401 if (i != max)
402 {
403 regorder[max] = regorder[i];
404 regorder[i] = max_reg;
405 }
406 }
407 return num_regs;
408 }
409
410 /* Build an unwind script that unwinds from state OLD_STATE to the
411 entrypoint of the function that called OLD_STATE. */
412
/* Compile the unwind descriptors for cursor C's current frame into
   SCRIPT.  Returns 0 on success or a negative unwind error code.  */
static inline int
build_script (struct cursor *c, struct ia64_script *script)
{
  int num_regs, i, ret, regorder[IA64_NUM_PREGS - 3];
  struct ia64_reg_info *pri_unat;
  struct ia64_state_record sr;
  struct ia64_script_insn insn;

  /* Parse the unwind info for the current IP into state record SR.  */
  ret = ia64_create_state_record (c, &sr);
  if (ret < 0)
    return ret;

  /* First, compile the update for IA64_REG_PSP.  This is important
     because later save-locations may depend on its correct (updated)
     value.  Fixed-size frames are handled specially and variable-size
     frames get handled via the normal compile_reg().  */

  if (sr.when_target > sr.curr.reg[IA64_REG_PSP].when
      && (sr.curr.reg[IA64_REG_PSP].where == IA64_WHERE_NONE)
      && sr.curr.reg[IA64_REG_PSP].val != 0)
    {
      /* new psp is psp plus frame size */
      insn.opc = IA64_INSN_INC_PSP;
      insn.val = sr.curr.reg[IA64_REG_PSP].val; /* frame size */
      script_emit (script, insn);
    }
  else
    compile_reg (&sr, IA64_REG_PSP, sr.curr.reg + IA64_REG_PSP, script);

  /* Second, compile the update for the primary UNaT, if any: */

  if (sr.when_target >= sr.curr.reg[IA64_REG_PRI_UNAT_GR].when
      || sr.when_target >= sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when)
    {
      /* Pick whichever copy (register vs. memory) of the primary
         UNaT was saved last before the target instruction.  */
      if (sr.when_target < sr.curr.reg[IA64_REG_PRI_UNAT_GR].when)
        /* (primary) NaT bits were saved to memory only */
        pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_MEM;
      else if (sr.when_target < sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when)
        /* (primary) NaT bits were saved to a register only */
        pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_GR;
      else if (sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when >
               sr.curr.reg[IA64_REG_PRI_UNAT_GR].when)
        /* (primary) NaT bits were last saved to memory */
        pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_MEM;
      else
        /* (primary) NaT bits were last saved to a register */
        pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_GR;

      /* Note: we always store the final primary-UNaT location in UNAT_MEM.  */
      compile_reg (&sr, IA64_REG_PRI_UNAT_MEM, pri_unat, script);
    }

  /* Third, compile the other registers in decreasing order of WHEN values.  */

  num_regs = sort_regs (&sr, regorder);
  for (i = 0; i < num_regs; ++i)
    compile_reg (&sr, regorder[i], sr.curr.reg + regorder[i], script);

  script->abi_marker = sr.abi_marker;
  script_finalize (script, c, &sr);

  ia64_free_state_record (&sr);
  return 0;
}
477
478 static inline void
set_nat_info(struct cursor * c,unsigned long dst,ia64_loc_t nat_loc,uint8_t bitnr)479 set_nat_info (struct cursor *c, unsigned long dst,
480 ia64_loc_t nat_loc, uint8_t bitnr)
481 {
482 assert (dst >= IA64_REG_R4 && dst <= IA64_REG_R7);
483
484 c->loc[dst - IA64_REG_R4 + IA64_REG_NAT4] = nat_loc;
485 c->nat_bitnr[dst - IA64_REG_R4] = bitnr;
486 }
487
488 /* Apply the unwinding actions represented by OPS and update SR to
489 reflect the state that existed upon entry to the function that this
490 unwinder represents. */
491
/* Interpret SCRIPT, updating cursor C's save locations (and psp) so
   that C describes the caller's frame.  Returns 0 on success or a
   negative unwind error code.  */
static inline int
run_script (struct ia64_script *script, struct cursor *c)
{
  struct ia64_script_insn *ip, *limit, next_insn;
  ia64_loc_t loc, nat_loc;
  unsigned long opc, dst;
  uint8_t nat_bitnr;
  unw_word_t val;
  int ret;

  c->pi = script->pi;
  ip = script->insn;
  limit = script->insn + script->count;
  /* Software-pipelined fetch: NEXT_INSN always holds *ip before the
     loop body runs, hiding the load latency of the next instruction.
     NOTE(review): on the last iteration this reads insn[count], one
     slot past the final valid instruction (never executed, only
     read); confirm the insn[] array always has room for that.  */
  next_insn = *ip;
  c->abi_marker = script->abi_marker;

  while (ip++ < limit)
    {
      opc = next_insn.opc;
      dst = next_insn.dst;
      val = next_insn.val;
      next_insn = *ip;

      /* This is by far the most common operation: */
      if (likely (opc == IA64_INSN_MOVE_STACKED))
        {
          if ((ret = ia64_get_stacked (c, val, &loc, NULL)) < 0)
            return ret;
        }
      else
        switch (opc)
          {
          case IA64_INSN_INC_PSP:
            c->psp += val;
            continue;           /* no save-location slot to update */

          case IA64_INSN_LOAD_PSP:
            /* Turn the just-stored save-location of psp into its value.  */
            if ((ret = ia64_get (c, c->loc[IA64_REG_PSP], &c->psp)) < 0)
              return ret;
            continue;

          case IA64_INSN_ADD_PSP:
            loc = IA64_LOC_ADDR (c->psp + val, (val & IA64_LOC_TYPE_FP));
            break;

          case IA64_INSN_ADD_SP:
            loc = IA64_LOC_ADDR (c->sp + val, (val & IA64_LOC_TYPE_FP));
            break;

          case IA64_INSN_MOVE_NO_NAT:
            set_nat_info (c, dst, IA64_NULL_LOC, 0);
            /* fallthrough */
          case IA64_INSN_MOVE:
            loc = c->loc[val];
            break;

          case IA64_INSN_MOVE_SCRATCH_NO_NAT:
            set_nat_info (c, dst, IA64_NULL_LOC, 0);
            /* fallthrough */
          case IA64_INSN_MOVE_SCRATCH:
            loc = ia64_scratch_loc (c, val, NULL);
            break;

          case IA64_INSN_ADD_PSP_NAT:
            /* psp-relative save; NaT bit lives in the primary UNaT.  */
            loc = IA64_LOC_ADDR (c->psp + val, 0);
            assert (!IA64_IS_REG_LOC (loc));
            set_nat_info (c, dst,
                          c->loc[IA64_REG_PRI_UNAT_MEM],
                          ia64_unat_slot_num (IA64_GET_ADDR (loc)));
            break;

          case IA64_INSN_ADD_SP_NAT:
            /* sp-relative save; NaT bit lives in the primary UNaT.  */
            loc = IA64_LOC_ADDR (c->sp + val, 0);
            assert (!IA64_IS_REG_LOC (loc));
            set_nat_info (c, dst,
                          c->loc[IA64_REG_PRI_UNAT_MEM],
                          ia64_unat_slot_num (IA64_GET_ADDR (loc)));
            break;

          case IA64_INSN_MOVE_NAT:
            /* Moved to another preserved register: inherit its NaT info.  */
            loc = c->loc[val];
            set_nat_info (c, dst,
                          c->loc[val - IA64_REG_R4 + IA64_REG_NAT4],
                          c->nat_bitnr[val - IA64_REG_R4]);
            break;

          case IA64_INSN_MOVE_STACKED_NAT:
            if ((ret = ia64_get_stacked (c, val, &loc, &nat_loc)) < 0)
              return ret;
            assert (!IA64_IS_REG_LOC (loc));
            set_nat_info (c, dst, nat_loc, rse_slot_num (IA64_GET_ADDR (loc)));
            break;

          case IA64_INSN_MOVE_SCRATCH_NAT:
            loc = ia64_scratch_loc (c, val, NULL);
            nat_loc = ia64_scratch_loc (c, val + (UNW_IA64_NAT - UNW_IA64_GR),
                                        &nat_bitnr);
            set_nat_info (c, dst, nat_loc, nat_bitnr);
            break;
          }
      c->loc[dst] = loc;
    }
  return 0;
}
594
595 static int
uncached_find_save_locs(struct cursor * c)596 uncached_find_save_locs (struct cursor *c)
597 {
598 struct ia64_script script;
599 int ret = 0;
600
601 if ((ret = ia64_fetch_proc_info (c, c->ip, 1)) < 0)
602 return ret;
603
604 script_init (&script, c->ip);
605 if ((ret = build_script (c, &script)) < 0)
606 {
607 if (ret != -UNW_ESTOPUNWIND)
608 Dprintf ("%s: failed to build unwind script for ip %lx\n",
609 __FUNCTION__, (long) c->ip);
610 return ret;
611 }
612 return run_script (&script, c);
613 }
614
/* Locate (or build and cache) the unwind script for cursor C's IP and
   run it, updating C's save locations.  Falls back to an uncached
   lookup when caching is disabled or the cache is contended.  */
HIDDEN int
ia64_find_save_locs (struct cursor *c)
{
  struct ia64_script_cache *cache = NULL;
  struct ia64_script *script = NULL;
  intrmask_t saved_mask;
  int ret = 0;

  if (c->as->caching_policy == UNW_CACHE_NONE)
    return uncached_find_save_locs (c);

  cache = get_script_cache (c->as, &saved_mask);
  if (!cache)
    {
      Debug (1, "contention on script-cache; doing uncached lookup\n");
      return uncached_find_save_locs (c);
    }
  {
    script = script_lookup (cache, c);
    Debug (8, "ip %lx %s in script cache\n", (long) c->ip,
           script ? "hit" : "missed");

    /* Proc-info is needed when there is no cache entry at all, or when
       the entry is an empty placeholder (count == 0) that does not
       already carry cached proc-info.  */
    if (!script || (script->count == 0 && !script->pi.unwind_info))
      {
        if ((ret = ia64_fetch_proc_info (c, c->ip, 1)) < 0)
          goto out;
      }

    if (!script)
      {
        script = script_new (cache, c->ip);
        if (!script)
          {
            Dprintf ("%s: failed to create unwind script\n", __FUNCTION__);
            ret = -UNW_EUNSPEC;
            goto out;
          }
      }
    /* Remember this script as the previous frame's successor hint.  */
    cache->buckets[c->prev_script].hint = script - cache->buckets;

    if (script->count == 0)
      ret = build_script (c, script);

    assert (script->count > 0);

    c->hint = script->hint;
    c->prev_script = script - cache->buckets;

    if (ret < 0)
      {
        if (ret != -UNW_ESTOPUNWIND)
          Dprintf ("%s: failed to locate/build unwind script for ip %lx\n",
                   __FUNCTION__, (long) c->ip);
        goto out;
      }

    ret = run_script (script, c);
  }
 out:
  put_script_cache (c->as, cache, &saved_mask);
  return ret;
}
677
/* Validate cached unwind info for address space AS, flushing whatever
   has gone stale.  ARG is the accessor callback argument.  */
HIDDEN void
ia64_validate_cache (unw_addr_space_t as, void *arg)
{
#ifndef UNW_REMOTE_ONLY
  /* A return of 1 means the local cache check fully handled it.  */
  if (as == unw_local_addr_space && ia64_local_validate_cache (as, arg) == 1)
    return;
#endif

#ifndef UNW_LOCAL_ONLY
  /* local info is up-to-date, check dynamic info.  */
  unwi_dyn_validate_cache (as, arg);
#endif
}
691
692 HIDDEN int
ia64_cache_proc_info(struct cursor * c)693 ia64_cache_proc_info (struct cursor *c)
694 {
695 struct ia64_script_cache *cache;
696 struct ia64_script *script;
697 intrmask_t saved_mask;
698 int ret = 0;
699
700 cache = get_script_cache (c->as, &saved_mask);
701 if (!cache)
702 return ret; /* cache is busy */
703
704 /* Re-check to see if a cache entry has been added in the meantime: */
705 script = script_lookup (cache, c);
706 if (script)
707 goto out;
708
709 script = script_new (cache, c->ip);
710 if (!script)
711 {
712 Dprintf ("%s: failed to create unwind script\n", __FUNCTION__);
713 ret = -UNW_EUNSPEC;
714 goto out;
715 }
716
717 script->pi = c->pi;
718
719 out:
720 put_script_cache (c->as, cache, &saved_mask);
721 return ret;
722 }
723
724 HIDDEN int
ia64_get_cached_proc_info(struct cursor * c)725 ia64_get_cached_proc_info (struct cursor *c)
726 {
727 struct ia64_script_cache *cache;
728 struct ia64_script *script;
729 intrmask_t saved_mask;
730
731 cache = get_script_cache (c->as, &saved_mask);
732 if (!cache)
733 return -UNW_ENOINFO; /* cache is busy */
734 {
735 script = script_lookup (cache, c);
736 if (script)
737 c->pi = script->pi;
738 }
739 put_script_cache (c->as, cache, &saved_mask);
740 return script ? 0 : -UNW_ENOINFO;
741 }
742