/* libunwind - a platform-independent unwind library
   Copyright (c) 2003, 2005 Hewlett-Packard Development Company, L.P.
        Contributed by David Mosberger-Tang <davidm@hpl.hp.com>

This file is part of libunwind.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */

#include "dwarf_i.h"
#include "libunwind_i.h"
#include <stddef.h>
#include <limits.h>

#define alloc_reg_state() (mempool_alloc (&dwarf_reg_state_pool))
#define free_reg_state(rs) (mempool_free (&dwarf_reg_state_pool, rs))

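/* The unwind cache holds DWARF_UNW_CACHE_SIZE register-state buckets; the
   IP hash table used to find them has twice as many slots so that it stays
   at most half full.  */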
#define DWARF_UNW_CACHE_SIZE(log_size) (1 << log_size)
#define DWARF_UNW_HASH_SIZE(log_size) (1 << (log_size + 1))

static inline int
read_regnum (unw_addr_space_t as, unw_accessors_t *a, unw_word_t *addr,
             unw_word_t *valp, void *arg)
{
  int ret;

  if ((ret = dwarf_read_uleb128 (as, a, addr, valp, arg)) < 0)
    return ret;

  if (*valp >= DWARF_NUM_PRESERVED_REGS)
    {
      Debug (1, "Invalid register number %u\n", (unsigned int) *valp);
      return -UNW_EBADREG;
    }
  return 0;
}

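/* Record in the state record that register REGNUM is saved at location
   kind WHERE with value/offset VAL.  */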
static inline void
set_reg (dwarf_state_record_t *sr, unw_word_t regnum, dwarf_where_t where,
         unw_word_t val)
{
  sr->rs_current.reg.where[regnum] = where;
  sr->rs_current.reg.val[regnum] = val;
}

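/* Helpers for the DW_CFA_remember_state/DW_CFA_restore_state stack.  Each
   entry is allocated from dwarf_reg_state_pool and snapshots the current
   register state.  */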
static inline int
push_rstate_stack (dwarf_stackable_reg_state_t **rs_stack)
{
  dwarf_stackable_reg_state_t *old_rs = *rs_stack;
  if (NULL == (*rs_stack = alloc_reg_state ()))
    {
      *rs_stack = old_rs;
      return -1;
    }
  (*rs_stack)->next = old_rs;
  return 0;
}

static inline void
pop_rstate_stack (dwarf_stackable_reg_state_t **rs_stack)
{
  dwarf_stackable_reg_state_t *old_rs = *rs_stack;
  *rs_stack = old_rs->next;
  free_reg_state (old_rs);
}

static inline void
empty_rstate_stack (dwarf_stackable_reg_state_t **rs_stack)
{
  while (*rs_stack)
    pop_rstate_stack (rs_stack);
}

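/* For orientation, the CFI program of a typical x86_64 function that sets
   up a frame pointer looks roughly like this (illustrative example only;
   the exact opcodes and advances depend on the compiler):

     DW_CFA_advance_loc: 1            ; past "push %rbp"
     DW_CFA_def_cfa_offset: 16        ; CFA = rsp + 16
     DW_CFA_offset: rbp at cfa-16     ; caller's rbp saved at CFA - 16
     DW_CFA_advance_loc: 3            ; past "mov %rsp, %rbp"
     DW_CFA_def_cfa_register: rbp     ; CFA = rbp + 16 from here on

   run_cfi_program() below interprets such a sequence, advancing *ip for
   the DW_CFA_advance_loc* opcodes and recording one save rule per register
   in SR, until it runs past END_IP or off the end of the program.  */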
/* Run a CFI program to update the register state.  */
static int
run_cfi_program (struct dwarf_cursor *c, dwarf_state_record_t *sr,
                 unw_word_t *ip, unw_word_t end_ip,
                 unw_word_t *addr, unw_word_t end_addr,
                 dwarf_stackable_reg_state_t **rs_stack,
                 struct dwarf_cie_info *dci)
{
  unw_addr_space_t as;
  void *arg;

  if (c->pi.flags & UNW_PI_FLAG_DEBUG_FRAME)
    {
      /* .debug_frame CFI is stored in local address space.  */
      as = unw_local_addr_space;
      arg = NULL;
    }
  else
    {
      as = c->as;
      arg = c->as_arg;
    }
  unw_accessors_t *a = unw_get_accessors_int (as);
  int ret = 0;

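  /* Interpret one CFI instruction per iteration until we have advanced
     past END_IP, run off the end of the program at END_ADDR, or hit an
     error.  */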
  while (*ip <= end_ip && *addr < end_addr && ret >= 0)
    {
      unw_word_t operand = 0, regnum, val, len;
      uint8_t u8, op;
      uint16_t u16;
      uint32_t u32;

      if ((ret = dwarf_readu8 (as, a, addr, &op, arg)) < 0)
        break;

      if (op & DWARF_CFA_OPCODE_MASK)
        {
          operand = op & DWARF_CFA_OPERAND_MASK;
          op &= ~DWARF_CFA_OPERAND_MASK;
        }
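      /* The "primary" opcodes (DW_CFA_advance_loc, DW_CFA_offset,
         DW_CFA_restore) encode their first operand in the low six bits of
         the opcode byte itself; it was extracted into `operand' above.  */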
      switch ((dwarf_cfa_t) op)
        {
        case DW_CFA_advance_loc:
          *ip += operand * dci->code_align;
          Debug (15, "CFA_advance_loc to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_advance_loc1:
          if ((ret = dwarf_readu8 (as, a, addr, &u8, arg)) < 0)
            break;
          *ip += u8 * dci->code_align;
          Debug (15, "CFA_advance_loc1 to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_advance_loc2:
          if ((ret = dwarf_readu16 (as, a, addr, &u16, arg)) < 0)
            break;
          *ip += u16 * dci->code_align;
          Debug (15, "CFA_advance_loc2 to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_advance_loc4:
          if ((ret = dwarf_readu32 (as, a, addr, &u32, arg)) < 0)
            break;
          *ip += u32 * dci->code_align;
          Debug (15, "CFA_advance_loc4 to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_MIPS_advance_loc8:
#ifdef UNW_TARGET_MIPS
          {
            uint64_t u64 = 0;

            if ((ret = dwarf_readu64 (as, a, addr, &u64, arg)) < 0)
              break;
            *ip += u64 * dci->code_align;
            Debug (15, "CFA_MIPS_advance_loc8\n");
            break;
          }
#else
          Debug (1, "DW_CFA_MIPS_advance_loc8 on non-MIPS target\n");
          ret = -UNW_EINVAL;
          break;
#endif

        case DW_CFA_offset:
          regnum = operand;
          if (regnum >= DWARF_NUM_PRESERVED_REGS)
            {
              Debug (1, "Invalid register number %u in DW_CFA_offset\n",
                     (unsigned int) regnum);
              ret = -UNW_EBADREG;
              break;
            }
          if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)
            break;
          set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align);
          Debug (15, "CFA_offset r%lu at cfa+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_offset_extended:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align);
          Debug (15, "CFA_offset_extended r%lu at cfa+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_offset_extended_sf:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align);
          Debug (15, "CFA_offset_extended_sf r%lu at cfa+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_restore:
          regnum = operand;
          if (regnum >= DWARF_NUM_PRESERVED_REGS)
            {
              Debug (1, "Invalid register number %u in DW_CFA_restore\n",
                     (unsigned int) regnum);
              ret = -UNW_EINVAL;
              break;
            }
          sr->rs_current.reg.where[regnum] = sr->rs_initial.reg.where[regnum];
          sr->rs_current.reg.val[regnum] = sr->rs_initial.reg.val[regnum];
          Debug (15, "CFA_restore r%lu\n", (long) regnum);
          break;

        case DW_CFA_restore_extended:
          if ((ret = dwarf_read_uleb128 (as, a, addr, &regnum, arg)) < 0)
            break;
          if (regnum >= DWARF_NUM_PRESERVED_REGS)
            {
              Debug (1, "Invalid register number %u in "
                     "DW_CFA_restore_extended\n", (unsigned int) regnum);
              ret = -UNW_EINVAL;
              break;
            }
          sr->rs_current.reg.where[regnum] = sr->rs_initial.reg.where[regnum];
          sr->rs_current.reg.val[regnum] = sr->rs_initial.reg.val[regnum];
          Debug (15, "CFA_restore_extended r%lu\n", (long) regnum);
          break;

        case DW_CFA_nop:
          break;

        case DW_CFA_set_loc:
          if ((ret = dwarf_read_encoded_pointer (as, a, addr, dci->fde_encoding,
                                                 &c->pi, ip,
                                                 arg)) < 0)
            break;
          Debug (15, "CFA_set_loc to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_undefined:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;
          set_reg (sr, regnum, DWARF_WHERE_UNDEF, 0);
          Debug (15, "CFA_undefined r%lu\n", (long) regnum);
          break;

        case DW_CFA_same_value:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;
          set_reg (sr, regnum, DWARF_WHERE_SAME, 0);
          Debug (15, "CFA_same_value r%lu\n", (long) regnum);
          break;

        case DW_CFA_register:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_REG, val);
          Debug (15, "CFA_register r%lu to r%lu\n", (long) regnum, (long) val);
          break;

        case DW_CFA_remember_state:
          if (push_rstate_stack(rs_stack) < 0)
            {
              Debug (1, "Out of memory in DW_CFA_remember_state\n");
              ret = -UNW_ENOMEM;
              break;
            }
          (*rs_stack)->state = sr->rs_current;
          Debug (15, "CFA_remember_state\n");
          break;

        case DW_CFA_restore_state:
          if (!*rs_stack)
            {
              Debug (1, "register-state stack underflow\n");
              ret = -UNW_EINVAL;
              break;
            }
          sr->rs_current = (*rs_stack)->state;
          pop_rstate_stack(rs_stack);
          Debug (15, "CFA_restore_state\n");
          break;

        case DW_CFA_def_cfa:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum);
          set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, val);   /* NOT factored! */
          Debug (15, "CFA_def_cfa r%lu+0x%lx\n", (long) regnum, (long) val);
          break;

        case DW_CFA_def_cfa_sf:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum);
          set_reg (sr, DWARF_CFA_OFF_COLUMN, 0,
                   val * dci->data_align);              /* factored! */
          Debug (15, "CFA_def_cfa_sf r%lu+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_def_cfa_register:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;
          set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum);
          Debug (15, "CFA_def_cfa_register r%lu\n", (long) regnum);
          break;

        case DW_CFA_def_cfa_offset:
          if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)
            break;
          set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, val);   /* NOT factored! */
          Debug (15, "CFA_def_cfa_offset 0x%lx\n", (long) val);
          break;

        case DW_CFA_def_cfa_offset_sf:
          if ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0)
            break;
          set_reg (sr, DWARF_CFA_OFF_COLUMN, 0,
                   val * dci->data_align);              /* factored! */
          Debug (15, "CFA_def_cfa_offset_sf 0x%lx\n",
                 (long) (val * dci->data_align));
          break;

        case DW_CFA_def_cfa_expression:
          /* Save the address of the DW_FORM_block for later evaluation.  */
          set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_EXPR, *addr);

          if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0)
            break;

          Debug (15, "CFA_def_cfa_expr @ 0x%lx [%lu bytes]\n",
                 (long) *addr, (long) len);
          *addr += len;
          break;

        case DW_CFA_expression:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;

          /* Save the address of the DW_FORM_block for later evaluation.  */
          set_reg (sr, regnum, DWARF_WHERE_EXPR, *addr);

          if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0)
            break;

          Debug (15, "CFA_expression r%lu @ 0x%lx [%lu bytes]\n",
                 (long) regnum, (long) *addr, (long) len);
          *addr += len;
          break;

        case DW_CFA_val_expression:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;

          /* Save the address of the DW_FORM_block for later evaluation.  */
          set_reg (sr, regnum, DWARF_WHERE_VAL_EXPR, *addr);

          if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0)
            break;

          Debug (15, "CFA_val_expression r%lu @ 0x%lx [%lu bytes]\n",
                 (long) regnum, (long) *addr, (long) len);
          *addr += len;
          break;

        case DW_CFA_GNU_args_size:
          if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)
            break;
          sr->args_size = val;
          Debug (15, "CFA_GNU_args_size %lu\n", (long) val);
          break;

        case DW_CFA_GNU_negative_offset_extended:
          /* A comment in GCC says that this is obsoleted by
             DW_CFA_offset_extended_sf, but that it's used by older
             PowerPC code.  */
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_CFAREL, -(val * dci->data_align));
          Debug (15, "CFA_GNU_negative_offset_extended cfa+0x%lx\n",
                 (long) -(val * dci->data_align));
          break;

        case DW_CFA_GNU_window_save:
#ifdef UNW_TARGET_SPARC
          /* This is a special CFA to handle all 16 windowed registers
             on SPARC.  */
          for (regnum = 16; regnum < 32; ++regnum)
            set_reg (sr, regnum, DWARF_WHERE_CFAREL,
                     (regnum - 16) * sizeof (unw_word_t));
          Debug (15, "CFA_GNU_window_save\n");
          break;
#else
          /* FALL THROUGH */
#endif
        case DW_CFA_lo_user:
        case DW_CFA_hi_user:
          Debug (1, "Unexpected CFA opcode 0x%x\n", op);
          ret = -UNW_EINVAL;
          break;
        }
    }

  if (ret > 0)
    ret = 0;
  return ret;
}

static int
fetch_proc_info (struct dwarf_cursor *c, unw_word_t ip)
{
  int ret, dynamic = 1;

  /* The 'ip' can point either to the previous or next instruction
     depending on what type of frame we have: normal call or a place
     to resume execution (e.g. after signal frame).

     For a normal call frame we need to back up so we point within the
     call itself; this is important because a) the call might be the
     very last instruction of the function and the edge of the FDE,
     and b) so that run_cfi_program() runs locations up to the call
     but not more.

     For signal frame, we need to do the exact opposite and look
     up using the current 'ip' value.  That is where execution will
     continue, and it's important we get this right, as 'ip' could be
     right at the function entry and hence FDE edge, or at instruction
     that manipulates CFA (push/pop).  */

  if (c->use_prev_instr)
    {
#if defined(__arm__)
      /* On arm, the least bit denotes thumb/arm mode, clear it.  */
      ip &= ~(unw_word_t)0x1;
#endif
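      /* The return address points just past the call instruction, so
         backing up by one byte is enough to land inside the call itself,
         which is all the FDE lookup and CFI replay below need.  */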
      --ip;
    }

  memset (&c->pi, 0, sizeof (c->pi));

  /* check dynamic info first --- it overrides everything else */
  ret = unwi_find_dynamic_proc_info (c->as, ip, &c->pi, 1,
                                     c->as_arg);
  if (ret == -UNW_ENOINFO)
    {
      dynamic = 0;
      if ((ret = tdep_find_proc_info (c, ip, 1)) < 0)
        return ret;
    }

  if (c->pi.format != UNW_INFO_FORMAT_DYNAMIC
      && c->pi.format != UNW_INFO_FORMAT_TABLE
      && c->pi.format != UNW_INFO_FORMAT_REMOTE_TABLE)
    return -UNW_ENOINFO;

  c->pi_valid = 1;
  c->pi_is_dynamic = dynamic;

  /* Let system/machine-dependent code determine frame-specific attributes.  */
  if (ret >= 0)
    tdep_fetch_frame (c, ip, 1);

  return ret;
}

static int
parse_dynamic (struct dwarf_cursor *c, unw_word_t ip, dwarf_state_record_t *sr)
{
  Debug (1, "Not yet implemented\n");
  return -UNW_ENOINFO;
}

static inline void
put_unwind_info (struct dwarf_cursor *c, unw_proc_info_t *pi)
{
  if (c->pi_is_dynamic)
    unwi_put_dynamic_unwind_info (c->as, pi, c->as_arg);
  else if (pi->unwind_info && pi->format == UNW_INFO_FORMAT_TABLE)
    {
      mempool_free (&dwarf_cie_info_pool, pi->unwind_info);
      pi->unwind_info = NULL;
    }
  c->pi_valid = 0;
}

static inline int
setup_fde (struct dwarf_cursor *c, dwarf_state_record_t *sr)
{
  int i, ret;

  assert (c->pi_valid);

  memset (sr, 0, sizeof (*sr));
  for (i = 0; i < DWARF_NUM_PRESERVED_REGS + 2; ++i)
    set_reg (sr, i, DWARF_WHERE_SAME, 0);

  struct dwarf_cie_info *dci = c->pi.unwind_info;
  sr->rs_current.ret_addr_column = dci->ret_addr_column;
  unw_word_t addr = dci->cie_instr_start;
  unw_word_t curr_ip = 0;
  dwarf_stackable_reg_state_t *rs_stack = NULL;
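  /* Run the CIE's initial instructions with an unbounded target ip so the
     whole program is executed; the resulting state is saved below as
     rs_initial, which is what DW_CFA_restore rules revert to.  */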
  ret = run_cfi_program (c, sr, &curr_ip, ~(unw_word_t) 0, &addr,
                         dci->cie_instr_end,
                         &rs_stack, dci);
  empty_rstate_stack(&rs_stack);
  if (ret < 0)
    return ret;

  memcpy (&sr->rs_initial, &sr->rs_current, sizeof (sr->rs_initial));
  return 0;
}

static inline int
parse_fde (struct dwarf_cursor *c, unw_word_t ip, dwarf_state_record_t *sr)
{
  int ret;
  struct dwarf_cie_info *dci = c->pi.unwind_info;
  unw_word_t addr = dci->fde_instr_start;
  unw_word_t curr_ip = c->pi.start_ip;
  dwarf_stackable_reg_state_t *rs_stack = NULL;
  /* Process up to the current `ip` for a signal frame and up to `ip - 1`
     for a normal call frame; see the `c->use_prev_instr` handling in
     `fetch_proc_info` for details.  */
  ret = run_cfi_program (c, sr, &curr_ip, ip - c->use_prev_instr, &addr, dci->fde_instr_end,
                         &rs_stack, dci);
  empty_rstate_stack(&rs_stack);
  if (ret < 0)
    return ret;

  return 0;
}

HIDDEN int
dwarf_flush_rs_cache (struct dwarf_rs_cache *cache)
{
  int i;

  if (cache->log_size == DWARF_DEFAULT_LOG_UNW_CACHE_SIZE
      || !cache->hash) {
    cache->hash = cache->default_hash;
    cache->buckets = cache->default_buckets;
    cache->links = cache->default_links;
    cache->log_size = DWARF_DEFAULT_LOG_UNW_CACHE_SIZE;
  } else {
    if (cache->hash && cache->hash != cache->default_hash)
      munmap(cache->hash, DWARF_UNW_HASH_SIZE(cache->prev_log_size)
                          * sizeof (cache->hash[0]));
    if (cache->buckets && cache->buckets != cache->default_buckets)
      munmap(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->prev_log_size)
                             * sizeof (cache->buckets[0]));
    if (cache->links && cache->links != cache->default_links)
      munmap(cache->links, DWARF_UNW_CACHE_SIZE(cache->prev_log_size)
                           * sizeof (cache->links[0]));
    GET_MEMORY(cache->hash, DWARF_UNW_HASH_SIZE(cache->log_size)
                            * sizeof (cache->hash[0]));
    GET_MEMORY(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->log_size)
                               * sizeof (cache->buckets[0]));
    GET_MEMORY(cache->links, DWARF_UNW_CACHE_SIZE(cache->log_size)
                             * sizeof (cache->links[0]));
    if (!cache->hash || !cache->buckets || !cache->links)
      {
        Debug (1, "Unable to allocate cache memory\n");
        return -UNW_ENOMEM;
      }
    cache->prev_log_size = cache->log_size;
  }

  cache->rr_head = 0;

  for (i = 0; i < DWARF_UNW_CACHE_SIZE(cache->log_size); ++i)
    {
      cache->links[i].coll_chain = -1;
      cache->links[i].ip = 0;
      cache->links[i].valid = 0;
    }
  for (i = 0; i < DWARF_UNW_HASH_SIZE(cache->log_size); ++i)
    cache->hash[i] = -1;

  return 0;
}

static inline struct dwarf_rs_cache *
get_rs_cache (unw_addr_space_t as, intrmask_t *saved_maskp)
{
  struct dwarf_rs_cache *cache = &as->global_cache;
  unw_caching_policy_t caching = as->caching_policy;

  if (caching == UNW_CACHE_NONE)
    return NULL;

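  /* With per-thread caching every thread owns a private cache and no
     locking is needed; the shared global cache must be taken under its
     lock.  */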
#if defined(HAVE___CACHE_PER_THREAD) && HAVE___CACHE_PER_THREAD
  if (likely (caching == UNW_CACHE_PER_THREAD))
    {
      static _Thread_local struct dwarf_rs_cache tls_cache __attribute__((tls_model("initial-exec")));
      Debug (16, "using TLS cache\n");
      cache = &tls_cache;
    }
  else
#else
  if (likely (caching == UNW_CACHE_GLOBAL))
#endif
    {
      Debug (16, "acquiring lock\n");
      lock_acquire (&cache->lock, *saved_maskp);
    }

  if ((atomic_load (&as->cache_generation) != atomic_load (&cache->generation))
      || !cache->hash)
    {
      /* cache_size is only set in the global_cache, copy it over before flushing.  */
      cache->log_size = as->global_cache.log_size;
      if (dwarf_flush_rs_cache (cache) < 0)
        return NULL;
      atomic_store (&cache->generation, atomic_load (&as->cache_generation));
    }

  return cache;
}

static inline void
put_rs_cache (unw_addr_space_t as, struct dwarf_rs_cache *cache,
              intrmask_t *saved_maskp)
{
  assert (as->caching_policy != UNW_CACHE_NONE);

  Debug (16, "unmasking signals/interrupts and releasing lock\n");
  if (likely (as->caching_policy == UNW_CACHE_GLOBAL))
    lock_release (&cache->lock, *saved_maskp);
}

static inline unw_hash_index_t CONST_ATTR
hash (unw_word_t ip, unsigned short log_size)
{
  /* based on ((sqrt(5)-1)/2) * 2^64, the golden-ratio hash constant */
# define magic  ((unw_word_t) 0x9e3779b97f4a7c16ULL)

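  /* Fibonacci hashing: multiply by the golden-ratio constant and keep the
     top (log_size + 1) bits, giving an index into the hash table of
     DWARF_UNW_HASH_SIZE(log_size) slots (e.g. with log_size == 7 that is
     the top 8 bits, an index in 0..255).  */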
  return ip * magic >> ((sizeof(unw_word_t) * 8) - (log_size + 1));
}

static inline long
cache_match (struct dwarf_rs_cache *cache, unsigned short index, unw_word_t ip)
{
  return (cache->links[index].valid && (ip == cache->links[index].ip));
}

static dwarf_reg_state_t *
rs_lookup (struct dwarf_rs_cache *cache, struct dwarf_cursor *c)
{
  unsigned short index;
  unw_word_t ip = c->ip;

  if (c->hint > 0)
    {
      index = c->hint - 1;
      if (cache_match (cache, index, ip))
        return &cache->buckets[index];
    }

  for (index = cache->hash[hash (ip, cache->log_size)];
       index < DWARF_UNW_CACHE_SIZE(cache->log_size);
       index = cache->links[index].coll_chain)
    {
      if (cache_match (cache, index, ip))
        return &cache->buckets[index];
    }
  return NULL;
}

static inline dwarf_reg_state_t *
rs_new (struct dwarf_rs_cache *cache, struct dwarf_cursor * c)
{
  unw_hash_index_t index;
  unsigned short head;

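  /* Buckets are handed out round-robin: claim the slot at rr_head and
     evict whatever register state currently lives there.  */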
  head = cache->rr_head;
  cache->rr_head = (head + 1) & (DWARF_UNW_CACHE_SIZE(cache->log_size) - 1);

  /* remove the old rs from the hash table (if it's there): */
  if (cache->links[head].ip)
    {
      unsigned short *pindex;
      for (pindex = &cache->hash[hash (cache->links[head].ip, cache->log_size)];
           *pindex < DWARF_UNW_CACHE_SIZE(cache->log_size);
           pindex = &cache->links[*pindex].coll_chain)
        {
          if (*pindex == head)
            {
              *pindex = cache->links[*pindex].coll_chain;
              break;
            }
        }
    }

  /* enter new rs in the hash table */
  index = hash (c->ip, cache->log_size);
  cache->links[head].coll_chain = cache->hash[index];
  cache->hash[index] = head;

  cache->links[head].ip = c->ip;
  cache->links[head].valid = 1;
  cache->links[head].signal_frame = tdep_cache_frame(c);
  return cache->buckets + head;
}

static int
create_state_record_for (struct dwarf_cursor *c, dwarf_state_record_t *sr,
                         unw_word_t ip)
{
  int ret;
  switch (c->pi.format)
    {
    case UNW_INFO_FORMAT_TABLE:
    case UNW_INFO_FORMAT_REMOTE_TABLE:
      if ((ret = setup_fde(c, sr)) < 0)
        return ret;
      ret = parse_fde (c, ip, sr);
      break;

    case UNW_INFO_FORMAT_DYNAMIC:
      ret = parse_dynamic (c, ip, sr);
      break;

    default:
      Debug (1, "Unexpected unwind-info format %d\n", c->pi.format);
      ret = -UNW_EINVAL;
    }
  return ret;
}

static inline int
eval_location_expr (struct dwarf_cursor *c, unw_word_t stack_val, unw_addr_space_t as,
                    unw_accessors_t *a, unw_word_t addr,
                    dwarf_loc_t *locp, void *arg)
{
  int ret, is_register;
  unw_word_t len, val;

  /* read the length of the expression: */
  if ((ret = dwarf_read_uleb128 (as, a, &addr, &len, arg)) < 0)
    return ret;

  /* evaluate the expression: */
  if ((ret = dwarf_eval_expr (c, stack_val, &addr, len, &val, &is_register)) < 0)
    return ret;

  if (is_register)
    *locp = DWARF_REG_LOC (c, dwarf_to_unw_regnum (val));
  else
    *locp = DWARF_MEM_LOC (c, val);

  return 0;
}

static int
apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs)
{
  unw_word_t regnum, addr, cfa, ip;
  unw_word_t prev_ip, prev_cfa;
  unw_addr_space_t as;
  dwarf_loc_t cfa_loc;
  unw_accessors_t *a;
  int i, ret;
  void *arg;

  prev_ip = c->ip;
  prev_cfa = c->cfa;

  as = c->as;
  arg = c->as_arg;
  a = unw_get_accessors_int (as);

  /* Evaluate the CFA first, because it may be referred to by other
     expressions.  */

  if (rs->reg.where[DWARF_CFA_REG_COLUMN] == DWARF_WHERE_REG)
    {
      /* CFA is equal to [reg] + offset: */

      /* As a special-case, if the stack-pointer is the CFA and the
         stack-pointer wasn't saved, popping the CFA implicitly pops
         the stack-pointer as well.  */
      if ((rs->reg.val[DWARF_CFA_REG_COLUMN] == UNW_TDEP_SP)
          && (UNW_TDEP_SP < ARRAY_SIZE(rs->reg.val))
          && (rs->reg.where[UNW_TDEP_SP] == DWARF_WHERE_SAME))
        cfa = c->cfa;
      else
        {
          regnum = dwarf_to_unw_regnum (rs->reg.val[DWARF_CFA_REG_COLUMN]);
          if ((ret = unw_get_reg ((unw_cursor_t *) c, regnum, &cfa)) < 0)
            return ret;
        }
      cfa += rs->reg.val[DWARF_CFA_OFF_COLUMN];
    }
  else
    {
      /* CFA is equal to EXPR: */

      assert (rs->reg.where[DWARF_CFA_REG_COLUMN] == DWARF_WHERE_EXPR);

      addr = rs->reg.val[DWARF_CFA_REG_COLUMN];
      /* The dwarf standard doesn't specify an initial value to be pushed on
         the stack before DW_CFA_def_cfa_expression evaluation.  We push on a
         dummy value (0) to keep the eval_location_expr function consistent.  */
      if ((ret = eval_location_expr (c, 0, as, a, addr, &cfa_loc, arg)) < 0)
        return ret;
      /* the returned location better be a memory location... */
      if (DWARF_IS_REG_LOC (cfa_loc))
        return -UNW_EBADFRAME;
      cfa = DWARF_GET_LOC (cfa_loc);
    }

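  /* Compute the new register locations into a scratch copy of c->loc; the
     cursor's own location array is updated only after every entry has been
     computed, so a failed expression evaluation leaves it untouched.  */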
  dwarf_loc_t new_loc[DWARF_NUM_PRESERVED_REGS];
  memcpy(new_loc, c->loc, sizeof(new_loc));

  for (i = 0; i < DWARF_NUM_PRESERVED_REGS; ++i)
    {
      switch ((dwarf_where_t) rs->reg.where[i])
        {
        case DWARF_WHERE_UNDEF:
          new_loc[i] = DWARF_NULL_LOC;
          break;

        case DWARF_WHERE_SAME:
          break;

        case DWARF_WHERE_CFAREL:
          new_loc[i] = DWARF_MEM_LOC (c, cfa + rs->reg.val[i]);
          break;

        case DWARF_WHERE_REG:
#ifdef __s390x__
          /* GPRs can be saved in FPRs on s390x */
          if (unw_is_fpreg (dwarf_to_unw_regnum (rs->reg.val[i])))
            {
              new_loc[i] = DWARF_FPREG_LOC (c, dwarf_to_unw_regnum (rs->reg.val[i]));
              break;
            }
#endif
          new_loc[i] = new_loc[rs->reg.val[i]];
          break;

        case DWARF_WHERE_EXPR:
          addr = rs->reg.val[i];
          /* The dwarf standard requires the current CFA to be pushed on the
             stack before DW_CFA_expression evaluation.  */
          if ((ret = eval_location_expr (c, cfa, as, a, addr, new_loc + i, arg)) < 0)
            return ret;
          break;

        case DWARF_WHERE_VAL_EXPR:
          addr = rs->reg.val[i];
          /* The dwarf standard requires the current CFA to be pushed on the
             stack before DW_CFA_val_expression evaluation.  */
          if ((ret = eval_location_expr (c, cfa, as, a, addr, new_loc + i, arg)) < 0)
            return ret;
          new_loc[i] = DWARF_VAL_LOC (c, DWARF_GET_LOC (new_loc[i]));
          break;
        }
    }

  memcpy(c->loc, new_loc, sizeof(new_loc));

  c->cfa = cfa;
  /* DWARF spec says undefined return address location means end of stack.  */
  if (DWARF_IS_NULL_LOC (c->loc[rs->ret_addr_column]))
    {
      c->ip = 0;
      ret = 0;
    }
  else
    {
      ret = dwarf_get (c, c->loc[rs->ret_addr_column], &ip);
      if (ret < 0)
        return ret;
      c->ip = ip;
      ret = 1;
    }

  /* XXX: check for ip to be code_aligned */
  if (c->ip == prev_ip && c->cfa == prev_cfa)
    {
      Dprintf ("%s: ip and cfa unchanged; stopping here (ip=0x%lx)\n",
               __FUNCTION__, (long) c->ip);
      return -UNW_EBADFRAME;
    }

  if (c->stash_frames)
    tdep_stash_frame (c, rs);

  return ret;
}

/* Find the saved locations.  */
static int
find_reg_state (struct dwarf_cursor *c, dwarf_state_record_t *sr)
{
  dwarf_reg_state_t *rs;
  struct dwarf_rs_cache *cache;
  int ret = 0;
  intrmask_t saved_mask;

  if ((cache = get_rs_cache(c->as, &saved_mask)) &&
      (rs = rs_lookup(cache, c)))
    {
      /* update hint; no locking needed: single-word writes are atomic */
      unsigned short index = rs - cache->buckets;
      c->use_prev_instr = ! cache->links[index].signal_frame;
      memcpy (&sr->rs_current, rs, sizeof (*rs));
    }
  else
    {
      ret = fetch_proc_info (c, c->ip);
      int next_use_prev_instr = c->use_prev_instr;
      if (ret >= 0)
        {
          /* Update use_prev_instr for the next frame.  */
          assert(c->pi.unwind_info);
          struct dwarf_cie_info *dci = c->pi.unwind_info;
          next_use_prev_instr = ! dci->signal_frame;
          ret = create_state_record_for (c, sr, c->ip);
        }
      put_unwind_info (c, &c->pi);
      c->use_prev_instr = next_use_prev_instr;

      if (cache && ret >= 0)
        {
          rs = rs_new (cache, c);
          cache->links[rs - cache->buckets].hint = 0;
          memcpy(rs, &sr->rs_current, sizeof(*rs));
        }
    }

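  /* Chain a lookup hint from the previous frame's cache entry to this one
     so that unwinding the same call chain again can bypass the hash
     lookup.  */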
  unsigned short index = -1;
  if (cache)
    {
      if (rs)
        {
          index = rs - cache->buckets;
          c->hint = cache->links[index].hint;
          cache->links[c->prev_rs].hint = index + 1;
          c->prev_rs = index;
        }
      put_rs_cache (c->as, cache, &saved_mask);
    }
  if (ret < 0)
    return ret;
  if (cache)
    tdep_reuse_frame (c, cache->links[index].signal_frame);
  return 0;
}

/* The function finds the saved locations and applies the register
   state as well.  */
HIDDEN int
dwarf_step (struct dwarf_cursor *c)
{
  int ret;
  dwarf_state_record_t sr;
  if ((ret = find_reg_state (c, &sr)) < 0)
    return ret;
  return apply_reg_state (c, &sr.rs_current);
}

HIDDEN int
dwarf_make_proc_info (struct dwarf_cursor *c)
{
#if 0
  if (c->as->caching_policy == UNW_CACHE_NONE
      || get_cached_proc_info (c) < 0)
#endif
  /* Need to check whether the current frame has an args_size and set the
     cursor appropriately; this is only needed for unw_resume.  */
  dwarf_state_record_t sr;
  int ret;

  /* Look it up the slow way...  */
  ret = fetch_proc_info (c, c->ip);
  if (ret >= 0)
    ret = create_state_record_for (c, &sr, c->ip);
  put_unwind_info (c, &c->pi);
  if (ret < 0)
    return ret;
  c->args_size = sr.args_size;

  return 0;
}

static int
dwarf_reg_states_dynamic_iterate(struct dwarf_cursor *c,
                                 unw_reg_states_callback cb,
                                 void *token)
{
  Debug (1, "Not yet implemented\n");
  return -UNW_ENOINFO;
}

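/* Replay the FDE's CFI program in chunks, handing each ip range that has a
   distinct register state to the callback.  */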
static int
dwarf_reg_states_table_iterate(struct dwarf_cursor *c,
                               unw_reg_states_callback cb,
                               void *token)
{
  dwarf_state_record_t sr;
  int ret = setup_fde(c, &sr);
  struct dwarf_cie_info *dci = c->pi.unwind_info;
  unw_word_t addr = dci->fde_instr_start;
  unw_word_t curr_ip = c->pi.start_ip;
  dwarf_stackable_reg_state_t *rs_stack = NULL;
  while (ret >= 0 && curr_ip < c->pi.end_ip && addr < dci->fde_instr_end)
    {
      unw_word_t prev_ip = curr_ip;
      ret = run_cfi_program (c, &sr, &curr_ip, prev_ip, &addr, dci->fde_instr_end,
                             &rs_stack, dci);
      if (ret >= 0 && prev_ip < curr_ip)
        ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), prev_ip, curr_ip);
    }
  empty_rstate_stack(&rs_stack);
#if defined(NEED_LAST_IP)
  if (ret >= 0 && curr_ip < c->pi.last_ip)
    /* report the dead zone after the procedure ends */
    ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), curr_ip, c->pi.last_ip);
#else
  if (ret >= 0 && curr_ip < c->pi.end_ip)
    /* report for whatever is left before procedure end */
    ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), curr_ip, c->pi.end_ip);
#endif
  return ret;
}

HIDDEN int
dwarf_reg_states_iterate(struct dwarf_cursor *c,
                         unw_reg_states_callback cb,
                         void *token)
{
  int ret = fetch_proc_info (c, c->ip);
  int next_use_prev_instr = c->use_prev_instr;
  if (ret >= 0)
    {
      /* Update use_prev_instr for the next frame.  */
      assert(c->pi.unwind_info);
      struct dwarf_cie_info *dci = c->pi.unwind_info;
      next_use_prev_instr = ! dci->signal_frame;
      switch (c->pi.format)
        {
        case UNW_INFO_FORMAT_TABLE:
        case UNW_INFO_FORMAT_REMOTE_TABLE:
          ret = dwarf_reg_states_table_iterate(c, cb, token);
          break;

        case UNW_INFO_FORMAT_DYNAMIC:
          ret = dwarf_reg_states_dynamic_iterate (c, cb, token);
          break;

        default:
          Debug (1, "Unexpected unwind-info format %d\n", c->pi.format);
          ret = -UNW_EINVAL;
        }
    }
  put_unwind_info (c, &c->pi);
  c->use_prev_instr = next_use_prev_instr;
  return ret;
}

HIDDEN int
dwarf_apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs)
{
  return apply_reg_state(c, rs);
}