/* libunwind - a platform-independent unwind library
   Copyright (c) 2003, 2005 Hewlett-Packard Development Company, L.P.
        Contributed by David Mosberger-Tang <davidm@hpl.hp.com>

This file is part of libunwind.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */

#include "dwarf_i.h"
#include "libunwind_i.h"
#include <stddef.h>
#include <limits.h>

#define alloc_reg_state()       (mempool_alloc (&dwarf_reg_state_pool))
#define free_reg_state(rs)      (mempool_free (&dwarf_reg_state_pool, rs))

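/* The unwind cache holds (1 << log_size) register-state buckets; the hash
   table is kept at twice that size so lookup chains stay short.  */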
#define DWARF_UNW_CACHE_SIZE(log_size)   (1 << log_size)
#define DWARF_UNW_HASH_SIZE(log_size)    (1 << (log_size + 1))

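/* Read a ULEB128-encoded DWARF register number and reject anything outside
   the set of preserved registers tracked for this target.  */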
static inline int
read_regnum (unw_addr_space_t as, unw_accessors_t *a, unw_word_t *addr,
             unw_word_t *valp, void *arg)
{
  int ret;

  if ((ret = dwarf_read_uleb128 (as, a, addr, valp, arg)) < 0)
    return ret;

  if (*valp >= DWARF_NUM_PRESERVED_REGS)
    {
      Debug (1, "Invalid register number %u\n", (unsigned int) *valp);
      return -UNW_EBADREG;
    }
  return 0;
}

static inline void
set_reg (dwarf_state_record_t *sr, unw_word_t regnum, dwarf_where_t where,
         unw_word_t val)
{
  sr->rs_current.reg.where[regnum] = where;
  sr->rs_current.reg.val[regnum] = val;
}

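/* DW_CFA_remember_state/DW_CFA_restore_state operate on a small stack of
   register states; these helpers push, pop, and drain that stack using the
   dwarf_reg_state mempool.  */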
static inline int
push_rstate_stack(dwarf_stackable_reg_state_t **rs_stack)
{
  dwarf_stackable_reg_state_t *old_rs = *rs_stack;
  if (NULL == (*rs_stack = alloc_reg_state ()))
    {
      *rs_stack = old_rs;
      return -1;
    }
  (*rs_stack)->next = old_rs;
  return 0;
}

static inline void
pop_rstate_stack(dwarf_stackable_reg_state_t **rs_stack)
{
  dwarf_stackable_reg_state_t *old_rs = *rs_stack;
  *rs_stack = old_rs->next;
  free_reg_state (old_rs);
}

static inline void
empty_rstate_stack(dwarf_stackable_reg_state_t **rs_stack)
{
  while (*rs_stack)
    pop_rstate_stack(rs_stack);
}

/* Run a CFI program to update the register state.  */
static int
run_cfi_program (struct dwarf_cursor *c, dwarf_state_record_t *sr,
                 unw_word_t *ip, unw_word_t end_ip,
                 unw_word_t *addr, unw_word_t end_addr,
                 dwarf_stackable_reg_state_t **rs_stack,
                 struct dwarf_cie_info *dci)
{
  unw_addr_space_t as;
  void *arg;

  if (c->pi.flags & UNW_PI_FLAG_DEBUG_FRAME)
    {
      /* .debug_frame CFI is stored in local address space.  */
      as = unw_local_addr_space;
      arg = NULL;
    }
  else
    {
      as = c->as;
      arg = c->as_arg;
    }
  unw_accessors_t *a = unw_get_accessors_int (as);
  int ret = 0;

  while (*ip <= end_ip && *addr < end_addr && ret >= 0)
    {
      unw_word_t operand = 0, regnum, val, len;
      uint8_t u8, op;
      uint16_t u16;
      uint32_t u32;

      if ((ret = dwarf_readu8 (as, a, addr, &op, arg)) < 0)
        break;

      if (op & DWARF_CFA_OPCODE_MASK)
        {
          operand = op & DWARF_CFA_OPERAND_MASK;
          op &= ~DWARF_CFA_OPERAND_MASK;
        }
      switch ((dwarf_cfa_t) op)
        {
        case DW_CFA_advance_loc:
          *ip += operand * dci->code_align;
          Debug (15, "CFA_advance_loc to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_advance_loc1:
          if ((ret = dwarf_readu8 (as, a, addr, &u8, arg)) < 0)
            break;
          *ip += u8 * dci->code_align;
          Debug (15, "CFA_advance_loc1 to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_advance_loc2:
          if ((ret = dwarf_readu16 (as, a, addr, &u16, arg)) < 0)
            break;
          *ip += u16 * dci->code_align;
          Debug (15, "CFA_advance_loc2 to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_advance_loc4:
          if ((ret = dwarf_readu32 (as, a, addr, &u32, arg)) < 0)
            break;
          *ip += u32 * dci->code_align;
          Debug (15, "CFA_advance_loc4 to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_MIPS_advance_loc8:
#ifdef UNW_TARGET_MIPS
          {
            uint64_t u64 = 0;

            if ((ret = dwarf_readu64 (as, a, addr, &u64, arg)) < 0)
              break;
            *ip += u64 * dci->code_align;
            Debug (15, "CFA_MIPS_advance_loc8\n");
            break;
          }
#else
          Debug (1, "DW_CFA_MIPS_advance_loc8 on non-MIPS target\n");
          ret = -UNW_EINVAL;
          break;
#endif

        case DW_CFA_offset:
          regnum = operand;
          if (regnum >= DWARF_NUM_PRESERVED_REGS)
            {
              Debug (1, "Invalid register number %u in DW_CFA_offset\n",
                     (unsigned int) regnum);
              ret = -UNW_EBADREG;
              break;
            }
          if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)
            break;
          set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align);
          Debug (15, "CFA_offset r%lu at cfa+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_offset_extended:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align);
          Debug (15, "CFA_offset_extended r%lu at cfa+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_offset_extended_sf:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_CFAREL, val * dci->data_align);
          Debug (15, "CFA_offset_extended_sf r%lu at cfa+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_restore:
          regnum = operand;
          if (regnum >= DWARF_NUM_PRESERVED_REGS)
            {
              Debug (1, "Invalid register number %u in DW_CFA_restore\n",
                     (unsigned int) regnum);
              ret = -UNW_EINVAL;
              break;
            }
          sr->rs_current.reg.where[regnum] = sr->rs_initial.reg.where[regnum];
          sr->rs_current.reg.val[regnum] = sr->rs_initial.reg.val[regnum];
          Debug (15, "CFA_restore r%lu\n", (long) regnum);
          break;

        case DW_CFA_restore_extended:
          if ((ret = dwarf_read_uleb128 (as, a, addr, &regnum, arg)) < 0)
            break;
          if (regnum >= DWARF_NUM_PRESERVED_REGS)
            {
              Debug (1, "Invalid register number %u in "
                     "DW_CFA_restore_extended\n", (unsigned int) regnum);
              ret = -UNW_EINVAL;
              break;
            }
          sr->rs_current.reg.where[regnum] = sr->rs_initial.reg.where[regnum];
          sr->rs_current.reg.val[regnum] = sr->rs_initial.reg.val[regnum];
          Debug (15, "CFA_restore_extended r%lu\n", (long) regnum);
          break;

        case DW_CFA_nop:
          break;

        case DW_CFA_set_loc:
          if ((ret = dwarf_read_encoded_pointer (as, a, addr, dci->fde_encoding,
                                                 &c->pi, ip,
                                                 arg)) < 0)
            break;
          Debug (15, "CFA_set_loc to 0x%lx\n", (long) *ip);
          break;

        case DW_CFA_undefined:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;
          set_reg (sr, regnum, DWARF_WHERE_UNDEF, 0);
          Debug (15, "CFA_undefined r%lu\n", (long) regnum);
          break;

        case DW_CFA_same_value:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;
          set_reg (sr, regnum, DWARF_WHERE_SAME, 0);
          Debug (15, "CFA_same_value r%lu\n", (long) regnum);
          break;

        case DW_CFA_register:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_REG, val);
          Debug (15, "CFA_register r%lu to r%lu\n", (long) regnum, (long) val);
          break;

        case DW_CFA_remember_state:
          if (push_rstate_stack(rs_stack) < 0)
            {
              Debug (1, "Out of memory in DW_CFA_remember_state\n");
              ret = -UNW_ENOMEM;
              break;
            }
          (*rs_stack)->state = sr->rs_current;
          Debug (15, "CFA_remember_state\n");
          break;

        case DW_CFA_restore_state:
          if (!*rs_stack)
            {
              Debug (1, "register-state stack underflow\n");
              ret = -UNW_EINVAL;
              break;
            }
          sr->rs_current = (*rs_stack)->state;
          pop_rstate_stack(rs_stack);
          Debug (15, "CFA_restore_state\n");
          break;

        case DW_CFA_def_cfa:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum);
          set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, val);     /* NOT factored!  */
          Debug (15, "CFA_def_cfa r%lu+0x%lx\n", (long) regnum, (long) val);
          break;

        case DW_CFA_def_cfa_sf:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum);
          set_reg (sr, DWARF_CFA_OFF_COLUMN, 0,
                   val * dci->data_align);                /* factored!  */
          Debug (15, "CFA_def_cfa_sf r%lu+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_def_cfa_register:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;
          set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_REG, regnum);
          Debug (15, "CFA_def_cfa_register r%lu\n", (long) regnum);
          break;

        case DW_CFA_def_cfa_offset:
          if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)
            break;
          set_reg (sr, DWARF_CFA_OFF_COLUMN, 0, val);     /* NOT factored!  */
          Debug (15, "CFA_def_cfa_offset 0x%lx\n", (long) val);
          break;

        case DW_CFA_def_cfa_offset_sf:
          if ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0)
            break;
          set_reg (sr, DWARF_CFA_OFF_COLUMN, 0,
                   val * dci->data_align);                /* factored!  */
          Debug (15, "CFA_def_cfa_offset_sf 0x%lx\n",
                 (long) (val * dci->data_align));
          break;

        case DW_CFA_val_offset:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_VAL, val * dci->data_align);
          Debug (15, "CFA_val_offset r%lu at cfa+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_val_offset_sf:
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_sleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_VAL, val * dci->data_align);
          Debug (15, "CFA_val_offset_sf r%lu at cfa+0x%lx\n",
                 (long) regnum, (long) (val * dci->data_align));
          break;

        case DW_CFA_def_cfa_expression:
          /* Save the address of the DW_FORM_block for later evaluation.  */
          set_reg (sr, DWARF_CFA_REG_COLUMN, DWARF_WHERE_EXPR, *addr);

          if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0)
            break;

          Debug (15, "CFA_def_cfa_expr @ 0x%lx [%lu bytes]\n",
                 (long) *addr, (long) len);
          *addr += len;
          break;

        case DW_CFA_expression:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;

          /* Save the address of the DW_FORM_block for later evaluation.  */
          set_reg (sr, regnum, DWARF_WHERE_EXPR, *addr);

          if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0)
            break;

          Debug (15, "CFA_expression r%lu @ 0x%lx [%lu bytes]\n",
                 (long) regnum, (long) *addr, (long) len);
          *addr += len;
          break;

        case DW_CFA_val_expression:
          if ((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
            break;

          /* Save the address of the DW_FORM_block for later evaluation.  */
          set_reg (sr, regnum, DWARF_WHERE_VAL_EXPR, *addr);

          if ((ret = dwarf_read_uleb128 (as, a, addr, &len, arg)) < 0)
            break;

          Debug (15, "CFA_val_expression r%lu @ 0x%lx [%lu bytes]\n",
                 (long) regnum, (long) *addr, (long) len);
          *addr += len;
          break;

        case DW_CFA_GNU_args_size:
          if ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0)
            break;
          sr->args_size = val;
          Debug (15, "CFA_GNU_args_size %lu\n", (long) val);
          break;

        case DW_CFA_GNU_negative_offset_extended:
          /* A comment in GCC says that this is obsoleted by
             DW_CFA_offset_extended_sf, but that it's used by older
             PowerPC code.  */
          if (((ret = read_regnum (as, a, addr, &regnum, arg)) < 0)
              || ((ret = dwarf_read_uleb128 (as, a, addr, &val, arg)) < 0))
            break;
          set_reg (sr, regnum, DWARF_WHERE_CFAREL, ~(val * dci->data_align) + 1);
          Debug (15, "CFA_GNU_negative_offset_extended cfa+0x%lx\n",
                 (long) (~(val * dci->data_align) + 1));
          break;

        case DW_CFA_GNU_window_save:
#ifdef UNW_TARGET_SPARC
          /* This is a special CFA to handle all 16 windowed registers
             on SPARC.  */
          for (regnum = 16; regnum < 32; ++regnum)
            set_reg (sr, regnum, DWARF_WHERE_CFAREL,
                     (regnum - 16) * sizeof (unw_word_t));
          Debug (15, "CFA_GNU_window_save\n");
          break;
#else
          /* FALL THROUGH */
#endif
        case DW_CFA_lo_user:
        case DW_CFA_hi_user:
          Debug (1, "Unexpected CFA opcode 0x%x\n", op);
          ret = -UNW_EINVAL;
          break;
        }
    }

  if (ret > 0)
    ret = 0;
  return ret;
}

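/* Locate the procedure info (dynamic info or CIE/FDE) covering IP and cache
   it in c->pi.  IP is adjusted below for normal call frames; see the comment
   in the function body.  */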
static int
fetch_proc_info (struct dwarf_cursor *c, unw_word_t ip)
{
  int ret, dynamic = 1;

  /* The 'ip' can point either to the previous or next instruction
     depending on what type of frame we have: normal call or a place
     to resume execution (e.g. after signal frame).

     For a normal call frame we need to back up so we point within the
     call itself; this is important because a) the call might be the
     very last instruction of the function and the edge of the FDE,
     and b) so that run_cfi_program() runs locations up to the call
     but not more.

     For signal frame, we need to do the exact opposite and look
     up using the current 'ip' value.  That is where execution will
     continue, and it's important we get this right, as 'ip' could be
     right at the function entry and hence FDE edge, or at instruction
     that manipulates CFA (push/pop).  */

  if (c->use_prev_instr)
    {
#if defined(__arm__)
      /* On arm, the least bit denotes thumb/arm mode, clear it.  */
      ip &= ~(unw_word_t)0x1;
#endif
      --ip;
    }

  memset (&c->pi, 0, sizeof (c->pi));

  /* check dynamic info first --- it overrides everything else */
  ret = unwi_find_dynamic_proc_info (c->as, ip, &c->pi, 1,
                                     c->as_arg);
  if (ret == -UNW_ENOINFO)
    {
      dynamic = 0;
      if ((ret = tdep_find_proc_info (c, ip, 1)) < 0)
        return ret;
    }

  if (c->pi.format != UNW_INFO_FORMAT_DYNAMIC
      && c->pi.format != UNW_INFO_FORMAT_TABLE
      && c->pi.format != UNW_INFO_FORMAT_REMOTE_TABLE)
    return -UNW_ENOINFO;

  c->pi_valid = 1;
  c->pi_is_dynamic = dynamic;

  /* Let system/machine-dependent code determine frame-specific attributes.  */
  if (ret >= 0)
    tdep_fetch_frame (c, ip, 1);

  return ret;
}

static int
parse_dynamic (struct dwarf_cursor *c, unw_word_t ip, dwarf_state_record_t *sr)
{
  Debug (1, "Not yet implemented\n");
  return -UNW_ENOINFO;
}

static inline void
put_unwind_info (struct dwarf_cursor *c, unw_proc_info_t *pi)
{
  if (c->pi_is_dynamic)
    unwi_put_dynamic_unwind_info (c->as, pi, c->as_arg);
  else if (pi->unwind_info && pi->format == UNW_INFO_FORMAT_TABLE)
    {
      mempool_free (&dwarf_cie_info_pool, pi->unwind_info);
      pi->unwind_info = NULL;
    }
  c->pi_valid = 0;
}

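/* Initialize the state record and run the CIE's initial instructions to
   build the initial register state (saved into rs_initial).  */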
static inline int
setup_fde (struct dwarf_cursor *c, dwarf_state_record_t *sr)
{
  int i, ret;

  assert (c->pi_valid);

  memset (sr, 0, sizeof (*sr));
  for (i = 0; i < DWARF_NUM_PRESERVED_REGS + 2; ++i)
    set_reg (sr, i, DWARF_WHERE_SAME, 0);

  struct dwarf_cie_info *dci = c->pi.unwind_info;
  sr->rs_current.ret_addr_column = dci->ret_addr_column;
  unw_word_t addr = dci->cie_instr_start;
  unw_word_t curr_ip = 0;
  dwarf_stackable_reg_state_t *rs_stack = NULL;
  ret = run_cfi_program (c, sr, &curr_ip, ~(unw_word_t) 0, &addr,
                         dci->cie_instr_end,
                         &rs_stack, dci);
  empty_rstate_stack(&rs_stack);
  if (ret < 0)
    return ret;

  memcpy (&sr->rs_initial, &sr->rs_current, sizeof (sr->rs_initial));
  return 0;
}

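/* Run the FDE's CFI program from the procedure's start address up to the
   target IP to obtain the register state in effect at that point.  */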
static inline int
parse_fde (struct dwarf_cursor *c, unw_word_t ip, dwarf_state_record_t *sr)
{
  int ret;
  struct dwarf_cie_info *dci = c->pi.unwind_info;
  unw_word_t addr = dci->fde_instr_start;
  unw_word_t curr_ip = c->pi.start_ip;
  dwarf_stackable_reg_state_t *rs_stack = NULL;
  /* Process up to the current `ip` for a signal frame and `ip - 1` for a
     normal call frame.  See the `c->use_prev_instr` handling in
     `fetch_proc_info` for details.  */
  ret = run_cfi_program (c, sr, &curr_ip, ip - c->use_prev_instr, &addr, dci->fde_instr_end,
                         &rs_stack, dci);
  empty_rstate_stack(&rs_stack);
  if (ret < 0)
    return ret;

  return 0;
}

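/* Reset the register-state cache.  When a non-default cache size is
   configured, the hash, bucket, and link tables are re-allocated (any
   previously mmap'd tables are released first).  */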
HIDDEN int
dwarf_flush_rs_cache (struct dwarf_rs_cache *cache)
{
  int i;

  if (cache->log_size == DWARF_DEFAULT_LOG_UNW_CACHE_SIZE
      || !cache->hash) {
    cache->hash = cache->default_hash;
    cache->buckets = cache->default_buckets;
    cache->links = cache->default_links;
    cache->log_size = DWARF_DEFAULT_LOG_UNW_CACHE_SIZE;
  } else {
    if (cache->hash && cache->hash != cache->default_hash)
      munmap(cache->hash, DWARF_UNW_HASH_SIZE(cache->prev_log_size)
                          * sizeof (cache->hash[0]));
    if (cache->buckets && cache->buckets != cache->default_buckets)
      munmap(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->prev_log_size)
                             * sizeof (cache->buckets[0]));
    if (cache->links && cache->links != cache->default_links)
      munmap(cache->links, DWARF_UNW_CACHE_SIZE(cache->prev_log_size)
                           * sizeof (cache->links[0]));
    GET_MEMORY(cache->hash, DWARF_UNW_HASH_SIZE(cache->log_size)
                            * sizeof (cache->hash[0]));
    GET_MEMORY(cache->buckets, DWARF_UNW_CACHE_SIZE(cache->log_size)
                               * sizeof (cache->buckets[0]));
    GET_MEMORY(cache->links, DWARF_UNW_CACHE_SIZE(cache->log_size)
                             * sizeof (cache->links[0]));
    if (!cache->hash || !cache->buckets || !cache->links)
      {
        Debug (1, "Unable to allocate cache memory");
        return -UNW_ENOMEM;
      }
    cache->prev_log_size = cache->log_size;
  }

  cache->rr_head = 0;

  for (i = 0; i < DWARF_UNW_CACHE_SIZE(cache->log_size); ++i)
    {
      cache->links[i].coll_chain = -1;
      cache->links[i].ip = 0;
      cache->links[i].valid = 0;
    }
  for (i = 0; i < DWARF_UNW_HASH_SIZE(cache->log_size); ++i)
    cache->hash[i] = -1;

  return 0;
}

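/* Return the register-state cache to use (per-thread, or global with its lock
   taken), or NULL if caching is disabled or the cache cannot be initialized.
   The cache is flushed when the address space's cache generation changed.  */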
static inline struct dwarf_rs_cache *
get_rs_cache (unw_addr_space_t as, intrmask_t *saved_maskp)
{
  struct dwarf_rs_cache *cache = &as->global_cache;
  unw_caching_policy_t caching = as->caching_policy;

  if (caching == UNW_CACHE_NONE)
    return NULL;

#if defined(HAVE___CACHE_PER_THREAD) && HAVE___CACHE_PER_THREAD
  if (likely (caching == UNW_CACHE_PER_THREAD))
    {
      static _Thread_local struct dwarf_rs_cache tls_cache __attribute__((tls_model("initial-exec")));
      Debug (16, "using TLS cache\n");
      cache = &tls_cache;
    }
  else
#else
  if (likely (caching == UNW_CACHE_GLOBAL))
#endif
    {
      Debug (16, "acquiring lock\n");
      lock_acquire (&cache->lock, *saved_maskp);
    }

  if ((atomic_load (&as->cache_generation) != atomic_load (&cache->generation))
      || !cache->hash)
    {
      /* cache_size is only set in the global_cache, copy it over before flushing */
      cache->log_size = as->global_cache.log_size;
      if (dwarf_flush_rs_cache (cache) < 0)
        return NULL;
      atomic_store (&cache->generation, atomic_load (&as->cache_generation));
    }

  return cache;
}

static inline void
put_rs_cache (unw_addr_space_t as, struct dwarf_rs_cache *cache,
              intrmask_t *saved_maskp)
{
  assert (as->caching_policy != UNW_CACHE_NONE);

  Debug (16, "unmasking signals/interrupts and releasing lock\n");
  if (likely (as->caching_policy == UNW_CACHE_GLOBAL))
    lock_release (&cache->lock, *saved_maskp);
}

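/* Multiplicative (Fibonacci) hashing: multiply IP by 2^64 over the golden
   ratio and keep the top log_size + 1 bits as the hash-table index.  */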
static inline unw_hash_index_t CONST_ATTR
hash (unw_word_t ip, unsigned short log_size)
{
  /* based on ((sqrt(5)-1)/2) * 2^64 */
# define magic  ((unw_word_t) 0x9e3779b97f4a7c16ULL)

  return ip * magic >> ((sizeof(unw_word_t) * 8) - (log_size + 1));
}

static inline long
cache_match (struct dwarf_rs_cache *cache, unsigned short index, unw_word_t ip)
{
  return (cache->links[index].valid && (ip == cache->links[index].ip));
}

static dwarf_reg_state_t *
rs_lookup (struct dwarf_rs_cache *cache, struct dwarf_cursor *c)
{
  unsigned short index;
  unw_word_t ip = c->ip;

  if (c->hint > 0)
    {
      index = c->hint - 1;
      if (cache_match (cache, index, ip))
        return &cache->buckets[index];
    }

  for (index = cache->hash[hash (ip, cache->log_size)];
       index < DWARF_UNW_CACHE_SIZE(cache->log_size);
       index = cache->links[index].coll_chain)
    {
      if (cache_match (cache, index, ip))
        return &cache->buckets[index];
    }
  return NULL;
}

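/* Claim the next round-robin cache bucket for the state at c->ip, unlinking
   the evicted entry from its hash chain before inserting the bucket under the
   new IP's hash.  */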
static inline dwarf_reg_state_t *
rs_new (struct dwarf_rs_cache *cache, struct dwarf_cursor * c)
{
  unw_hash_index_t index;
  unsigned short head;

  head = cache->rr_head;
  cache->rr_head = (head + 1) & (DWARF_UNW_CACHE_SIZE(cache->log_size) - 1);

  /* remove the old rs from the hash table (if it's there): */
  if (cache->links[head].ip)
    {
      unsigned short *pindex;
      for (pindex = &cache->hash[hash (cache->links[head].ip, cache->log_size)];
           *pindex < DWARF_UNW_CACHE_SIZE(cache->log_size);
           pindex = &cache->links[*pindex].coll_chain)
        {
          if (*pindex == head)
            {
              *pindex = cache->links[*pindex].coll_chain;
              break;
            }
        }
    }

  /* enter new rs in the hash table */
  index = hash (c->ip, cache->log_size);
  cache->links[head].coll_chain = cache->hash[index];
  cache->hash[index] = head;

  cache->links[head].ip = c->ip;
  cache->links[head].valid = 1;
  cache->links[head].signal_frame = tdep_cache_frame(c);
  return cache->buckets + head;
}

static int
create_state_record_for (struct dwarf_cursor *c, dwarf_state_record_t *sr,
                         unw_word_t ip)
{
  int ret;
  switch (c->pi.format)
    {
    case UNW_INFO_FORMAT_TABLE:
    case UNW_INFO_FORMAT_REMOTE_TABLE:
      if ((ret = setup_fde(c, sr)) < 0)
        return ret;
      ret = parse_fde (c, ip, sr);
      break;

    case UNW_INFO_FORMAT_DYNAMIC:
      ret = parse_dynamic (c, ip, sr);
      break;

    default:
      Debug (1, "Unexpected unwind-info format %d\n", c->pi.format);
      ret = -UNW_EINVAL;
    }
  return ret;
}

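/* Evaluate the length-prefixed DWARF expression at ADDR with STACK_VAL as the
   initial stack entry, and translate the result into either a register or a
   memory location.  */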
static inline int
eval_location_expr (struct dwarf_cursor *c, unw_word_t stack_val, unw_addr_space_t as,
                    unw_accessors_t *a, unw_word_t addr,
                    dwarf_loc_t *locp, void *arg)
{
  int ret, is_register;
  unw_word_t len, val;

  /* read the length of the expression: */
  if ((ret = dwarf_read_uleb128 (as, a, &addr, &len, arg)) < 0)
    return ret;

  /* evaluate the expression: */
  if ((ret = dwarf_eval_expr (c, stack_val, &addr, len, &val, &is_register)) < 0)
    return ret;

  if (is_register)
    *locp = DWARF_REG_LOC (c, dwarf_to_unw_regnum (val));
  else
    *locp = DWARF_MEM_LOC (c, val);

  return 0;
}

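/* Apply the register state RS to cursor C: compute the new CFA first, derive
   each preserved register's save location from it, then load the return
   address to obtain the caller's IP.  Returns 1 if there is another frame,
   0 at the end of the stack, or a negative error code.  */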
static int
apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs)
{
  unw_word_t regnum, addr, cfa, ip;
  unw_word_t prev_ip, prev_cfa;
  unw_addr_space_t as;
  dwarf_loc_t cfa_loc;
  unw_accessors_t *a;
  int i, ret;
  void *arg;

  /* In the case that we have incorrect CFI, the return address column may be
   * outside the valid range of data and will read invalid data.  Protect
   * against the errant read and indicate that we have a bad frame.  */
  if (rs->ret_addr_column >= DWARF_NUM_PRESERVED_REGS) {
    Dprintf ("%s: return address entry %zu is outside of range of CIE",
             __FUNCTION__, rs->ret_addr_column);
    return -UNW_EBADFRAME;
  }

  prev_ip = c->ip;
  prev_cfa = c->cfa;

  as = c->as;
  arg = c->as_arg;
  a = unw_get_accessors_int (as);

  /* Evaluate the CFA first, because it may be referred to by other
     expressions.  */

  if (rs->reg.where[DWARF_CFA_REG_COLUMN] == DWARF_WHERE_REG)
    {
      /* CFA is equal to [reg] + offset: */

      /* As a special-case, if the stack-pointer is the CFA and the
         stack-pointer wasn't saved, popping the CFA implicitly pops
         the stack-pointer as well.  */
      if ((rs->reg.val[DWARF_CFA_REG_COLUMN] == UNW_TDEP_SP)
          && (UNW_TDEP_SP < ARRAY_SIZE(rs->reg.val))
          && (rs->reg.where[UNW_TDEP_SP] == DWARF_WHERE_SAME))
        cfa = c->cfa;
      else
        {
          regnum = dwarf_to_unw_regnum (rs->reg.val[DWARF_CFA_REG_COLUMN]);
          if ((ret = unw_get_reg ((unw_cursor_t *) c, regnum, &cfa)) < 0)
            return ret;
        }
      cfa += rs->reg.val[DWARF_CFA_OFF_COLUMN];
    }
  else
    {
      /* CFA is equal to EXPR: */

      assert (rs->reg.where[DWARF_CFA_REG_COLUMN] == DWARF_WHERE_EXPR);

      addr = rs->reg.val[DWARF_CFA_REG_COLUMN];
      /* The dwarf standard doesn't specify an initial value to be pushed on */
      /* the stack before DW_CFA_def_cfa_expression evaluation.  We push on a */
      /* dummy value (0) to keep the eval_location_expr function consistent.  */
      if ((ret = eval_location_expr (c, 0, as, a, addr, &cfa_loc, arg)) < 0)
        return ret;
      /* the returned location better be a memory location... */
      if (DWARF_IS_REG_LOC (cfa_loc))
        return -UNW_EBADFRAME;
      cfa = DWARF_GET_LOC (cfa_loc);
    }

  dwarf_loc_t new_loc[DWARF_NUM_PRESERVED_REGS];
  memcpy(new_loc, c->loc, sizeof(new_loc));

  for (i = 0; i < DWARF_NUM_PRESERVED_REGS; ++i)
    {
      switch ((dwarf_where_t) rs->reg.where[i])
        {
        case DWARF_WHERE_UNDEF:
          new_loc[i] = DWARF_NULL_LOC;
          break;

        case DWARF_WHERE_SAME:
          break;

        case DWARF_WHERE_CFAREL:
          new_loc[i] = DWARF_MEM_LOC (c, cfa + rs->reg.val[i]);
          break;

        case DWARF_WHERE_REG:
#ifdef __s390x__
          /* GPRs can be saved in FPRs on s390x */
          if (unw_is_fpreg (dwarf_to_unw_regnum (rs->reg.val[i])))
            {
              new_loc[i] = DWARF_FPREG_LOC (c, dwarf_to_unw_regnum (rs->reg.val[i]));
              break;
            }
#endif
          new_loc[i] = new_loc[rs->reg.val[i]];
          break;

        case DWARF_WHERE_EXPR:
          addr = rs->reg.val[i];
          /* The dwarf standard requires the current CFA to be pushed on the */
          /* stack before DW_CFA_expression evaluation.  */
          if ((ret = eval_location_expr (c, cfa, as, a, addr, new_loc + i, arg)) < 0)
            return ret;
          break;

        case DWARF_WHERE_VAL_EXPR:
          addr = rs->reg.val[i];
          /* The dwarf standard requires the current CFA to be pushed on the */
          /* stack before DW_CFA_val_expression evaluation.  */
          if ((ret = eval_location_expr (c, cfa, as, a, addr, new_loc + i, arg)) < 0)
            return ret;
          new_loc[i] = DWARF_VAL_LOC (c, DWARF_GET_LOC (new_loc[i]));
          break;

        case DWARF_WHERE_VAL:
          new_loc[i] = DWARF_VAL_LOC (c, DWARF_GET_LOC (new_loc[i]));
          Debug (16, "%s(%d), where[%d]=0x%x, new_loc.val=0x%x\n",
                 __FILE__, __LINE__, i, rs->reg.where[i], new_loc[i].val);
          break;
        }
    }

  memcpy(c->loc, new_loc, sizeof(new_loc));

  c->cfa = cfa;
  /* DWARF spec says undefined return address location means end of stack.  */
  if (DWARF_IS_NULL_LOC (c->loc[rs->ret_addr_column]))
    {
      c->ip = 0;
      ret = 0;
    }
  else
    {
      ret = dwarf_get (c, c->loc[rs->ret_addr_column], &ip);
      if (ret < 0)
        return ret;
      c->ip = ip;
      ret = 1;
    }

  /* XXX: check for ip to be code_aligned */
  if (c->ip == prev_ip && c->cfa == prev_cfa)
    {
      Dprintf ("%s: ip and cfa unchanged; stopping here (ip=0x%lx)\n",
               __FUNCTION__, (long) c->ip);
      return -UNW_EBADFRAME;
    }

  if (c->stash_frames)
    tdep_stash_frame (c, rs);

  return ret;
}

/* Find the saved locations.  */
static int
find_reg_state (struct dwarf_cursor *c, dwarf_state_record_t *sr)
{
  dwarf_reg_state_t *rs = NULL;
  struct dwarf_rs_cache *cache;
  int ret = 0;
  intrmask_t saved_mask;

  if ((cache = get_rs_cache(c->as, &saved_mask)) &&
      (rs = rs_lookup(cache, c)))
    {
      /* update hint; no locking needed: single-word writes are atomic */
      unsigned short index = rs - cache->buckets;
      c->use_prev_instr = ! cache->links[index].signal_frame;
      memcpy (&sr->rs_current, rs, sizeof (*rs));
    }
  else
    {
      ret = fetch_proc_info (c, c->ip);
      int next_use_prev_instr = c->use_prev_instr;
      if (ret >= 0)
        {
          /* Update use_prev_instr for the next frame.  */
          assert(c->pi.unwind_info);
          struct dwarf_cie_info *dci = c->pi.unwind_info;
          next_use_prev_instr = ! dci->signal_frame;
          ret = create_state_record_for (c, sr, c->ip);
        }
      put_unwind_info (c, &c->pi);
      c->use_prev_instr = next_use_prev_instr;

      if (cache && ret >= 0)
        {
          rs = rs_new (cache, c);
          cache->links[rs - cache->buckets].hint = 0;
          memcpy(rs, &sr->rs_current, sizeof(*rs));
        }
    }

  unsigned short index = -1;
  if (cache)
    {
      if (rs)
        {
          index = rs - cache->buckets;
          c->hint = cache->links[index].hint;
          cache->links[c->prev_rs].hint = index + 1;
          c->prev_rs = index;
        }
      if (ret >= 0)
        tdep_reuse_frame (c, cache->links[index].signal_frame);
      put_rs_cache (c->as, cache, &saved_mask);
    }
  return ret;
}

/* The function finds the saved locations and applies the register
   state as well.  */
HIDDEN int
dwarf_step (struct dwarf_cursor *c)
{
  int ret;
  dwarf_state_record_t sr;
  if ((ret = find_reg_state (c, &sr)) < 0)
    return ret;
  return apply_reg_state (c, &sr.rs_current);
}
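
/* A rough sketch of how the per-architecture unw_step() implementations
   typically drive dwarf_step() (illustrative only; the real Gstep.c files
   add target-specific fallbacks such as frame-chain heuristics):

     int ret = dwarf_step (&c->dwarf);
     if (ret < 0 && ret != -UNW_ENOINFO)
       return ret;                            // hard error
     if (ret >= 0)
       return (c->dwarf.ip == 0) ? 0 : 1;     // 0: end of stack, 1: more frames
     // on -UNW_ENOINFO: fall back to architecture-specific unwinding
*/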

HIDDEN int
dwarf_make_proc_info (struct dwarf_cursor *c)
{
#if 0
  if (c->as->caching_policy == UNW_CACHE_NONE
      || get_cached_proc_info (c) < 0)
#endif
  /* Need to check if current frame contains
     args_size, and set cursor appropriately.  Only
     needed for unw_resume */
  dwarf_state_record_t sr;
  sr.args_size = 0;
  int ret;

  /* Look it up the slow way...  */
  ret = fetch_proc_info (c, c->ip);
  if (ret >= 0)
    ret = create_state_record_for (c, &sr, c->ip);
  put_unwind_info (c, &c->pi);
  if (ret < 0)
    return ret;
  c->args_size = sr.args_size;

  return 0;
}

static int
dwarf_reg_states_dynamic_iterate(struct dwarf_cursor *c,
                                 unw_reg_states_callback cb,
                                 void *token)
{
  Debug (1, "Not yet implemented\n");
  return -UNW_ENOINFO;
}

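/* Walk the FDE one row at a time, invoking CB with the register state that
   applies to each IP range within the procedure.  */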
static int
dwarf_reg_states_table_iterate(struct dwarf_cursor *c,
                               unw_reg_states_callback cb,
                               void *token)
{
  dwarf_state_record_t sr;
  int ret = setup_fde(c, &sr);
  struct dwarf_cie_info *dci = c->pi.unwind_info;
  unw_word_t addr = dci->fde_instr_start;
  unw_word_t curr_ip = c->pi.start_ip;
  dwarf_stackable_reg_state_t *rs_stack = NULL;
  while (ret >= 0 && curr_ip < c->pi.end_ip && addr < dci->fde_instr_end)
    {
      unw_word_t prev_ip = curr_ip;
      ret = run_cfi_program (c, &sr, &curr_ip, prev_ip, &addr, dci->fde_instr_end,
                             &rs_stack, dci);
      if (ret >= 0 && prev_ip < curr_ip)
        ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), prev_ip, curr_ip);
    }
  empty_rstate_stack(&rs_stack);
#if defined(NEED_LAST_IP)
  if (ret >= 0 && curr_ip < c->pi.last_ip)
    /* report the dead zone after the procedure ends */
    ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), curr_ip, c->pi.last_ip);
#else
  if (ret >= 0 && curr_ip < c->pi.end_ip)
    /* report for whatever is left before procedure end */
    ret = cb(token, &sr.rs_current, sizeof(sr.rs_current), curr_ip, c->pi.end_ip);
#endif
  return ret;
}

HIDDEN int
dwarf_reg_states_iterate(struct dwarf_cursor *c,
                         unw_reg_states_callback cb,
                         void *token)
{
  int ret = fetch_proc_info (c, c->ip);
  int next_use_prev_instr = c->use_prev_instr;
  if (ret >= 0)
    {
      /* Update use_prev_instr for the next frame.  */
      assert(c->pi.unwind_info);
      struct dwarf_cie_info *dci = c->pi.unwind_info;
      next_use_prev_instr = ! dci->signal_frame;
      switch (c->pi.format)
        {
        case UNW_INFO_FORMAT_TABLE:
        case UNW_INFO_FORMAT_REMOTE_TABLE:
          ret = dwarf_reg_states_table_iterate(c, cb, token);
          break;

        case UNW_INFO_FORMAT_DYNAMIC:
          ret = dwarf_reg_states_dynamic_iterate (c, cb, token);
          break;

        default:
          Debug (1, "Unexpected unwind-info format %d\n", c->pi.format);
          ret = -UNW_EINVAL;
        }
    }
  put_unwind_info (c, &c->pi);
  c->use_prev_instr = next_use_prev_instr;
  return ret;
}

HIDDEN int
dwarf_apply_reg_state (struct dwarf_cursor *c, struct dwarf_reg_state *rs)
{
  return apply_reg_state(c, rs);
}