1
2 /*--------------------------------------------------------------------*/
3 /*--- Format-neutral storage of and querying of info acquired from ---*/
4 /*--- ELF/XCOFF stabs/dwarf1/dwarf2/dwarf3 debug info. ---*/
5 /*--- storage.c ---*/
6 /*--------------------------------------------------------------------*/
7
8 /*
9 This file is part of Valgrind, a dynamic binary instrumentation
10 framework.
11
12 Copyright (C) 2000-2017 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31 */
32
33 /* This file manages the data structures built by the debuginfo
34 system. These are: the top level SegInfo list. For each SegInfo,
35 there are tables for address-to-symbol mappings,
36 address-to-src-file/line mappings, and address-to-CFI-info
37 mappings.
38 */
39
40 #include "pub_core_basics.h"
41 #include "pub_core_options.h" /* VG_(clo_verbosity) */
42 #include "pub_core_debuginfo.h"
43 #include "pub_core_debuglog.h"
44 #include "pub_core_libcassert.h"
45 #include "pub_core_libcbase.h"
46 #include "pub_core_libcprint.h"
47 #include "pub_core_xarray.h"
48 #include "pub_core_oset.h"
49 #include "pub_core_deduppoolalloc.h"
50
51 #include "priv_misc.h" /* dinfo_zalloc/free/strdup */
52 #include "priv_image.h"
53 #include "priv_d3basics.h" /* ML_(pp_GX) */
54 #include "priv_tytypes.h"
55 #include "priv_storage.h" /* self */
56
57
58 /*------------------------------------------------------------*/
59 /*--- Misc (printing, errors) ---*/
60 /*------------------------------------------------------------*/
61
62 /* Show a non-fatal debug info reading error. Use VG_(core_panic) for
63 fatal errors. 'serious' errors are shown regardless of the
64 verbosity setting. */
ML_(symerr)65 void ML_(symerr) ( const DebugInfo* di, Bool serious, const HChar* msg )
66 {
67 /* XML mode hides everything :-( */
68 if (VG_(clo_xml))
69 return;
70
71 if (serious) {
72
73 VG_(message)(Vg_DebugMsg, "WARNING: Serious error when "
74 "reading debug info\n");
75 if (True || VG_(clo_verbosity) < 2) {
76 /* Need to show what the file name is, at verbosity levels 2
77 or below, since that won't already have been shown */
78 VG_(message)(Vg_DebugMsg,
79 "When reading debug info from %s:\n",
80 (di && di->fsm.filename) ? di->fsm.filename
81 : "???");
82 }
83 VG_(message)(Vg_DebugMsg, "%s\n", msg);
84
85 } else { /* !serious */
86
87 if (VG_(clo_verbosity) >= 2)
88 VG_(message)(Vg_DebugMsg, "%s\n", msg);
89
90 }
91 }
92
93
94 /* Print a symbol. */
ML_(ppSym)95 void ML_(ppSym) ( Int idx, const DiSym* sym )
96 {
97 const HChar** sec_names = sym->sec_names;
98 vg_assert(sym->pri_name);
99 if (sec_names)
100 vg_assert(sec_names);
101 VG_(printf)( "%5d: %c%c%c %#8lx .. %#8lx (%u) %s%s",
102 idx,
103 sym->isText ? 'T' : '-',
104 sym->isIFunc ? 'I' : '-',
105 sym->isGlobal ? 'G' : '-',
106 sym->avmas.main,
107 sym->avmas.main + sym->size - 1, sym->size,
108 sym->pri_name, sec_names ? " " : "" );
109 if (sec_names) {
110 while (*sec_names) {
111 VG_(printf)("%s%s", *sec_names, *(sec_names+1) ? " " : "");
112 sec_names++;
113 }
114 }
115 VG_(printf)("\n");
116 }
117
118 /* Print a call-frame-info summary. */
ML_(ppDiCfSI)119 void ML_(ppDiCfSI) ( const XArray* /* of CfiExpr */ exprs,
120 Addr base, UInt len,
121 const DiCfSI_m* si_m )
122 {
123 # define SHOW_HOW(_how, _off) \
124 do { \
125 if (_how == CFIR_UNKNOWN) { \
126 VG_(printf)("Unknown"); \
127 } else \
128 if (_how == CFIR_SAME) { \
129 VG_(printf)("Same"); \
130 } else \
131 if (_how == CFIR_CFAREL) { \
132 VG_(printf)("cfa+%d", _off); \
133 } else \
134 if (_how == CFIR_MEMCFAREL) { \
135 VG_(printf)("*(cfa+%d)", _off); \
136 } else \
137 if (_how == CFIR_EXPR) { \
138 VG_(printf)("{"); \
139 ML_(ppCfiExpr)(exprs, _off); \
140 VG_(printf)("}"); \
141 } else { \
142 vg_assert(0+0); \
143 } \
144 } while (0)
145
146 if (base != 0 || len != 0)
147 VG_(printf)("[%#lx .. %#lx]: ", base, base + len - 1);
148 else
149 VG_(printf)("[]: ");
150
151 switch (si_m->cfa_how) {
152 case CFIC_IA_SPREL:
153 VG_(printf)("let cfa=oldSP+%d", si_m->cfa_off);
154 break;
155 case CFIC_IA_BPREL:
156 VG_(printf)("let cfa=oldBP+%d", si_m->cfa_off);
157 break;
158 case CFIC_ARM_R13REL:
159 VG_(printf)("let cfa=oldR13+%d", si_m->cfa_off);
160 break;
161 case CFIC_ARM_R12REL:
162 VG_(printf)("let cfa=oldR12+%d", si_m->cfa_off);
163 break;
164 case CFIC_ARM_R11REL:
165 VG_(printf)("let cfa=oldR11+%d", si_m->cfa_off);
166 break;
167 case CFIR_SAME:
168 VG_(printf)("let cfa=Same");
169 break;
170 case CFIC_ARM_R7REL:
171 VG_(printf)("let cfa=oldR7+%d", si_m->cfa_off);
172 break;
173 case CFIC_ARM64_SPREL:
174 VG_(printf)("let cfa=oldSP+%d", si_m->cfa_off);
175 break;
176 case CFIC_ARM64_X29REL:
177 VG_(printf)("let cfa=oldX29+%d", si_m->cfa_off);
178 break;
179 case CFIC_EXPR:
180 VG_(printf)("let cfa={");
181 ML_(ppCfiExpr)(exprs, si_m->cfa_off);
182 VG_(printf)("}");
183 break;
184 default:
185 vg_assert(0);
186 }
187
188 VG_(printf)(" in RA=");
189 SHOW_HOW(si_m->ra_how, si_m->ra_off);
190 # if defined(VGA_x86) || defined(VGA_amd64)
191 VG_(printf)(" SP=");
192 SHOW_HOW(si_m->sp_how, si_m->sp_off);
193 VG_(printf)(" BP=");
194 SHOW_HOW(si_m->bp_how, si_m->bp_off);
195 # elif defined(VGA_arm)
196 VG_(printf)(" R14=");
197 SHOW_HOW(si_m->r14_how, si_m->r14_off);
198 VG_(printf)(" R13=");
199 SHOW_HOW(si_m->r13_how, si_m->r13_off);
200 VG_(printf)(" R12=");
201 SHOW_HOW(si_m->r12_how, si_m->r12_off);
202 VG_(printf)(" R11=");
203 SHOW_HOW(si_m->r11_how, si_m->r11_off);
204 VG_(printf)(" R7=");
205 SHOW_HOW(si_m->r7_how, si_m->r7_off);
206 # elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
207 # elif defined(VGA_s390x) || defined(VGA_mips32) || defined(VGA_mips64)
208 VG_(printf)(" SP=");
209 SHOW_HOW(si_m->sp_how, si_m->sp_off);
210 VG_(printf)(" FP=");
211 SHOW_HOW(si_m->fp_how, si_m->fp_off);
212 # elif defined(VGA_arm64)
213 VG_(printf)(" SP=");
214 SHOW_HOW(si_m->sp_how, si_m->sp_off);
215 VG_(printf)(" X30=");
216 SHOW_HOW(si_m->x30_how, si_m->x30_off);
217 VG_(printf)(" X29=");
218 SHOW_HOW(si_m->x29_how, si_m->x29_off);
219 # else
220 # error "Unknown arch"
221 # endif
222 VG_(printf)("\n");
223 # undef SHOW_HOW
224 }
225
226
227 /*------------------------------------------------------------*/
228 /*--- Adding stuff ---*/
229 /*------------------------------------------------------------*/
230
231 /* If not yet in strpool, add a str to the string pool including terminating
232 zero.
233 Return the pointer to the string in strpool.
234 */
ML_(addStr)235 const HChar* ML_(addStr) ( DebugInfo* di, const HChar* str, Int len )
236 {
237 if (len == -1) {
238 len = VG_(strlen)(str);
239 } else {
240 vg_assert(len >= 0);
241 }
242 if (UNLIKELY(di->strpool == NULL))
243 di->strpool = VG_(newDedupPA)(SEGINFO_STRPOOLSIZE,
244 1,
245 ML_(dinfo_zalloc),
246 "di.storage.addStr.1",
247 ML_(dinfo_free));
248 return VG_(allocEltDedupPA) (di->strpool, len+1, str);
249 }
250
ML_(addFnDn)251 UInt ML_(addFnDn) (struct _DebugInfo* di,
252 const HChar* filename,
253 const HChar* dirname)
254 {
255 FnDn fndn;
256 UInt fndn_ix;
257
258 if (UNLIKELY(di->fndnpool == NULL))
259 di->fndnpool = VG_(newDedupPA)(500,
260 vg_alignof(FnDn),
261 ML_(dinfo_zalloc),
262 "di.storage.addFnDn.1",
263 ML_(dinfo_free));
264 fndn.filename = ML_(addStr)(di, filename, -1);
265 fndn.dirname = dirname ? ML_(addStr)(di, dirname, -1) : NULL;
266 fndn_ix = VG_(allocFixedEltDedupPA) (di->fndnpool, sizeof(FnDn), &fndn);
267 return fndn_ix;
268 }
269
ML_(fndn_ix2filename)270 const HChar* ML_(fndn_ix2filename) (const DebugInfo* di,
271 UInt fndn_ix)
272 {
273 FnDn *fndn;
274 if (fndn_ix == 0)
275 return "???";
276 else {
277 fndn = VG_(indexEltNumber) (di->fndnpool, fndn_ix);
278 return fndn->filename;
279 }
280 }
281
ML_(fndn_ix2dirname)282 const HChar* ML_(fndn_ix2dirname) (const DebugInfo* di,
283 UInt fndn_ix)
284 {
285 FnDn *fndn;
286 if (fndn_ix == 0)
287 return "";
288 else {
289 fndn = VG_(indexEltNumber) (di->fndnpool, fndn_ix);
290 if (fndn->dirname)
291 return fndn->dirname;
292 else
293 return "";
294 }
295 }
296
297 /* Add a string to the string table of a DebugInfo, by copying the
298 string from the given DiCursor. Measures the length of the string
299 itself. */
ML_(addStrFromCursor)300 const HChar* ML_(addStrFromCursor)( DebugInfo* di, DiCursor c )
301 {
302 /* This is a less-than-stellar implementation, but it should
303 work. */
304 vg_assert(ML_(cur_is_valid)(c));
305 HChar* str = ML_(cur_read_strdup)(c, "di.addStrFromCursor.1");
306 const HChar* res = ML_(addStr)(di, str, -1);
307 ML_(dinfo_free)(str);
308 return res;
309 }
310
311
312 /* Add a symbol to the symbol table, by copying *sym. 'sym' may only
313 have one name, so there's no complexities to do with deep vs
314 shallow copying of the sec_name array. This is checked.
315 */
ML_(addSym)316 void ML_(addSym) ( struct _DebugInfo* di, DiSym* sym )
317 {
318 UInt new_sz, i;
319 DiSym* new_tab;
320
321 vg_assert(sym->pri_name != NULL);
322 vg_assert(sym->sec_names == NULL);
323
324 /* Ignore zero-sized syms. */
325 if (sym->size == 0) return;
326
327 if (di->symtab_used == di->symtab_size) {
328 new_sz = 2 * di->symtab_size;
329 if (new_sz == 0) new_sz = 500;
330 new_tab = ML_(dinfo_zalloc)( "di.storage.addSym.1",
331 new_sz * sizeof(DiSym) );
332 if (di->symtab != NULL) {
333 for (i = 0; i < di->symtab_used; i++)
334 new_tab[i] = di->symtab[i];
335 ML_(dinfo_free)(di->symtab);
336 }
337 di->symtab = new_tab;
338 di->symtab_size = new_sz;
339 }
340
341 di->symtab[di->symtab_used++] = *sym;
342 vg_assert(di->symtab_used <= di->symtab_size);
343 }
344
ML_(fndn_ix)345 UInt ML_(fndn_ix) (const DebugInfo* di, Word locno)
346 {
347 UInt fndn_ix;
348
349 switch(di->sizeof_fndn_ix) {
350 case 1: fndn_ix = ((UChar*) di->loctab_fndn_ix)[locno]; break;
351 case 2: fndn_ix = ((UShort*) di->loctab_fndn_ix)[locno]; break;
352 case 4: fndn_ix = ((UInt*) di->loctab_fndn_ix)[locno]; break;
353 default: vg_assert(0);
354 }
355 return fndn_ix;
356 }
357
/* Store fndn_ix at loctab position 'locno', widening the element size
   of the whole table (1 -> 2 -> 4 bytes) on demand whenever the index
   does not fit in the current width.  The switch deliberately falls
   through after each widening step. */
static inline void set_fndn_ix (struct _DebugInfo* di, Word locno, UInt fndn_ix)
{
   Word i;

   switch (di->sizeof_fndn_ix) {
      case 1:
         if (LIKELY (fndn_ix <= 255)) {
            ((UChar*) di->loctab_fndn_ix)[locno] = fndn_ix;
            return;
         }
         {
            /* Doesn't fit in a byte: widen the table to shorts. */
            UChar*  old   = (UChar*) di->loctab_fndn_ix;
            UShort* wider = ML_(dinfo_zalloc)( "di.storage.sfix.1",
                                               di->loctab_size * 2 );
            for (i = 0; i < di->loctab_used; i++)
               wider[i] = old[i];
            ML_(dinfo_free)(old);
            di->sizeof_fndn_ix = 2;
            di->loctab_fndn_ix = wider;
         }
         // Fallthrough

      case 2:
         if (LIKELY (fndn_ix <= 65535)) {
            ((UShort*) di->loctab_fndn_ix)[locno] = fndn_ix;
            return;
         }
         {
            /* Doesn't fit in a short: widen the table to ints. */
            UShort* old   = (UShort*) di->loctab_fndn_ix;
            UInt*   wider = ML_(dinfo_zalloc)( "di.storage.sfix.2",
                                               di->loctab_size * 4 );
            for (i = 0; i < di->loctab_used; i++)
               wider[i] = old[i];
            ML_(dinfo_free)(old);
            di->sizeof_fndn_ix = 4;
            di->loctab_fndn_ix = wider;
         }
         // Fallthrough

      case 4:
         ((UInt*) di->loctab_fndn_ix)[locno] = fndn_ix;
         return;

      default: vg_assert(0);
   }
}
404
405
406 // Comment the below line to trace LOCTAB merging/canonicalising
407 #define TRACE_LOCTAB_CANON(msg,prev_loc,cur_loc)
408 #ifndef TRACE_LOCTAB_CANON
409 #define TRACE_LOCTAB_CANON(msg,prev_loc,cur_loc) \
410 VG_(printf)("%s previous: addr %#lx, size %d, line %d, " \
411 " current: addr %#lx, size %d, line %d.\n", \
412 msg, \
413 (prev_loc)->addr, (prev_loc)->size, (prev_loc)->lineno, \
414 (cur_loc)->addr, (cur_loc)->size, (cur_loc)->lineno);
415 #endif
416
417 /* Add a location to the location table.
418 */
/* Add a location to the location table. */
static void addLoc ( struct _DebugInfo* di, DiLoc* loc, UInt fndn_ix )
{
   /* Zero-sized locs should have been ignored earlier */
   vg_assert(loc->size > 0);

   /* If the last entry ends exactly where this one starts and is for
      the same line, extend it in place instead of appending (subject
      to the MAX_LOC_SIZE cap on a single entry). */
   if (di->loctab_used > 0) {
      DiLoc* prev = &di->loctab[di->loctab_used - 1];
      if (prev->lineno == loc->lineno
          && prev->addr + prev->size == loc->addr) {
         if (prev->size + loc->size <= MAX_LOC_SIZE) {
            TRACE_LOCTAB_CANON ("addLoc merging", prev, loc);
            prev->size += loc->size;
            return;
         }
         TRACE_LOCTAB_CANON ("addLoc merging not done (maxsize)",
                             prev, loc);
      }
   }

   /* Grow both parallel tables (the locs and their fndn indexes). */
   if (di->loctab_used == di->loctab_size) {
      UInt   grown_sz;
      DiLoc* grown_loctab;
      void*  grown_fndn_ix;

      grown_sz = 2 * di->loctab_size;
      if (grown_sz == 0) grown_sz = 500;
      grown_loctab = ML_(dinfo_zalloc)( "di.storage.addLoc.1",
                                        grown_sz * sizeof(DiLoc) );
      if (di->sizeof_fndn_ix == 0)
         di->sizeof_fndn_ix = 1; // To start with.
      grown_fndn_ix = ML_(dinfo_zalloc)( "di.storage.addLoc.2",
                                         grown_sz * di->sizeof_fndn_ix );
      if (di->loctab != NULL) {
         VG_(memcpy)(grown_loctab, di->loctab,
                     di->loctab_used * sizeof(DiLoc));
         VG_(memcpy)(grown_fndn_ix, di->loctab_fndn_ix,
                     di->loctab_used * di->sizeof_fndn_ix);
         ML_(dinfo_free)(di->loctab);
         ML_(dinfo_free)(di->loctab_fndn_ix);
      }
      di->loctab = grown_loctab;
      di->loctab_fndn_ix = grown_fndn_ix;
      di->loctab_size = grown_sz;
   }

   di->loctab[di->loctab_used] = *loc;
   set_fndn_ix (di, di->loctab_used, fndn_ix);
   di->loctab_used++;
   vg_assert(di->loctab_used <= di->loctab_size);
}
471
472
473 /* Resize the LocTab (line number table) to save memory, by removing
474 (and, potentially, allowing m_mallocfree to unmap) any unused space
475 at the end of the table. */
shrinkLocTab(struct _DebugInfo * di)476 static void shrinkLocTab ( struct _DebugInfo* di )
477 {
478 UWord new_sz = di->loctab_used;
479 if (new_sz == di->loctab_size) return;
480 vg_assert(new_sz < di->loctab_size);
481 ML_(dinfo_shrink_block)( di->loctab, new_sz * sizeof(DiLoc));
482 ML_(dinfo_shrink_block)( di->loctab_fndn_ix, new_sz * di->sizeof_fndn_ix);
483 di->loctab_size = new_sz;
484 }
485
486 #define COMPLAIN_ONCE(what, limit, limit_op) \
487 { \
488 static Bool complained = False; \
489 if (!complained) { \
490 complained = True; \
491 VG_(message)(Vg_UserMsg, \
492 "warning: Can't handle " what " with " \
493 "line number %d " limit_op " than %d\n", \
494 lineno, limit); \
495 VG_(message)(Vg_UserMsg, \
496 "(Nb: this message is only shown once)\n"); \
497 } \
498 }
499
500
501 /* Top-level place to call to add a source-location mapping entry.
502 */
ML_(addLineInfo)503 void ML_(addLineInfo) ( struct _DebugInfo* di,
504 UInt fndn_ix,
505 Addr this,
506 Addr next,
507 Int lineno,
508 Int entry /* only needed for debug printing */
509 )
510 {
511 static const Bool debug = False;
512 DiLoc loc;
513 UWord size = next - this;
514
515 /* Ignore zero-sized locs */
516 if (this == next) return;
517
518 if (debug) {
519 FnDn *fndn = VG_(indexEltNumber) (di->fndnpool, fndn_ix);
520 VG_(printf)( " src ix %u %s %s line %d %#lx-%#lx\n",
521 fndn_ix,
522 fndn->dirname ? fndn->dirname : "(unknown)",
523 fndn->filename, lineno, this, next );
524 }
525
526 /* Maximum sanity checking. Some versions of GNU as do a shabby
527 * job with stabs entries; if anything looks suspicious, revert to
528 * a size of 1. This should catch the instruction of interest
529 * (since if using asm-level debug info, one instruction will
530 * correspond to one line, unlike with C-level debug info where
531 * multiple instructions can map to the one line), but avoid
532 * catching any other instructions bogusly. */
533 if (this > next) {
534 if (VG_(clo_verbosity) > 2) {
535 VG_(message)(Vg_DebugMsg,
536 "warning: line info addresses out of order "
537 "at entry %d: 0x%lx 0x%lx\n", entry, this, next);
538 }
539 size = 1;
540 }
541
542 if (size > MAX_LOC_SIZE) {
543 if (0)
544 VG_(message)(Vg_DebugMsg,
545 "warning: line info address range too large "
546 "at entry %d: %lu\n", entry, size);
547 size = 1;
548 }
549
550 /* At this point, we know that the original value for |size|, viz
551 |next - this|, will only still be used in the case where
552 |this| <u |next|, so it can't have underflowed. Considering
553 that and the three checks that follow it, the following must
554 hold. */
555 vg_assert(size >= 1);
556 vg_assert(size <= MAX_LOC_SIZE);
557
558 /* Rule out ones which are completely outside the r-x mapped area.
559 See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
560 for background and rationale. */
561 vg_assert(di->fsm.have_rx_map && di->fsm.have_rw_map);
562 if (ML_(find_rx_mapping)(di, this, this + size - 1) == NULL) {
563 if (0)
564 VG_(message)(Vg_DebugMsg,
565 "warning: ignoring line info entry falling "
566 "outside current DebugInfo: %#lx %#lx %#lx %#lx\n",
567 di->text_avma,
568 di->text_avma + di->text_size,
569 this, this + size - 1);
570 return;
571 }
572
573 if (lineno < 0) {
574 COMPLAIN_ONCE("line info entry", 0, "smaller");
575 return;
576 }
577 if (lineno > MAX_LINENO) {
578 COMPLAIN_ONCE("line info entry", MAX_LINENO, "greater");
579 return;
580 }
581
582 loc.addr = this;
583 loc.size = (UShort)size;
584 loc.lineno = lineno;
585
586 if (0) VG_(message)(Vg_DebugMsg,
587 "addLoc: addr %#lx, size %lu, line %d, fndn_ix %u\n",
588 this,size,lineno,fndn_ix);
589
590 addLoc ( di, &loc, fndn_ix );
591 }
592
593 /* Add an inlined call info to the inlined call table.
594 */
/* Add an inlined call info to the inlined call table, by copying *inl.
   Also keeps di->maxinl_codesz up to date with the largest inlined
   code range seen so far. */
static void addInl ( struct _DebugInfo* di, DiInlLoc* inl )
{
   UInt new_sz;
   DiInlLoc* new_tab;

   /* empty inl should have been ignored earlier */
   vg_assert(inl->addr_lo < inl->addr_hi);

   /* Grow the table (doubling; 500 to start) when full.  Relocate with
      VG_(memcpy), consistent with addLoc and ML_(addDiCfSI), instead
      of the former element-by-element copy loop. */
   if (di->inltab_used == di->inltab_size) {
      new_sz = 2 * di->inltab_size;
      if (new_sz == 0) new_sz = 500;
      new_tab = ML_(dinfo_zalloc)( "di.storage.addInl.1",
                                   new_sz * sizeof(DiInlLoc) );
      if (di->inltab != NULL) {
         VG_(memcpy)(new_tab, di->inltab,
                     di->inltab_used * sizeof(DiInlLoc));
         ML_(dinfo_free)(di->inltab);
      }
      di->inltab = new_tab;
      di->inltab_size = new_sz;
   }

   di->inltab[di->inltab_used] = *inl;
   if (inl->addr_hi - inl->addr_lo > di->maxinl_codesz)
      di->maxinl_codesz = inl->addr_hi - inl->addr_lo;
   di->inltab_used++;
   vg_assert(di->inltab_used <= di->inltab_size);
}
623
624
625 /* Resize the InlTab (inlined call table) to save memory, by removing
626 (and, potentially, allowing m_mallocfree to unmap) any unused space
627 at the end of the table. */
shrinkInlTab(struct _DebugInfo * di)628 static void shrinkInlTab ( struct _DebugInfo* di )
629 {
630 UWord new_sz = di->inltab_used;
631 if (new_sz == di->inltab_size) return;
632 vg_assert(new_sz < di->inltab_size);
633 ML_(dinfo_shrink_block)( di->inltab, new_sz * sizeof(DiInlLoc));
634 di->inltab_size = new_sz;
635 }
636
637 /* Top-level place to call to add a addr-to-inlined fn info. */
ML_(addInlInfo)638 void ML_(addInlInfo) ( struct _DebugInfo* di,
639 Addr addr_lo, Addr addr_hi,
640 const HChar* inlinedfn,
641 UInt fndn_ix,
642 Int lineno, UShort level)
643 {
644 DiInlLoc inl;
645
646 /* Similar paranoia as in ML_(addLineInfo). Unclear if needed. */
647 if (addr_lo >= addr_hi) {
648 if (VG_(clo_verbosity) > 2) {
649 VG_(message)(Vg_DebugMsg,
650 "warning: inlined info addresses out of order "
651 "at: 0x%lx 0x%lx\n", addr_lo, addr_hi);
652 }
653 addr_hi = addr_lo + 1;
654 }
655
656 if (lineno < 0) {
657 COMPLAIN_ONCE ("inlined call info entry", 0, "smaller");
658 return;
659 }
660 if (lineno > MAX_LINENO) {
661 COMPLAIN_ONCE ("inlined call info entry", MAX_LINENO, "greater");
662 return;
663 }
664
665 // code resulting from inlining of inlinedfn:
666 inl.addr_lo = addr_lo;
667 inl.addr_hi = addr_hi;
668 inl.inlinedfn = inlinedfn;
669 // caller:
670 inl.fndn_ix = fndn_ix;
671 inl.lineno = lineno;
672 inl.level = level;
673
674 if (0) VG_(message)
675 (Vg_DebugMsg,
676 "addInlInfo: fn %s inlined as addr_lo %#lx,addr_hi %#lx,"
677 "caller fndn_ix %u %s:%d\n",
678 inlinedfn, addr_lo, addr_hi, fndn_ix,
679 ML_(fndn_ix2filename) (di, fndn_ix), lineno);
680
681 addInl ( di, &inl );
682 }
683
ML_(get_cfsi_m)684 DiCfSI_m* ML_(get_cfsi_m) (const DebugInfo* di, UInt pos)
685 {
686 UInt cfsi_m_ix;
687
688 vg_assert(pos >= 0 && pos < di->cfsi_used);
689 switch (di->sizeof_cfsi_m_ix) {
690 case 1: cfsi_m_ix = ((UChar*) di->cfsi_m_ix)[pos]; break;
691 case 2: cfsi_m_ix = ((UShort*) di->cfsi_m_ix)[pos]; break;
692 case 4: cfsi_m_ix = ((UInt*) di->cfsi_m_ix)[pos]; break;
693 default: vg_assert(0);
694 }
695 if (cfsi_m_ix == 0)
696 return NULL; // cfi hole
697 else
698 return VG_(indexEltNumber) (di->cfsi_m_pool, cfsi_m_ix);
699 }
700
701 /* Top-level place to call to add a CFI summary record. The supplied
702 DiCfSI_m is copied. */
ML_(addDiCfSI)703 void ML_(addDiCfSI) ( struct _DebugInfo* di,
704 Addr base, UInt len, DiCfSI_m* cfsi_m )
705 {
706 static const Bool debug = False;
707 UInt new_sz;
708 DiCfSI* new_tab;
709 SSizeT delta;
710 DebugInfoMapping* map;
711 DebugInfoMapping* map2;
712
713 if (debug) {
714 VG_(printf)("adding DiCfSI: ");
715 ML_(ppDiCfSI)(di->cfsi_exprs, base, len, cfsi_m);
716 }
717
718 /* sanity */
719 vg_assert(len > 0);
720 /* Issue a warning if LEN is unexpectedly large (exceeds 5 million).
721 The implication is you have a single procedure
722 with more than 5 million bytes of code. Which is pretty
723 unlikely. Either that, or the debuginfo reader is somehow
724 broken. 5 million is of course arbitrary; but it's big enough
725 to be bigger than the size of any plausible piece of code that
726 would fall within a single procedure. But occasionally it does
727 happen (c.f. BZ #339542). */
728 if (len >= 5000000)
729 VG_(message)(Vg_DebugMsg,
730 "warning: DiCfSI %#lx .. %#lx is huge; length = %u (%s)\n",
731 base, base + len - 1, len, di->soname);
732
733 vg_assert(di->fsm.have_rx_map && di->fsm.have_rw_map);
734 /* Find mapping where at least one end of the CFSI falls into. */
735 map = ML_(find_rx_mapping)(di, base, base);
736 map2 = ML_(find_rx_mapping)(di, base + len - 1,
737 base + len - 1);
738 if (map == NULL)
739 map = map2;
740 else if (map2 == NULL)
741 map2 = map;
742
743 /* Rule out ones which are completely outside the r-x mapped area
744 (or which span across different areas).
745 See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
746 for background and rationale. */
747 if (map == NULL || map != map2) {
748 static Int complaints = 10;
749 if (VG_(clo_trace_cfi) || complaints > 0) {
750 complaints--;
751 if (VG_(clo_verbosity) > 1) {
752 VG_(message)(
753 Vg_DebugMsg,
754 "warning: DiCfSI %#lx .. %#lx outside mapped rx segments (%s)\n",
755 base,
756 base + len - 1,
757 di->soname
758 );
759 }
760 if (VG_(clo_trace_cfi))
761 ML_(ppDiCfSI)(di->cfsi_exprs, base, len, cfsi_m);
762 }
763 return;
764 }
765
766 /* Now we know the range is at least partially inside the r-x
767 mapped area. That implies that at least one of the ends of the
768 range falls inside the area. If necessary, clip it so it is
769 completely within the area. If we don't do this,
770 check_CFSI_related_invariants() in debuginfo.c (invariant #2)
771 will fail. See
772 "Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS" in
773 priv_storage.h for background. */
774 if (base < map->avma) {
775 /* Lower end is outside the mapped area. Hence upper end must
776 be inside it. */
777 if (0) VG_(printf)("XXX truncate lower\n");
778 vg_assert(base + len - 1 >= map->avma);
779 delta = (SSizeT)(map->avma - base);
780 vg_assert(delta > 0);
781 vg_assert(delta < (SSizeT)len);
782 base += delta;
783 len -= delta;
784 }
785 else
786 if (base + len - 1 > map->avma + map->size - 1) {
787 /* Upper end is outside the mapped area. Hence lower end must be
788 inside it. */
789 if (0) VG_(printf)("XXX truncate upper\n");
790 vg_assert(base <= map->avma + map->size - 1);
791 delta = (SSizeT)( (base + len - 1)
792 - (map->avma + map->size - 1) );
793 vg_assert(delta > 0);
794 vg_assert(delta < (SSizeT)len);
795 len -= delta;
796 }
797
798 /* Final checks */
799
800 /* Because: either cfsi was entirely inside the range, in which
801 case we asserted that len > 0 at the start, OR it fell partially
802 inside the range, in which case we reduced it by some size
803 (delta) which is < its original size. */
804 vg_assert(len > 0);
805
806 /* Similar logic applies for the next two assertions. */
807 vg_assert(base >= map->avma);
808 vg_assert(base + len - 1
809 <= map->avma + map->size - 1);
810
811 if (di->cfsi_used == di->cfsi_size) {
812 new_sz = 2 * di->cfsi_size;
813 if (new_sz == 0) new_sz = 20;
814 new_tab = ML_(dinfo_zalloc)( "di.storage.addDiCfSI.1",
815 new_sz * sizeof(DiCfSI) );
816 if (di->cfsi_rd != NULL) {
817 VG_(memcpy)(new_tab, di->cfsi_rd,
818 di->cfsi_used * sizeof(DiCfSI));
819 ML_(dinfo_free)(di->cfsi_rd);
820 }
821 di->cfsi_rd = new_tab;
822 di->cfsi_size = new_sz;
823 if (di->cfsi_m_pool == NULL)
824 di->cfsi_m_pool = VG_(newDedupPA)(1000 * sizeof(DiCfSI_m),
825 vg_alignof(DiCfSI_m),
826 ML_(dinfo_zalloc),
827 "di.storage.DiCfSI_m_pool",
828 ML_(dinfo_free));
829 }
830
831 di->cfsi_rd[di->cfsi_used].base = base;
832 di->cfsi_rd[di->cfsi_used].len = len;
833 di->cfsi_rd[di->cfsi_used].cfsi_m_ix
834 = VG_(allocFixedEltDedupPA)(di->cfsi_m_pool,
835 sizeof(DiCfSI_m),
836 cfsi_m);
837 di->cfsi_used++;
838 vg_assert(di->cfsi_used <= di->cfsi_size);
839 }
840
841
ML_(CfiExpr_Undef)842 Int ML_(CfiExpr_Undef)( XArray* dst )
843 {
844 CfiExpr e;
845 VG_(memset)( &e, 0, sizeof(e) );
846 e.tag = Cex_Undef;
847 return (Int)VG_(addToXA)( dst, &e );
848 }
ML_(CfiExpr_Deref)849 Int ML_(CfiExpr_Deref)( XArray* dst, Int ixAddr )
850 {
851 CfiExpr e;
852 VG_(memset)( &e, 0, sizeof(e) );
853 e.tag = Cex_Deref;
854 e.Cex.Deref.ixAddr = ixAddr;
855 return (Int)VG_(addToXA)( dst, &e );
856 }
ML_(CfiExpr_Const)857 Int ML_(CfiExpr_Const)( XArray* dst, UWord con )
858 {
859 CfiExpr e;
860 VG_(memset)( &e, 0, sizeof(e) );
861 e.tag = Cex_Const;
862 e.Cex.Const.con = con;
863 return (Int)VG_(addToXA)( dst, &e );
864 }
ML_(CfiExpr_Unop)865 Int ML_(CfiExpr_Unop)( XArray* dst, CfiUnop op, Int ix )
866 {
867 CfiExpr e;
868 VG_(memset)( &e, 0, sizeof(e) );
869 e.tag = Cex_Unop;
870 e.Cex.Unop.op = op;
871 e.Cex.Unop.ix = ix;
872 return (Int)VG_(addToXA)( dst, &e );
873 }
ML_(CfiExpr_Binop)874 Int ML_(CfiExpr_Binop)( XArray* dst, CfiBinop op, Int ixL, Int ixR )
875 {
876 CfiExpr e;
877 VG_(memset)( &e, 0, sizeof(e) );
878 e.tag = Cex_Binop;
879 e.Cex.Binop.op = op;
880 e.Cex.Binop.ixL = ixL;
881 e.Cex.Binop.ixR = ixR;
882 return (Int)VG_(addToXA)( dst, &e );
883 }
ML_(CfiExpr_CfiReg)884 Int ML_(CfiExpr_CfiReg)( XArray* dst, CfiReg reg )
885 {
886 CfiExpr e;
887 VG_(memset)( &e, 0, sizeof(e) );
888 e.tag = Cex_CfiReg;
889 e.Cex.CfiReg.reg = reg;
890 return (Int)VG_(addToXA)( dst, &e );
891 }
ML_(CfiExpr_DwReg)892 Int ML_(CfiExpr_DwReg)( XArray* dst, Int reg )
893 {
894 CfiExpr e;
895 VG_(memset)( &e, 0, sizeof(e) );
896 e.tag = Cex_DwReg;
897 e.Cex.DwReg.reg = reg;
898 return (Int)VG_(addToXA)( dst, &e );
899 }
900
/* Print the textual form of a unary CFI operator. */
static void ppCfiUnop ( CfiUnop op )
{
   const HChar* s = NULL;
   switch (op) {
      case Cunop_Abs: s = "abs"; break;
      case Cunop_Neg: s = "-";   break;
      case Cunop_Not: s = "~";   break;
      default: vg_assert(0);
   }
   VG_(printf)("%s", s);
}
910
/* Print the textual form of a binary CFI operator. */
static void ppCfiBinop ( CfiBinop op )
{
   const HChar* s = NULL;
   switch (op) {
      case Cbinop_Add: s = "+";  break;
      case Cbinop_Sub: s = "-";  break;
      case Cbinop_And: s = "&";  break;
      case Cbinop_Mul: s = "*";  break;
      case Cbinop_Shl: s = "<<"; break;
      case Cbinop_Shr: s = ">>"; break;
      case Cbinop_Eq:  s = "=="; break;
      case Cbinop_Ge:  s = ">="; break;
      case Cbinop_Gt:  s = ">";  break;
      case Cbinop_Le:  s = "<="; break;
      case Cbinop_Lt:  s = "<";  break;
      case Cbinop_Ne:  s = "!="; break;
      default: vg_assert(0);
   }
   VG_(printf)("%s", s);
}
929
/* Print the textual name of an abstract CFI register. */
static void ppCfiReg ( CfiReg reg )
{
   const HChar* s = NULL;
   switch (reg) {
      case Creg_INVALID:   s = "Creg_INVALID"; break;
      case Creg_IA_SP:     s = "xSP"; break;
      case Creg_IA_BP:     s = "xBP"; break;
      case Creg_IA_IP:     s = "xIP"; break;
      case Creg_ARM_R13:   s = "R13"; break;
      case Creg_ARM_R12:   s = "R12"; break;
      case Creg_ARM_R15:   s = "R15"; break;
      case Creg_ARM_R14:   s = "R14"; break;
      case Creg_ARM_R7:    s = "R7";  break;
      case Creg_ARM64_X30: s = "X30"; break;
      case Creg_MIPS_RA:   s = "RA";  break;
      case Creg_S390_IA:   s = "IA";  break;
      case Creg_S390_SP:   s = "SP";  break;
      case Creg_S390_FP:   s = "FP";  break;
      case Creg_S390_LR:   s = "LR";  break;
      default: vg_assert(0);
   }
   VG_(printf)("%s", s);
}
951
ML_(ppCfiExpr)952 void ML_(ppCfiExpr)( const XArray* src, Int ix )
953 {
954 /* VG_(indexXA) checks for invalid src/ix values, so we can
955 use it indiscriminately. */
956 const CfiExpr* e = VG_(indexXA)( src, ix );
957 switch (e->tag) {
958 case Cex_Undef:
959 VG_(printf)("Undef");
960 break;
961 case Cex_Deref:
962 VG_(printf)("*(");
963 ML_(ppCfiExpr)(src, e->Cex.Deref.ixAddr);
964 VG_(printf)(")");
965 break;
966 case Cex_Const:
967 VG_(printf)("0x%lx", e->Cex.Const.con);
968 break;
969 case Cex_Unop:
970 ppCfiUnop(e->Cex.Unop.op);
971 VG_(printf)("(");
972 ML_(ppCfiExpr)(src, e->Cex.Unop.ix);
973 VG_(printf)(")");
974 break;
975 case Cex_Binop:
976 VG_(printf)("(");
977 ML_(ppCfiExpr)(src, e->Cex.Binop.ixL);
978 VG_(printf)(")");
979 ppCfiBinop(e->Cex.Binop.op);
980 VG_(printf)("(");
981 ML_(ppCfiExpr)(src, e->Cex.Binop.ixR);
982 VG_(printf)(")");
983 break;
984 case Cex_CfiReg:
985 ppCfiReg(e->Cex.CfiReg.reg);
986 break;
987 case Cex_DwReg:
988 VG_(printf)("dwr%d", e->Cex.DwReg.reg);
989 break;
990 default:
991 VG_(core_panic)("ML_(ppCfiExpr)");
992 /*NOTREACHED*/
993 break;
994 }
995 }
996
997
ML_(cmp_for_DiAddrRange_range)998 Word ML_(cmp_for_DiAddrRange_range) ( const void* keyV,
999 const void* elemV ) {
1000 const Addr* key = (const Addr*)keyV;
1001 const DiAddrRange* elem = (const DiAddrRange*)elemV;
1002 if (0)
1003 VG_(printf)("cmp_for_DiAddrRange_range: %#lx vs %#lx\n",
1004 *key, elem->aMin);
1005 if ((*key) < elem->aMin) return -1;
1006 if ((*key) > elem->aMax) return 1;
1007 return 0;
1008 }
1009
1010 static
show_scope(OSet * scope,const HChar * who)1011 void show_scope ( OSet* /* of DiAddrRange */ scope, const HChar* who )
1012 {
1013 DiAddrRange* range;
1014 VG_(printf)("Scope \"%s\" = {\n", who);
1015 VG_(OSetGen_ResetIter)( scope );
1016 while (True) {
1017 range = VG_(OSetGen_Next)( scope );
1018 if (!range) break;
1019 VG_(printf)(" %#lx .. %#lx: %ld vars\n", range->aMin, range->aMax,
1020 range->vars ? VG_(sizeXA)(range->vars) : 0);
1021 }
1022 VG_(printf)("}\n");
1023 }
1024
1025 /* Add the variable 'var' to 'scope' for the address range [aMin,aMax]
1026 (inclusive of aMin and aMax). Split existing ranges as required if
1027 aMin or aMax or both don't match existing range boundaries, and add
1028 'var' to all required ranges. Take great care to preserve the
1029 invariant that the ranges in 'scope' cover the entire address range
1030 exactly once, with no overlaps and no holes. */
/* See the comment block above for the contract: add 'var' to every
   range in 'scope' covering [aMin, aMax], splitting boundary ranges
   as needed so the scope's no-gaps/no-overlaps invariant holds. */
static void add_var_to_arange (
               /*MOD*/OSet* /* of DiAddrRange */ scope,
               Addr aMin,
               Addr aMax,
               DiVariable* var
            )
{
   DiAddrRange *first, *last, *range;
   /* These xx variables are for assertion checking only; they don't
      contribute anything to the actual work of this function. */
   DiAddrRange *xxRangep, *xxFirst, *xxLast;
   UWord       xxIters;

   vg_assert(aMin <= aMax);

   if (0) VG_(printf)("add_var_to_arange: %#lx .. %#lx\n", aMin, aMax);
   if (0) show_scope( scope, "add_var_to_arange(1)" );

   /* See if the lower end of the range (aMin) falls exactly on an
      existing range boundary.  If not, find the range it does fall
      into, and split it (copying the variables in the process), so
      that aMin does exactly fall on a range boundary. */
   first = VG_(OSetGen_Lookup)( scope, &aMin );
   /* It must be present, since the presented OSet must cover
      the entire address range. */
   vg_assert(first);
   vg_assert(first->aMin <= first->aMax);
   vg_assert(first->aMin <= aMin && aMin <= first->aMax);

   /* Fast track common case, which is that the range specified for
      the variable exactly coincides with one already-existing
      range. */
   if (first->aMin == aMin && first->aMax == aMax) {
      vg_assert(first->vars);
      VG_(addToXA)( first->vars, var );
      return;
   }

   /* We have to get into splitting ranges, which is complex
      and slow. */
   if (first->aMin < aMin) {
      DiAddrRange* nyu;
      /* Ok.  We'll have to split 'first'. */
      /* truncate the upper end of 'first' */
      Addr tmp = first->aMax;
      first->aMax = aMin-1;
      vg_assert(first->aMin <= first->aMax);
      /* create a new range */
      nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
      nyu->aMin = aMin;
      nyu->aMax = tmp;
      vg_assert(nyu->aMin <= nyu->aMax);
      /* copy vars into it */
      vg_assert(first->vars);
      nyu->vars = VG_(cloneXA)( "di.storage.avta.1", first->vars );
      VG_(OSetGen_Insert)( scope, nyu );
      first = nyu;
   }

   vg_assert(first->aMin == aMin);

   /* Now do exactly the same for the upper end (aMax): if it doesn't
      fall on a boundary, cause it to do so by splitting the range it
      does currently fall into. */
   last = VG_(OSetGen_Lookup)( scope, &aMax );
   vg_assert(last->aMin <= last->aMax);
   vg_assert(last->aMin <= aMax && aMax <= last->aMax);

   if (aMax < last->aMax) {
      DiAddrRange* nyu;
      /* We have to split 'last'. */
      /* truncate the lower end of 'last' */
      Addr tmp = last->aMin;
      last->aMin = aMax+1;
      vg_assert(last->aMin <= last->aMax);
      /* create a new range */
      nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
      nyu->aMin = tmp;
      nyu->aMax = aMax;
      vg_assert(nyu->aMin <= nyu->aMax);
      /* copy vars into it */
      vg_assert(last->vars);
      nyu->vars = VG_(cloneXA)( "di.storage.avta.2", last->vars );
      VG_(OSetGen_Insert)( scope, nyu );
      last = nyu;
   }

   vg_assert(aMax == last->aMax);

   /* Sanity check: aMin and aMax now lie exactly on range boundaries,
      and (if distinct) 'first' precedes 'last' with no overlap. */
   xxFirst = (DiAddrRange*)VG_(OSetGen_Lookup)(scope, &aMin);
   xxLast  = (DiAddrRange*)VG_(OSetGen_Lookup)(scope, &aMax);
   vg_assert(xxFirst);
   vg_assert(xxLast);
   vg_assert(xxFirst->aMin == aMin);
   vg_assert(xxLast->aMax == aMax);
   if (xxFirst != xxLast)
      vg_assert(xxFirst->aMax < xxLast->aMin);

   /* Great.  Now we merely need to iterate over the segments from
      'first' to 'last' inclusive, and add 'var' to the variable set
      of each of them. */
   if (0) {
      static UWord ctr = 0;
      ctr++;
      VG_(printf)("ctr = %lu\n", ctr);
      if (ctr >= 33263) show_scope( scope, "add_var_to_arange(2)" );
   }

   xxIters = 0;
   range = xxRangep = NULL;
   VG_(OSetGen_ResetIterAt)( scope, &aMin );
   while (True) {
      xxRangep = range;
      range    = VG_(OSetGen_Next)( scope );
      if (!range) break;
      if (range->aMin > aMax) break;
      xxIters++;
      if (0) VG_(printf)("have range %#lx %#lx\n",
                         range->aMin, range->aMax);

      /* Sanity checks */
      if (!xxRangep) {
         /* This is the first in the range */
         vg_assert(range->aMin == aMin);
      } else {
         /* Consecutive ranges must abut exactly (no gaps, no overlap). */
         vg_assert(xxRangep->aMax + 1 == range->aMin);
      }

      vg_assert(range->vars);
      VG_(addToXA)( range->vars, var );
   }
   /* Done.  We should have seen at least one range. */
   vg_assert(xxIters >= 1);
   if (xxIters == 1)      vg_assert(xxFirst == xxLast);
   if (xxFirst == xxLast) vg_assert(xxIters == 1);
   vg_assert(xxRangep);
   vg_assert(xxRangep->aMax == aMax);
   vg_assert(xxRangep == xxLast);
}
1170
1171
1172 /* Top-level place to call to add a variable description (as extracted
1173 from a DWARF3 .debug_info section. */
ML_(addVar)1174 void ML_(addVar)( struct _DebugInfo* di,
1175 Int level,
1176 Addr aMin,
1177 Addr aMax,
1178 const HChar* name, /* in di's .strpool */
1179 UWord typeR, /* a cuOff */
1180 const GExpr* gexpr,
1181 const GExpr* fbGX,
1182 UInt fndn_ix, /* where decl'd - may be zero.
1183 index in in di's .fndnpool */
1184 Int lineNo, /* where decl'd - may be zero */
1185 Bool show )
1186 {
1187 OSet* /* of DiAddrRange */ scope;
1188 DiVariable var;
1189 Bool all;
1190 TyEnt* ent;
1191 MaybeULong mul;
1192 const HChar* badness;
1193
1194 vg_assert(di && di->admin_tyents);
1195
1196 if (0) {
1197 VG_(printf)(" ML_(addVar): level %d %#lx-%#lx %s :: ",
1198 level, aMin, aMax, name );
1199 ML_(pp_TyEnt_C_ishly)( di->admin_tyents, typeR );
1200 VG_(printf)("\n Var=");
1201 ML_(pp_GX)(gexpr);
1202 VG_(printf)("\n");
1203 if (fbGX) {
1204 VG_(printf)(" FrB=");
1205 ML_(pp_GX)( fbGX );
1206 VG_(printf)("\n");
1207 } else {
1208 VG_(printf)(" FrB=none\n");
1209 }
1210 VG_(printf)("\n");
1211 }
1212
1213 vg_assert(level >= 0);
1214 vg_assert(aMin <= aMax);
1215 vg_assert(name);
1216 vg_assert(gexpr);
1217
1218 ent = ML_(TyEnts__index_by_cuOff)( di->admin_tyents, NULL, typeR);
1219 vg_assert(ent);
1220 vg_assert(ML_(TyEnt__is_type)(ent));
1221
1222 /* "Comment_Regarding_Text_Range_Checks" (is referred to elsewhere)
1223 ----------------------------------------------------------------
1224 Ignore any variables whose aMin .. aMax (that is, range of text
1225 addresses for which they actually exist) falls outside the text
1226 segment. Is this indicative of a bug in the reader? Maybe.
1227 (LATER): instead of restricting strictly to the .text segment,
1228 be a bit more relaxed, and accept any variable whose text range
1229 falls inside the r-x mapped area. This is useful because .text
1230 is not always the only instruction-carrying segment: others are:
1231 .init .plt __libc_freeres_fn and .fini. This implicitly assumes
1232 that those extra sections have the same bias as .text, but that
1233 seems a reasonable assumption to me. */
1234 /* This is assured us by top level steering logic in debuginfo.c,
1235 and it is re-checked at the start of
1236 ML_(read_elf_debug_info). */
1237 vg_assert(di->fsm.have_rx_map && di->fsm.have_rw_map);
1238 if (level > 0 && ML_(find_rx_mapping)(di, aMin, aMax) == NULL) {
1239 if (VG_(clo_verbosity) >= 0) {
1240 VG_(message)(Vg_DebugMsg,
1241 "warning: addVar: in range %#lx .. %#lx outside "
1242 "all rx mapped areas (%s)\n",
1243 aMin, aMax, name
1244 );
1245 }
1246 return;
1247 }
1248
1249 /* If the type's size is zero (which can mean unknown size), ignore
1250 it. We will never be able to actually relate a data address to
1251 a data object with zero size, so there's no point in storing
1252 info on it. On 32-bit platforms, also reject types whose size
1253 is 2^32 bytes or large. (It's amazing what junk shows up ..) */
1254 mul = ML_(sizeOfType)(di->admin_tyents, typeR);
1255
1256 badness = NULL;
1257 if (mul.b != True)
1258 badness = "unknown size";
1259 else if (mul.ul == 0)
1260 badness = "zero size ";
1261 else if (sizeof(void*) == 4 && mul.ul >= (1ULL<<32))
1262 badness = "implausibly large";
1263
1264 if (badness) {
1265 static Int complaints = 10;
1266 if (VG_(clo_verbosity) >= 2 && complaints > 0) {
1267 VG_(message)(Vg_DebugMsg, "warning: addVar: %s (%s)\n",
1268 badness, name );
1269 complaints--;
1270 }
1271 return;
1272 }
1273
1274 if (!di->varinfo) {
1275 di->varinfo = VG_(newXA)( ML_(dinfo_zalloc),
1276 "di.storage.addVar.1",
1277 ML_(dinfo_free),
1278 sizeof(OSet*) );
1279 }
1280
1281 vg_assert(level < 256); /* arbitrary; stay sane */
1282 /* Expand the top level array enough to map this level */
1283 while ( VG_(sizeXA)(di->varinfo) <= level ) {
1284 DiAddrRange* nyu;
1285 scope = VG_(OSetGen_Create)( offsetof(DiAddrRange,aMin),
1286 ML_(cmp_for_DiAddrRange_range),
1287 ML_(dinfo_zalloc), "di.storage.addVar.2",
1288 ML_(dinfo_free) );
1289 if (0) VG_(printf)("create: scope = %p, adding at %ld\n",
1290 scope, VG_(sizeXA)(di->varinfo));
1291 VG_(addToXA)( di->varinfo, &scope );
1292 /* Add a single range covering the entire address space. At
1293 level 0 we require this doesn't get split. At levels above 0
1294 we require that any additions to it cause it to get split.
1295 All of these invariants get checked both add_var_to_arange
1296 and after reading is complete, in canonicaliseVarInfo. */
1297 nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
1298 nyu->aMin = (Addr)0;
1299 nyu->aMax = ~(Addr)0;
1300 nyu->vars = VG_(newXA)( ML_(dinfo_zalloc), "di.storage.addVar.3",
1301 ML_(dinfo_free),
1302 sizeof(DiVariable) );
1303 VG_(OSetGen_Insert)( scope, nyu );
1304 }
1305
1306 vg_assert( VG_(sizeXA)(di->varinfo) > level );
1307 scope = *(OSet**)VG_(indexXA)( di->varinfo, level );
1308 vg_assert(scope);
1309
1310 var.name = name;
1311 var.typeR = typeR;
1312 var.gexpr = gexpr;
1313 var.fbGX = fbGX;
1314 var.fndn_ix = fndn_ix;
1315 var.lineNo = lineNo;
1316
1317 all = aMin == (Addr)0 && aMax == ~(Addr)0;
1318 vg_assert(level == 0 ? all : !all);
1319
1320 add_var_to_arange( /*MOD*/scope, aMin, aMax, &var );
1321 }
1322
1323
1324 /* This really just checks the constructed data structure, as there is
1325 no canonicalisation to do. */
canonicaliseVarInfo(struct _DebugInfo * di)1326 static void canonicaliseVarInfo ( struct _DebugInfo* di )
1327 {
1328 Word i, nInThisScope;
1329
1330 if (!di->varinfo)
1331 return;
1332
1333 for (i = 0; i < VG_(sizeXA)(di->varinfo); i++) {
1334
1335 DiAddrRange *range, *rangep;
1336 OSet* scope = *(OSet**)VG_(indexXA)(di->varinfo, i);
1337 if (!scope) continue;
1338
1339 /* Deal with the global-scope case. */
1340 if (i == 0) {
1341 Addr zero = 0;
1342 vg_assert(VG_(OSetGen_Size)( scope ) == 1);
1343 range = VG_(OSetGen_Lookup)( scope, &zero );
1344 vg_assert(range);
1345 vg_assert(range->aMin == (Addr)0);
1346 vg_assert(range->aMax == ~(Addr)0);
1347 continue;
1348 }
1349
1350 /* All the rest of this is for the local-scope case. */
1351 /* iterate over all entries in 'scope' */
1352 nInThisScope = 0;
1353 rangep = NULL;
1354 VG_(OSetGen_ResetIter)(scope);
1355 while (True) {
1356 range = VG_(OSetGen_Next)(scope);
1357 if (!range) {
1358 /* We just saw the last one. There must have been at
1359 least one entry in the range. */
1360 vg_assert(rangep);
1361 vg_assert(rangep->aMax == ~(Addr)0);
1362 break;
1363 }
1364
1365 vg_assert(range->aMin <= range->aMax);
1366 vg_assert(range->vars);
1367
1368 if (!rangep) {
1369 /* This is the first entry in the range. */
1370 vg_assert(range->aMin == 0);
1371 } else {
1372 vg_assert(rangep->aMax + 1 == range->aMin);
1373 }
1374
1375 rangep = range;
1376 nInThisScope++;
1377 } /* iterating over ranges in a given scope */
1378
1379 /* If there's only one entry in this (local) scope, it must
1380 cover the entire address space (obviously), but it must not
1381 contain any vars. */
1382
1383 vg_assert(nInThisScope > 0);
1384 if (nInThisScope == 1) {
1385 Addr zero = 0;
1386 vg_assert(VG_(OSetGen_Size)( scope ) == 1);
1387 range = VG_(OSetGen_Lookup)( scope, &zero );
1388 vg_assert(range);
1389 vg_assert(range->aMin == (Addr)0);
1390 vg_assert(range->aMax == ~(Addr)0);
1391 vg_assert(range->vars);
1392 vg_assert(VG_(sizeXA)(range->vars) == 0);
1393 }
1394
1395 } /* iterate over scopes */
1396 }
1397
1398
1399 /*------------------------------------------------------------*/
1400 /*--- Canonicalisers ---*/
1401 /*------------------------------------------------------------*/
1402
1403 /* Sort the symtab by starting address, and emit warnings if any
1404 symbols have overlapping address ranges. We use that old chestnut,
1405 shellsort. Mash the table around so as to establish the property
1406 that addresses are in order and the ranges to not overlap. This
1407 facilitates using binary search to map addresses to symbols when we
1408 come to query the table.
1409 */
compare_DiSym(const void * va,const void * vb)1410 static Int compare_DiSym ( const void* va, const void* vb )
1411 {
1412 const DiSym* a = va;
1413 const DiSym* b = vb;
1414 if (a->avmas.main < b->avmas.main) return -1;
1415 if (a->avmas.main > b->avmas.main) return 1;
1416 return 0;
1417 }
1418
1419
1420 /* An address is associated with more than one name. Which do we
1421 prefer as the "display" name (that we show the user in stack
1422 traces)? In order:
1423
1424 - Prefer "PMPI_<foo>" over "MPI_<foo>".
1425
1426 - Else, prefer a non-empty name over an empty one.
1427
1428 - Else, prefer a non-whitespace name over an all-whitespace name.
1429
1430 - Else, prefer the shorter symbol name. If the symbol contains a
1431 version symbol ('@' on Linux, other platforms may differ), which means it
1432 is versioned, then the length up to the version symbol is used for length
1433 comparison purposes (so "foo@GLIBC_2.4.2" is considered shorter than
1434 "foobar").
1435
1436 - Else, if two symbols have the same length, prefer a versioned symbol over
1437 a non-versioned symbol.
1438
1439 - Else, use alphabetical ordering.
1440
1441 - Otherwise, they must be the same; use the name with the lower address.
1442
1443 Very occasionally this goes wrong (eg. 'memcmp' and 'bcmp' are
1444 aliases in glibc, we choose the 'bcmp' symbol because it's shorter,
1445 so we can misdescribe memcmp() as bcmp()). This is hard to avoid.
1446 It's mentioned in the FAQ file.
1447
1448 Returned value is True if a_name is preferred, False if b_name is
1449 preferred.
1450 */
1451 static
preferName(const DebugInfo * di,const HChar * a_name,const HChar * b_name,Addr sym_avma)1452 Bool preferName ( const DebugInfo* di,
1453 const HChar* a_name, const HChar* b_name,
1454 Addr sym_avma/*exposition only*/ )
1455 {
1456 Word cmp;
1457 Word vlena, vlenb; /* length without version */
1458 const HChar *vpa, *vpb;
1459
1460 Bool preferA = False;
1461 Bool preferB = False;
1462
1463 vg_assert(a_name);
1464 vg_assert(b_name);
1465 // vg_assert(a_name != b_name);
1466 // ???? now the pointers can be equal but is that
1467 // ???? not the indication of a latent bug ????
1468
1469 vlena = VG_(strlen)(a_name);
1470 vlenb = VG_(strlen)(b_name);
1471
1472 # if defined(VGO_linux) || defined(VGO_solaris)
1473 # define VERSION_CHAR '@'
1474 # elif defined(VGO_darwin)
1475 # define VERSION_CHAR '$'
1476 # else
1477 # error Unknown OS
1478 # endif
1479
1480 vpa = VG_(strchr)(a_name, VERSION_CHAR);
1481 vpb = VG_(strchr)(b_name, VERSION_CHAR);
1482
1483 # undef VERSION_CHAR
1484
1485 if (vpa)
1486 vlena = vpa - a_name;
1487 if (vpb)
1488 vlenb = vpb - b_name;
1489
1490 /* MPI hack: prefer PMPI_Foo over MPI_Foo */
1491 if (0==VG_(strncmp)(a_name, "MPI_", 4)
1492 && 0==VG_(strncmp)(b_name, "PMPI_", 5)
1493 && 0==VG_(strcmp)(a_name, 1+b_name)) {
1494 preferB = True; goto out;
1495 }
1496 if (0==VG_(strncmp)(b_name, "MPI_", 4)
1497 && 0==VG_(strncmp)(a_name, "PMPI_", 5)
1498 && 0==VG_(strcmp)(b_name, 1+a_name)) {
1499 preferA = True; goto out;
1500 }
1501
1502 /* Prefer non-empty name. */
1503 if (vlena && !vlenb) {
1504 preferA = True; goto out;
1505 }
1506 if (vlenb && !vlena) {
1507 preferB = True; goto out;
1508 }
1509
1510 /* Prefer non-whitespace name. */
1511 {
1512 Bool blankA = True;
1513 Bool blankB = True;
1514 const HChar *s;
1515 s = a_name;
1516 while (*s) {
1517 if (!VG_(isspace)(*s++)) {
1518 blankA = False;
1519 break;
1520 }
1521 }
1522 s = b_name;
1523 while (*s) {
1524 if (!VG_(isspace)(*s++)) {
1525 blankB = False;
1526 break;
1527 }
1528 }
1529
1530 if (!blankA && blankB) {
1531 preferA = True; goto out;
1532 }
1533 if (!blankB && blankA) {
1534 preferB = True; goto out;
1535 }
1536 }
1537
1538 /* Select the shortest unversioned name */
1539 if (vlena < vlenb) {
1540 preferA = True; goto out;
1541 }
1542 if (vlenb < vlena) {
1543 preferB = True; goto out;
1544 }
1545
1546 /* Equal lengths; select the versioned name */
1547 if (vpa && !vpb) {
1548 preferA = True; goto out;
1549 }
1550 if (vpb && !vpa) {
1551 preferB = True; goto out;
1552 }
1553
1554 /* Either both versioned or neither is versioned; select them
1555 alphabetically */
1556 cmp = VG_(strcmp)(a_name, b_name);
1557 if (cmp < 0) {
1558 preferA = True; goto out;
1559 }
1560 if (cmp > 0) {
1561 preferB = True; goto out;
1562 }
1563
1564 /* If we get here, they are the same name. */
1565
1566 /* In this case we could choose either (arbitrarily), but might as
1567 well choose the one with the lowest DiSym* address, so as to try
1568 and make the comparison mechanism more stable (a la sorting
1569 parlance). Also, skip the diagnostic printing in this case. */
1570 return a_name <= b_name ? True : False;
1571
1572 /*NOTREACHED*/
1573 vg_assert(0);
1574 out:
1575 if (preferA && !preferB) {
1576 TRACE_SYMTAB("sym at %#lx: prefer '%s' to '%s'\n",
1577 sym_avma, a_name, b_name );
1578 return True;
1579 }
1580 if (preferB && !preferA) {
1581 TRACE_SYMTAB("sym at %#lx: prefer '%s' to '%s'\n",
1582 sym_avma, b_name, a_name );
1583 return False;
1584 }
1585 /*NOTREACHED*/
1586 vg_assert(0);
1587 }
1588
1589
1590 /* Add the names in FROM to the names in TO. */
1591 static
/* Merge the names of 'from' into 'to': 'to' keeps its primary name,
   and gains from's primary name plus both symbols' secondary names in
   a freshly allocated NULL-terminated secondary vector.  Frees to's
   old secondary vector, if any.  'from' is not modified. */
static
void add_DiSym_names_to_from ( const DebugInfo* di, DiSym* to,
                               const DiSym* from )
{
   vg_assert(to->pri_name);
   vg_assert(from->pri_name);
   /* Figure out how many names there will be in the new combined
      secondary vector. */
   const HChar** to_sec   = to->sec_names;
   const HChar** from_sec = from->sec_names;
   Word n_new_sec = 1;   /* 1 for from's primary name */
   if (from_sec) {
      while (*from_sec) {
         n_new_sec++;
         from_sec++;
      }
   }
   if (to_sec) {
      while (*to_sec) {
         n_new_sec++;
         to_sec++;
      }
   }
   if (0)
      TRACE_SYMTAB("merge: -> %ld\n", n_new_sec);
   /* Create the new sec and copy stuff into it, putting the new
      entries at the end.  The +1 leaves a zeroed terminating NULL. */
   const HChar** new_sec = ML_(dinfo_zalloc)( "di.storage.aDntf.1",
                                              (n_new_sec+1) * sizeof(HChar*) );
   from_sec = from->sec_names;
   to_sec   = to->sec_names;
   Word i = 0;
   if (to_sec) {
      while (*to_sec) {
         new_sec[i++] = *to_sec;
         to_sec++;
      }
   }
   new_sec[i++] = from->pri_name;
   if (from_sec) {
      while (*from_sec) {
         new_sec[i++] = *from_sec;
         from_sec++;
      }
   }
   vg_assert(i == n_new_sec);
   vg_assert(new_sec[i] == NULL);
   /* If we're replacing an existing secondary vector, free it. */
   if (to->sec_names) {
      ML_(dinfo_free)(to->sec_names);
   }
   to->sec_names = new_sec;
}
1644
1645
canonicaliseSymtab(struct _DebugInfo * di)1646 static void canonicaliseSymtab ( struct _DebugInfo* di )
1647 {
1648 Word i, j, n_truncated;
1649 Addr sta1, sta2, end1, end2, toc1, toc2;
1650 const HChar *pri1, *pri2, **sec1, **sec2;
1651 Bool ist1, ist2, isf1, isf2, isg1, isg2;
1652
1653 # define SWAP(ty,aa,bb) \
1654 do { ty tt = (aa); (aa) = (bb); (bb) = tt; } while (0)
1655
1656 if (di->symtab_used == 0)
1657 return;
1658
1659 /* Check initial invariants */
1660 for (i = 0; i < di->symtab_used; i++) {
1661 DiSym* sym = &di->symtab[i];
1662 vg_assert(sym->pri_name);
1663 vg_assert(!sym->sec_names);
1664 }
1665
1666 /* Sort by address. */
1667 VG_(ssort)(di->symtab, di->symtab_used,
1668 sizeof(*di->symtab), compare_DiSym);
1669
1670 cleanup_more:
1671
1672 /* BEGIN Detect and "fix" identical address ranges. */
1673 while (1) {
1674 Word r, w, n_merged;
1675 n_merged = 0;
1676 w = 0;
1677 /* A pass merging entries together in the case where they agree
1678 on .isText -- that is, either: both are .isText or both are
1679 not .isText. They are merged into a single entry, but both
1680 sets of names are preserved, so we end up knowing all the
1681 names for that particular address range.*/
1682 for (r = 1; r < di->symtab_used; r++) {
1683 vg_assert(w < r);
1684 if ( di->symtab[w].avmas.main == di->symtab[r].avmas.main
1685 && di->symtab[w].size == di->symtab[r].size
1686 && !!di->symtab[w].isText == !!di->symtab[r].isText) {
1687 /* merge the two into one */
1688 n_merged++;
1689 /* Add r names to w if r has secondary names
1690 or r and w primary names differ. */
1691 if (di->symtab[r].sec_names
1692 || (0 != VG_(strcmp)(di->symtab[r].pri_name,
1693 di->symtab[w].pri_name))) {
1694 add_DiSym_names_to_from(di, &di->symtab[w], &di->symtab[r]);
1695 }
1696 /* mark w as an IFunc if either w or r are */
1697 di->symtab[w].isIFunc = di->symtab[w].isIFunc || di->symtab[r].isIFunc;
1698 /* likewise for global symbols */
1699 di->symtab[w].isGlobal = di->symtab[w].isGlobal || di->symtab[r].isGlobal;
1700 /* and use ::pri_names to indicate this slot is no longer in use */
1701 di->symtab[r].pri_name = NULL;
1702 if (di->symtab[r].sec_names) {
1703 ML_(dinfo_free)(di->symtab[r].sec_names);
1704 di->symtab[r].sec_names = NULL;
1705 }
1706 /* Completely zap the entry -- paranoia to make it more
1707 likely we'll notice if we inadvertently use it
1708 again. */
1709 VG_(memset)(&di->symtab[r], 0, sizeof(DiSym));
1710 } else {
1711 w = r;
1712 }
1713 }
1714
1715 /* A second pass merging entries together where one .isText but
1716 the other isn't. In such cases, just ignore the non-.isText
1717 one (a heuristic hack.) */
1718 for (r = 1; r < di->symtab_used; r++) {
1719 /* Either of the symbols might already have been zapped by
1720 the previous pass, so we first have to check that. */
1721 if (di->symtab[r-1].pri_name == NULL) continue;
1722 if (di->symtab[r-0].pri_name == NULL) continue;
1723 /* ok, they are both still valid. Identical address ranges? */
1724 if (di->symtab[r-1].avmas.main != di->symtab[r-0].avmas.main) continue;
1725 if (di->symtab[r-1].size != di->symtab[r-0].size) continue;
1726 /* Identical address ranges. They must disagree on .isText
1727 since if they agreed, the previous pass would have merged
1728 them. */
1729 if (di->symtab[r-1].isText && di->symtab[r-0].isText) vg_assert(0);
1730 if (!di->symtab[r-1].isText && !di->symtab[r-0].isText) vg_assert(0);
1731 Word to_zap = di->symtab[r-1].isText ? (r-0) : (r-1);
1732 Word to_keep = di->symtab[r-1].isText ? (r-1) : (r-0);
1733 vg_assert(!di->symtab[to_zap].isText);
1734 vg_assert(di->symtab[to_keep].isText);
1735 /* Add to_zap's names to to_keep if to_zap has secondary names
1736 or to_zap's and to_keep's primary names differ. */
1737 if (di->symtab[to_zap].sec_names
1738 || (0 != VG_(strcmp)(di->symtab[to_zap].pri_name,
1739 di->symtab[to_keep].pri_name))) {
1740 add_DiSym_names_to_from(di, &di->symtab[to_keep],
1741 &di->symtab[to_zap]);
1742 }
1743 /* Mark to_zap as not-in use in the same way as in the
1744 previous loop. */
1745 di->symtab[to_zap].pri_name = NULL;
1746 if (di->symtab[to_zap].sec_names) {
1747 ML_(dinfo_free)(di->symtab[to_zap].sec_names);
1748 di->symtab[to_zap].sec_names = NULL;
1749 }
1750 VG_(memset)(&di->symtab[to_zap], 0, sizeof(DiSym));
1751 n_merged++;
1752 }
1753
1754 TRACE_SYMTAB( "canonicaliseSymtab: %ld symbols merged\n", n_merged);
1755 if (n_merged == 0)
1756 break;
1757 /* Now a pass to squeeze out any unused ones */
1758 w = 0;
1759 for (r = 0; r < di->symtab_used; r++) {
1760 vg_assert(w <= r);
1761 if (di->symtab[r].pri_name == NULL)
1762 continue;
1763 if (w < r) {
1764 di->symtab[w] = di->symtab[r];
1765 }
1766 w++;
1767 }
1768 vg_assert(w + n_merged == di->symtab_used);
1769 di->symtab_used = w;
1770 } /* while (1) */
1771 /* END Detect and "fix" identical address ranges. */
1772
1773 /* BEGIN Detect and "fix" overlapping address ranges. */
1774 n_truncated = 0;
1775
1776 for (i = 0; i < ((Word)di->symtab_used) -1; i++) {
1777
1778 vg_assert(di->symtab[i].avmas.main <= di->symtab[i+1].avmas.main);
1779
1780 /* Check for common (no overlap) case. */
1781 if (di->symtab[i].avmas.main + di->symtab[i].size
1782 <= di->symtab[i+1].avmas.main)
1783 continue;
1784
1785 /* There's an overlap. Truncate one or the other. */
1786 if (di->trace_symtab) {
1787 VG_(printf)("overlapping address ranges in symbol table\n\t");
1788 ML_(ppSym)( i, &di->symtab[i] );
1789 VG_(printf)("\t");
1790 ML_(ppSym)( i+1, &di->symtab[i+1] );
1791 VG_(printf)("\n");
1792 }
1793
1794 /* Truncate one or the other. */
1795 sta1 = di->symtab[i].avmas.main;
1796 end1 = sta1 + di->symtab[i].size - 1;
1797 toc1 = GET_TOCPTR_AVMA(di->symtab[i].avmas);
1798 // aren't we missing local_ep here ????
1799 pri1 = di->symtab[i].pri_name;
1800 sec1 = di->symtab[i].sec_names;
1801 ist1 = di->symtab[i].isText;
1802 isf1 = di->symtab[i].isIFunc;
1803 isg1 = di->symtab[i].isGlobal;
1804
1805 sta2 = di->symtab[i+1].avmas.main;
1806 end2 = sta2 + di->symtab[i+1].size - 1;
1807 toc2 = GET_TOCPTR_AVMA(di->symtab[i+1].avmas);
1808 // aren't we missing local_ep here ????
1809 pri2 = di->symtab[i+1].pri_name;
1810 sec2 = di->symtab[i+1].sec_names;
1811 ist2 = di->symtab[i+1].isText;
1812 isf2 = di->symtab[i+1].isIFunc;
1813 isg2 = di->symtab[i+1].isGlobal;
1814
1815 if (sta1 < sta2) {
1816 end1 = sta2 - 1;
1817 } else {
1818 vg_assert(sta1 == sta2);
1819 if (end1 > end2) {
1820 sta1 = end2 + 1;
1821 SWAP(Addr,sta1,sta2); SWAP(Addr,end1,end2); SWAP(Addr,toc1,toc2);
1822 SWAP(const HChar*,pri1,pri2); SWAP(const HChar**,sec1,sec2);
1823 SWAP(Bool,ist1,ist2); SWAP(Bool,isf1,isf2); SWAP(Bool, isg1, isg2);
1824 } else
1825 if (end1 < end2) {
1826 sta2 = end1 + 1;
1827 } else {
1828 /* end1 == end2. Identical addr ranges. We'll eventually wind
1829 up back at cleanup_more, which will take care of it. */
1830 }
1831 }
1832 di->symtab[i].avmas.main = sta1;
1833 di->symtab[i].size = end1 - sta1 + 1;
1834 SET_TOCPTR_AVMA(di->symtab[i].avmas, toc1);
1835 // missing local_ep ???
1836 di->symtab[i].pri_name = pri1;
1837 di->symtab[i].sec_names = sec1;
1838 di->symtab[i].isText = ist1;
1839 di->symtab[i].isIFunc = isf1;
1840 di->symtab[i].isGlobal = isg1;
1841
1842 di->symtab[i+1].avmas.main = sta2;
1843 di->symtab[i+1].size = end2 - sta2 + 1;
1844 SET_TOCPTR_AVMA(di->symtab[i+1].avmas, toc2);
1845 // missing local_ep ???
1846 di->symtab[i+1].pri_name = pri2;
1847 di->symtab[i+1].sec_names = sec2;
1848 di->symtab[i+1].isText = ist2;
1849 di->symtab[i+1].isIFunc = isf2;
1850 di->symtab[i+1].isGlobal = isg2;
1851
1852 vg_assert(sta1 <= sta2);
1853 vg_assert(di->symtab[i].size > 0);
1854 vg_assert(di->symtab[i+1].size > 0);
1855 /* It may be that the i+1 entry now needs to be moved further
1856 along to maintain the address order requirement. */
1857 j = i+1;
1858 while (j < ((Word)di->symtab_used)-1
1859 && di->symtab[j].avmas.main > di->symtab[j+1].avmas.main) {
1860 SWAP(DiSym,di->symtab[j],di->symtab[j+1]);
1861 j++;
1862 }
1863 n_truncated++;
1864 }
1865 /* END Detect and "fix" overlapping address ranges. */
1866
1867 if (n_truncated > 0) goto cleanup_more;
1868
1869 /* Ensure relevant postconditions hold. */
1870 for (i = 0; i < ((Word)di->symtab_used)-1; i++) {
1871 /* No zero-sized symbols. */
1872 vg_assert(di->symtab[i].size > 0);
1873 /* In order. */
1874 vg_assert(di->symtab[i].avmas.main < di->symtab[i+1].avmas.main);
1875 /* No overlaps. */
1876 vg_assert(di->symtab[i].avmas.main + di->symtab[i].size - 1
1877 < di->symtab[i+1].avmas.main);
1878 /* Names are sane(ish) */
1879 vg_assert(di->symtab[i].pri_name);
1880 if (di->symtab[i].sec_names) {
1881 vg_assert(di->symtab[i].sec_names[0]);
1882 }
1883 }
1884
1885 /* For each symbol that has more than one name, use preferName to
1886 select the primary name. This is a complete kludge in that
1887 doing it properly requires making a total ordering on the
1888 candidate names, whilst what we have to work with is an ad-hoc
1889 binary relation (preferName) that certainly doesn't have the
1890 relevant transitivity etc properties that are needed to induce a
1891 legitimate total order. Doesn't matter though if it doesn't
1892 always work right since this is only used to generate names to
1893 show the user. */
1894 for (i = 0; i < ((Word)di->symtab_used)-1; i++) {
1895 DiSym* sym = &di->symtab[i];
1896 const HChar** sec = sym->sec_names;
1897 if (!sec)
1898 continue;
1899 /* Slow but simple. Copy all the cands into a temp array,
1900 choose the primary name, and copy them all back again. */
1901 Word n_tmp = 1;
1902 while (*sec) { n_tmp++; sec++; }
1903 j = 0;
1904 const HChar** tmp = ML_(dinfo_zalloc)( "di.storage.cS.1",
1905 (n_tmp+1) * sizeof(HChar*) );
1906 tmp[j++] = sym->pri_name;
1907 sec = sym->sec_names;
1908 while (*sec) { tmp[j++] = *sec; sec++; }
1909 vg_assert(j == n_tmp);
1910 vg_assert(tmp[n_tmp] == NULL); /* because of zalloc */
1911 /* Choose the most favoured. */
1912 Word best = 0;
1913 for (j = 1; j < n_tmp; j++) {
1914 if (preferName(di, tmp[best], tmp[j], di->symtab[i].avmas.main)) {
1915 /* best is unchanged */
1916 } else {
1917 best = j;
1918 }
1919 }
1920 vg_assert(best >= 0 && best < n_tmp);
1921 /* Copy back */
1922 sym->pri_name = tmp[best];
1923 const HChar** cursor = sym->sec_names;
1924 for (j = 0; j < n_tmp; j++) {
1925 if (j == best)
1926 continue;
1927 *cursor = tmp[j];
1928 cursor++;
1929 }
1930 vg_assert(*cursor == NULL);
1931 ML_(dinfo_free)( tmp );
1932 }
1933
1934 # undef SWAP
1935 }
1936
1937
1938 static DiLoc* sorting_loctab = NULL;
compare_DiLoc_via_ix(const void * va,const void * vb)1939 static Int compare_DiLoc_via_ix ( const void* va, const void* vb )
1940 {
1941 const DiLoc* a = &sorting_loctab[*(const UInt*)va];
1942 const DiLoc* b = &sorting_loctab[*(const UInt*)vb];
1943 if (a->addr < b->addr) return -1;
1944 if (a->addr > b->addr) return 1;
1945 return 0;
1946 }
sort_loctab_and_loctab_fndn_ix(struct _DebugInfo * di)1947 static void sort_loctab_and_loctab_fndn_ix (struct _DebugInfo* di )
1948 {
1949 /* We have to sort the array loctab by addr
1950 together with its "parallel" array loctab_fndn_ix.
1951 We first build sort_ix : an array of indexes in loctab,
1952 that we sort by loctab address. Then we can reorder both
1953 arrays according to sort_ix. */
1954 UInt *sort_ix = ML_(dinfo_zalloc)("di.storage.six",
1955 di->loctab_used*sizeof(UInt));
1956 Word i, j, k;
1957
1958 for (i = 0; i < di->loctab_used; i++) sort_ix[i] = i;
1959 sorting_loctab = di->loctab;
1960 VG_(ssort)(sort_ix, di->loctab_used,
1961 sizeof(*sort_ix), compare_DiLoc_via_ix);
1962 sorting_loctab = NULL;
1963
1964 // Permute in place, using the sort_ix.
1965 for (i=0; i < di->loctab_used; i++) {
1966 DiLoc tmp_diloc;
1967 UInt tmp_fndn_ix;
1968
1969 if (i == sort_ix[i])
1970 continue; // i already at the good place
1971
1972 tmp_diloc = di->loctab[i];
1973 tmp_fndn_ix = ML_(fndn_ix)(di, i);
1974 j = i;
1975 for (;;) {
1976 k = sort_ix[j];
1977 sort_ix[j] = j;
1978 if (k == i)
1979 break;
1980 di->loctab[j] = di->loctab[k];
1981 set_fndn_ix (di, j, ML_(fndn_ix)(di, k));
1982 j = k;
1983 }
1984 di->loctab[j] = tmp_diloc;
1985 set_fndn_ix (di, j, tmp_fndn_ix);
1986 }
1987 ML_(dinfo_free)(sort_ix);
1988 }
1989
/* Sort the location table by starting address. Mash the table around
   so as to establish the property that addresses are in order and the
   ranges do not overlap. This facilitates using binary search to map
   addresses to locations when we come to query the table.
*/
static void canonicaliseLoctab ( struct _DebugInfo* di )
{
   Word i, j;

   if (di->loctab_used == 0)
      return;

   /* sort loctab and loctab_fndn_ix by addr. */
   sort_loctab_and_loctab_fndn_ix (di);

   /* Pass 1: truncate the first of any two overlapping adjacent
      entries. */
   for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
      vg_assert(di->loctab[i].size < 10000);
      /* If two adjacent entries overlap, truncate the first. */
      if (di->loctab[i].addr + di->loctab[i].size > di->loctab[i+1].addr) {
         /* Do this in signed int32 because the actual .size fields
            are only 12 bits. */
         Int new_size = di->loctab[i+1].addr - di->loctab[i].addr;
         TRACE_LOCTAB_CANON ("Truncating",
                             &(di->loctab[i]), &(di->loctab[i+1]));
         if (new_size < 0) {
            di->loctab[i].size = 0;
         } else
         if (new_size > MAX_LOC_SIZE) {
            di->loctab[i].size = MAX_LOC_SIZE;
         } else {
            di->loctab[i].size = (UShort)new_size;
         }
      }
   }

   /* Pass 2: zap any zero-sized entries resulting from the truncation
      process, compacting loctab and loctab_fndn_ix in step. */
   j = 0;
   for (i = 0; i < (Word)di->loctab_used; i++) {
      if (di->loctab[i].size > 0) {
         if (j != i) {
            di->loctab[j] = di->loctab[i];
            set_fndn_ix(di, j, ML_(fndn_ix)(di, i));
         }
         j++;
      }
   }
   di->loctab_used = j;

   /* Ensure relevant postconditions hold. */
   for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
      if (0)
         VG_(printf)("%ld 0x%p lno:%d sz:%d fndn_ix:%u i+1 0x%p\n",
                     i,
                     (void*)di->loctab[i].addr,
                     di->loctab[i].lineno,
                     di->loctab[i].size,
                     ML_(fndn_ix)(di, i),
                     (void*)di->loctab[i+1].addr);

      /* No zero-sized symbols. */
      vg_assert(di->loctab[i].size > 0);
      /* In order. */
      vg_assert(di->loctab[i].addr < di->loctab[i+1].addr);
      /* No overlaps. */
      vg_assert(di->loctab[i].addr + di->loctab[i].size - 1
                < di->loctab[i+1].addr);
   }

   /* Free up unused space at the end of the table. */
   shrinkLocTab(di);
}
2062
2063 /* Sort the inlined call table by starting address. Mash the table around
2064 so as to establish the property that addresses are in order.
2065 This facilitates using binary search to map addresses to locations when
2066 we come to query the table.
2067 Note : ranges can overlap, multiple ranges can start at an address,
2068 multiple ranges can end at an address.
2069 */
compare_DiInlLoc(const void * va,const void * vb)2070 static Int compare_DiInlLoc ( const void* va, const void* vb )
2071 {
2072 const DiInlLoc* a = va;
2073 const DiInlLoc* b = vb;
2074 if (a->addr_lo < b->addr_lo) return -1;
2075 if (a->addr_lo > b->addr_lo) return 1;
2076 return 0;
2077 }
2078
canonicaliseInltab(struct _DebugInfo * di)2079 static void canonicaliseInltab ( struct _DebugInfo* di )
2080 {
2081 Word i;
2082
2083 if (di->inltab_used == 0)
2084 return;
2085
2086 /* Sort by start address. */
2087 VG_(ssort)(di->inltab, di->inltab_used,
2088 sizeof(*di->inltab), compare_DiInlLoc);
2089
2090 /* Ensure relevant postconditions hold. */
2091 for (i = 0; i < ((Word)di->inltab_used)-1; i++) {
2092 /* No zero-sized inlined call. */
2093 vg_assert(di->inltab[i].addr_lo < di->inltab[i].addr_hi);
2094 /* In order, but we can have duplicates and overlapping ranges. */
2095 vg_assert(di->inltab[i].addr_lo <= di->inltab[i+1].addr_lo);
2096 }
2097
2098 /* Free up unused space at the end of the table. */
2099 shrinkInlTab(di);
2100 }
2101
2102
2103 /* Sort the call-frame-info cfsi_rd by starting address. Mash the table
2104 around so as to establish the property that addresses are in order
2105 and the ranges do not overlap. This facilitates using binary
2106 search to map addresses to locations when we come to query the
2107 table.
2108
2109 Also, set cfisi_minaddr and cfisi_maxaddr to be the min and max of
2110 any of the address ranges contained in cfisi[0 .. cfisi_used-1], so
2111 as to facilitate rapidly skipping this SegInfo when looking for an
2112 address which falls outside that range.
2113 */
compare_DiCfSI(const void * va,const void * vb)2114 static Int compare_DiCfSI ( const void* va, const void* vb )
2115 {
2116 const DiCfSI* a = va;
2117 const DiCfSI* b = vb;
2118 if (a->base < b->base) return -1;
2119 if (a->base > b->base) return 1;
2120 return 0;
2121 }
2122
/* Scan the sorted cfsi_rd table and report how many adjacent pairs
   are mergeable (exactly contiguous and sharing the same cfsi_m
   index) and how many gaps (holes) separate non-contiguous pairs. */
static void get_cfsi_rd_stats ( const DebugInfo* di,
                                UWord *n_mergeables, UWord *n_holes )
{
   UWord mergeables = 0;
   UWord holes = 0;
   Word ix;

   vg_assert (di->cfsi_used == 0 || di->cfsi_rd);
   for (ix = 1; ix < (Word)di->cfsi_used; ix++) {
      Addr this_lo  = di->cfsi_rd[ix].base;
      Addr prior_hi = di->cfsi_rd[ix-1].base + di->cfsi_rd[ix-1].len - 1;
      Addr gap = this_lo - prior_hi;
      if (gap > 1) {
         holes++;
      } else if (gap == 1
                 && di->cfsi_rd[ix-1].cfsi_m_ix == di->cfsi_rd[ix].cfsi_m_ix) {
         mergeables++;
      }
   }

   *n_mergeables = mergeables;
   *n_holes = holes;
}
2142
/* Sort cfsi_rd by base address, truncate overlapping ranges, drop
   zero-length entries and compute cfsi_minavma/cfsi_maxavma.  See
   the block comment above compare_DiCfSI for the full rationale. */
void ML_(canonicaliseCFI) ( struct _DebugInfo* di )
{
   Word i, j;
   const Addr minAvma = 0;
   const Addr maxAvma = ~minAvma;

   /* Note: take care in here. di->cfsi can be NULL, in which
      case _used and _size fields will be zero. */
   if (di->cfsi_rd == NULL) {
      vg_assert(di->cfsi_used == 0);
      vg_assert(di->cfsi_size == 0);
      vg_assert(di->cfsi_m_pool == NULL);
   } else {
      vg_assert(di->cfsi_size != 0);
      vg_assert(di->cfsi_m_pool != NULL);
   }

   /* Set cfsi_minavma and cfsi_maxavma to summarise the entire
      address range contained in cfsi_rd[0 .. cfsi_used-1]. */
   di->cfsi_minavma = maxAvma;
   di->cfsi_maxavma = minAvma;
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      Addr here_min = di->cfsi_rd[i].base;
      Addr here_max = di->cfsi_rd[i].base + di->cfsi_rd[i].len - 1;
      if (here_min < di->cfsi_minavma)
         di->cfsi_minavma = here_min;
      if (here_max > di->cfsi_maxavma)
         di->cfsi_maxavma = here_max;
   }

   if (di->trace_cfi)
      VG_(printf)("canonicaliseCfiSI: %lu entries, %#lx .. %#lx\n",
                  di->cfsi_used,
                  di->cfsi_minavma, di->cfsi_maxavma);

   /* Sort the cfsi_rd array by base address. */
   VG_(ssort)(di->cfsi_rd, di->cfsi_used, sizeof(*di->cfsi_rd), compare_DiCfSI);

   /* If two adjacent entries overlap, truncate the first. */
   for (i = 0; i < (Word)di->cfsi_used-1; i++) {
      if (di->cfsi_rd[i].base + di->cfsi_rd[i].len > di->cfsi_rd[i+1].base) {
         Word new_len = di->cfsi_rd[i+1].base - di->cfsi_rd[i].base;
         /* how could it be otherwise? The entries are sorted by the
            .base field. */
         vg_assert(new_len >= 0);
         vg_assert(new_len <= di->cfsi_rd[i].len);
         di->cfsi_rd[i].len = new_len;
      }
   }

   /* Zap any zero-sized entries resulting from the truncation
      process. */
   j = 0;
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      if (di->cfsi_rd[i].len > 0) {
         if (j != i)
            di->cfsi_rd[j] = di->cfsi_rd[i];
         j++;
      }
   }
   /* VG_(printf)("XXXXXXXXXXXXX %d %d\n", di->cfsi_used, j); */
   di->cfsi_used = j;

   /* Ensure relevant postconditions hold. */
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      /* No zero-length ranges. */
      vg_assert(di->cfsi_rd[i].len > 0);
      /* Makes sense w.r.t. summary address range */
      vg_assert(di->cfsi_rd[i].base >= di->cfsi_minavma);
      vg_assert(di->cfsi_rd[i].base + di->cfsi_rd[i].len - 1
                <= di->cfsi_maxavma);

      if (i < di->cfsi_used - 1) {
         /*
         if (!(di->cfsi[i].base < di->cfsi[i+1].base)) {
            VG_(printf)("\nOOO cfsis:\n");
            ML_(ppCfiSI)(&di->cfsi[i]);
            ML_(ppCfiSI)(&di->cfsi[i+1]);
         }
         */
         /* In order. */
         vg_assert(di->cfsi_rd[i].base < di->cfsi_rd[i+1].base);
         /* No overlaps. */
         vg_assert(di->cfsi_rd[i].base + di->cfsi_rd[i].len - 1
                   < di->cfsi_rd[i+1].base);
      }
   }

   /* Optionally report how compressible the table is; the actual
      compression happens later in ML_(finish_CFSI_arrays). */
   if (VG_(clo_stats) && VG_(clo_verbosity) >= 3) {
      UWord n_mergeables, n_holes;
      get_cfsi_rd_stats (di, &n_mergeables, &n_holes);
      VG_(dmsg)("CFSI total %lu mergeables %lu holes %lu uniq cfsi_m %u\n",
                di->cfsi_used, n_mergeables, n_holes,
                di->cfsi_m_pool ? VG_(sizeDedupPA) (di->cfsi_m_pool) : 0);
   }
}
2239
/* Convert the canonicalised cfsi_rd array into the compact parallel
   arrays cfsi_base[] (range start addresses) and cfsi_m_ix[] (indices
   into the deduplicated cfsi_m pool, stored in 1, 2 or 4 bytes per
   entry depending on pool size).  Exactly-contiguous neighbours that
   share a cfsi_m index are merged into one entry; each hole between
   ranges gets a pseudo-entry whose index is 0.  cfsi_rd is freed. */
void ML_(finish_CFSI_arrays) ( struct _DebugInfo* di )
{
   UWord n_mergeables, n_holes;
   UWord new_used;
   UWord i;
   UWord pos;
   UWord f_mergeables, f_holes;
   UInt sz_cfsi_m_pool;

   get_cfsi_rd_stats (di, &n_mergeables, &n_holes);

   if (di->cfsi_used == 0) {
      vg_assert (di->cfsi_rd == NULL);
      vg_assert (di->cfsi_m_pool == NULL);
      vg_assert (n_mergeables == 0);
      vg_assert (n_holes == 0);
      return;
   }

   vg_assert (di->cfsi_used > n_mergeables);
   /* Each mergeable entry disappears; each hole gains a marker. */
   new_used = di->cfsi_used - n_mergeables + n_holes;

   /* Pick the smallest element width able to hold any pool index. */
   sz_cfsi_m_pool = VG_(sizeDedupPA)(di->cfsi_m_pool);
   vg_assert (sz_cfsi_m_pool > 0);
   if (sz_cfsi_m_pool <= 255)
      di->sizeof_cfsi_m_ix = 1;
   else if (sz_cfsi_m_pool <= 65535)
      di->sizeof_cfsi_m_ix = 2;
   else
      di->sizeof_cfsi_m_ix = 4;

   di->cfsi_base = ML_(dinfo_zalloc)( "di.storage.finCfSI.1",
                                       new_used * sizeof(Addr) );
   di->cfsi_m_ix = ML_(dinfo_zalloc)( "di.storage.finCfSI.2",
                                      new_used * sizeof(UChar)*di->sizeof_cfsi_m_ix);

   pos = 0;
   f_mergeables = 0;
   f_holes = 0;
   for (i = 0; i < (Word)di->cfsi_used; i++) {
      if (i > 0) {
         Addr here_min = di->cfsi_rd[i].base;
         Addr prev_max = di->cfsi_rd[i-1].base + di->cfsi_rd[i-1].len - 1;
         SizeT sep = here_min - prev_max;

         // Skip a mergeable entry.
         if (sep == 1) {
            if (di->cfsi_rd[i-1].cfsi_m_ix == di->cfsi_rd[i].cfsi_m_ix) {
               f_mergeables++;
               continue;
            }
         }
         // Insert a hole if needed.
         // (index 0 marks "no unwind info" for the gap's addresses)
         if (sep > 1) {
            f_holes++;
            di->cfsi_base[pos] = prev_max + 1;
            switch (di->sizeof_cfsi_m_ix) {
               case 1: ((UChar*) di->cfsi_m_ix)[pos] = 0; break;
               case 2: ((UShort*)di->cfsi_m_ix)[pos] = 0; break;
               case 4: ((UInt*)  di->cfsi_m_ix)[pos] = 0; break;
               default: vg_assert(0);
            }
            pos++;
         }
      }

      // Insert the cfsi entry i.
      di->cfsi_base[pos] = di->cfsi_rd[i].base;
      switch (di->sizeof_cfsi_m_ix) {
         case 1: ((UChar*) di->cfsi_m_ix)[pos] = di->cfsi_rd[i].cfsi_m_ix; break;
         case 2: ((UShort*)di->cfsi_m_ix)[pos] = di->cfsi_rd[i].cfsi_m_ix; break;
         case 4: ((UInt*)  di->cfsi_m_ix)[pos] = di->cfsi_rd[i].cfsi_m_ix; break;
         default: vg_assert(0);
      }
      pos++;
   }

   /* Cross-check against the pre-computed stats. */
   vg_assert (f_mergeables == n_mergeables);
   vg_assert (f_holes == n_holes);
   vg_assert (pos == new_used);

   di->cfsi_used = new_used;
   di->cfsi_size = new_used;
   ML_(dinfo_free) (di->cfsi_rd);
   di->cfsi_rd = NULL;
}
2326
2327
/* Canonicalise the tables held by 'di', in preparation for use. Call
   this after finishing adding entries to these tables. */
void ML_(canonicaliseTables) ( struct _DebugInfo* di )
{
   canonicaliseSymtab ( di );
   canonicaliseLoctab ( di );
   canonicaliseInltab ( di );
   ML_(canonicaliseCFI) ( di );
   /* The dedup pools are frozen once canonicalisation is done: no
      further entries will be added to them. */
   if (di->cfsi_m_pool)
      VG_(freezeDedupPA) (di->cfsi_m_pool, ML_(dinfo_shrink_block));
   canonicaliseVarInfo ( di );
   if (di->strpool)
      VG_(freezeDedupPA) (di->strpool, ML_(dinfo_shrink_block));
   if (di->fndnpool)
      VG_(freezeDedupPA) (di->fndnpool, ML_(dinfo_shrink_block));
}
2344
2345
2346 /*------------------------------------------------------------*/
2347 /*--- Searching the tables ---*/
2348 /*------------------------------------------------------------*/
2349
2350 /* Find a symbol-table index containing the specified pointer, or -1
2351 if not found. Binary search. */
2352
ML_(search_one_symtab)2353 Word ML_(search_one_symtab) ( const DebugInfo* di, Addr ptr,
2354 Bool findText )
2355 {
2356 Addr a_mid_lo, a_mid_hi;
2357 Word mid,
2358 lo = 0,
2359 hi = di->symtab_used-1;
2360 while (True) {
2361 /* current unsearched space is from lo to hi, inclusive. */
2362 if (lo > hi) return -1; /* not found */
2363 mid = (lo + hi) / 2;
2364 a_mid_lo = di->symtab[mid].avmas.main;
2365 a_mid_hi = ((Addr)di->symtab[mid].avmas.main) + di->symtab[mid].size - 1;
2366
2367 if (ptr < a_mid_lo) { hi = mid-1; continue; }
2368 if (ptr > a_mid_hi) { lo = mid+1; continue; }
2369 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
2370 /* Found a symbol with the correct address range. But is it
2371 of the right kind (text vs data) ? */
2372 if ( findText && di->symtab[mid].isText ) return mid;
2373 if ( (!findText) && (!di->symtab[mid].isText) ) return mid;
2374 return -1;
2375 }
2376 }
2377
2378
2379 /* Find a location-table index containing the specified pointer, or -1
2380 if not found. Binary search. */
2381
ML_(search_one_loctab)2382 Word ML_(search_one_loctab) ( const DebugInfo* di, Addr ptr )
2383 {
2384 Addr a_mid_lo, a_mid_hi;
2385 Word mid,
2386 lo = 0,
2387 hi = di->loctab_used-1;
2388 while (True) {
2389 /* current unsearched space is from lo to hi, inclusive. */
2390 if (lo > hi) return -1; /* not found */
2391 mid = (lo + hi) / 2;
2392 a_mid_lo = di->loctab[mid].addr;
2393 a_mid_hi = ((Addr)di->loctab[mid].addr) + di->loctab[mid].size - 1;
2394
2395 if (ptr < a_mid_lo) { hi = mid-1; continue; }
2396 if (ptr > a_mid_hi) { lo = mid+1; continue; }
2397 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
2398 return mid;
2399 }
2400 }
2401
2402
/* Find a CFI-table index containing the specified pointer, or -1
   if not found. Binary search. */

Word ML_(search_one_cfitab) ( const DebugInfo* di, Addr ptr )
{
   Word mid,
        lo = 0,
        hi = di->cfsi_used-1;

   /* This searches the compact cfsi_base[] array built by
      ML_(finish_CFSI_arrays), which stores only range start
      addresses.  We want the index of the last base <= ptr, i.e.
      lo-1 when the loop finishes (so -1 if ptr < cfsi_base[0]). */
   while (lo <= hi) {
      /* Invariants : hi == cfsi_used-1 || ptr < cfsi_base[hi+1]
                      lo == 0 || ptr > cfsi_base[lo-1]
         (the first part of the invariants is similar to considering
         that cfsi_base[-1] is 0 and cfsi_base[cfsi_used] is ~0) */
      mid = (lo + hi) / 2;
      if (ptr < di->cfsi_base[mid]) { hi = mid-1; continue; }
      if (ptr > di->cfsi_base[mid]) { lo = mid+1; continue; }
      /* Exact hit on a base address: lo-1 must be mid itself. */
      lo = mid+1; break;
   }

#if 0
   /* Disabled cross-check: linear scan must agree with the binary
      search above. */
   for (mid = 0; mid <= di->cfsi_used-1; mid++)
      if (ptr < di->cfsi_base[mid])
         break;
   vg_assert (lo - 1 == mid - 1);
#endif
   return lo - 1;
}
2431
2432
2433 /* Find a FPO-table index containing the specified pointer, or -1
2434 if not found. Binary search. */
2435
ML_(search_one_fpotab)2436 Word ML_(search_one_fpotab) ( const DebugInfo* di, Addr ptr )
2437 {
2438 Addr const addr = ptr - di->fpo_base_avma;
2439 Addr a_mid_lo, a_mid_hi;
2440 Word mid, size,
2441 lo = 0,
2442 hi = di->fpo_size-1;
2443 while (True) {
2444 /* current unsearched space is from lo to hi, inclusive. */
2445 if (lo > hi) return -1; /* not found */
2446 mid = (lo + hi) / 2;
2447 a_mid_lo = di->fpo[mid].ulOffStart;
2448 size = di->fpo[mid].cbProcSize;
2449 a_mid_hi = a_mid_lo + size - 1;
2450 vg_assert(a_mid_hi >= a_mid_lo);
2451 if (addr < a_mid_lo) { hi = mid-1; continue; }
2452 if (addr > a_mid_hi) { lo = mid+1; continue; }
2453 vg_assert(addr >= a_mid_lo && addr <= a_mid_hi);
2454 return mid;
2455 }
2456 }
2457
2458 /*--------------------------------------------------------------------*/
2459 /*--- end ---*/
2460 /*--------------------------------------------------------------------*/
2461