1 
2 /*--------------------------------------------------------------------*/
3 /*--- The address space manager: segment initialisation and        ---*/
4 /*--- tracking, stack operations                                   ---*/
5 /*---                                                              ---*/
6 /*--- Implementation for Linux (and Darwin!)   m_aspacemgr-linux.c ---*/
7 /*--------------------------------------------------------------------*/
8 
9 /*
10    This file is part of Valgrind, a dynamic binary instrumentation
11    framework.
12 
13    Copyright (C) 2000-2013 Julian Seward
14       jseward@acm.org
15 
16    This program is free software; you can redistribute it and/or
17    modify it under the terms of the GNU General Public License as
18    published by the Free Software Foundation; either version 2 of the
19    License, or (at your option) any later version.
20 
21    This program is distributed in the hope that it will be useful, but
22    WITHOUT ANY WARRANTY; without even the implied warranty of
23    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
24    General Public License for more details.
25 
26    You should have received a copy of the GNU General Public License
27    along with this program; if not, write to the Free Software
28    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29    02111-1307, USA.
30 
31    The GNU General Public License is contained in the file COPYING.
32 */
33 
34 #if defined(VGO_linux) || defined(VGO_darwin)
35 
36 /* *************************************************************
37    DO NOT INCLUDE ANY OTHER FILES HERE.
38    ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
39    AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
40    ************************************************************* */
41 
42 #include "priv_aspacemgr.h"
43 #include "config.h"
44 
45 
46 /* Note: many of the exported functions implemented below are
47    described more fully in comments in pub_core_aspacemgr.h.
48 */
49 
50 
51 /*-----------------------------------------------------------------*/
52 /*---                                                           ---*/
53 /*--- Overview.                                                 ---*/
54 /*---                                                           ---*/
55 /*-----------------------------------------------------------------*/
56 
57 /* Purpose
58    ~~~~~~~
59    The purpose of the address space manager (aspacem) is:
60 
61    (1) to record the disposition of all parts of the process' address
62        space at all times.
63 
64    (2) to the extent that it can, influence layout in ways favourable
65        to our purposes.
66 
67    It is important to appreciate that whilst it can and does attempt
68    to influence layout, and usually succeeds, it isn't possible to
69    impose absolute control: in the end, the kernel is the final
70    arbiter, and can always bounce our requests.
71 
72    Strategy
73    ~~~~~~~~
74    The strategy is therefore as follows:
75 
76    * Track ownership of mappings.  Each one can belong either to
77      Valgrind or to the client.
78 
79    * Try to place the client's fixed and hinted mappings at the
80      requested addresses.  Fixed mappings are allowed anywhere except
81      in areas reserved by Valgrind; the client can trash its own
82      mappings if it wants.  Hinted mappings are allowed providing they
83      fall entirely in free areas; if not, they will be placed by
84      aspacem in a free area.
85 
86    * Anonymous mappings are allocated so as to keep Valgrind and
87      client areas widely separated when possible.  If address space
88      runs low, then they may become intermingled: aspacem will attempt
89      to use all possible space.  But under most circumstances lack of
90      address space is not a problem and so the areas will remain far
91      apart.
92 
93      Searches for client space start at aspacem_cStart and will wrap
94      around the end of the available space if needed.  Searches for
95      Valgrind space start at aspacem_vStart and will also wrap around.
96      Because aspacem_cStart is approximately at the start of the
97      available space and aspacem_vStart is approximately in the
98      middle, for the most part the client anonymous mappings will be
99      clustered towards the start of available space, and Valgrind ones
100      in the middle.
101 
102      The available space is delimited by aspacem_minAddr and
103      aspacem_maxAddr.  aspacem is flexible and can operate with these
104      at any (sane) setting.  For 32-bit Linux, aspacem_minAddr is set
105      to some low-ish value at startup (64M) and aspacem_maxAddr is
106      derived from the stack pointer at system startup.  This seems a
107      reliable way to establish the initial boundaries.
108      A command line option allows the value of aspacem_minAddr to be
109      changed, so that memory-hungry applications can make use of the
110      lowest part of memory.
111 
112      64-bit Linux is similar except for the important detail that the
113      upper boundary is set to 64G.  The reason is so that all
114      anonymous mappings (basically all client data areas) are kept
115      below 64G, since that is the maximum range that memcheck can
116      track shadow memory using a fast 2-level sparse array.  It can go
117      beyond that but runs much more slowly.  The 64G limit is
118      arbitrary and is trivially changed.  So, with the current
119      settings, programs on 64-bit Linux will appear to run out of
120      address space and presumably fail at the 64G limit.  Given the
121      considerable space overhead of Memcheck, that means you should be
122      able to memcheckify programs that use up to about 32G natively.
123 
124    Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
125    anonymous mappings.  The client can still do fixed and hinted maps
126    at any addresses provided they do not overlap Valgrind's segments.
127    This makes Valgrind able to load prelinked .so's at their requested
128    addresses on 64-bit platforms, even if they are very high (eg,
129    112TB).
130 
131    At startup, aspacem establishes the usable limits, and advises
132    m_main to place the client stack at the top of the range, which on
133    a 32-bit machine will be just below the real initial stack.  One
134    effect of this is that self-hosting sort-of works, because an inner
135    valgrind will then place its client's stack just below its own
136    initial stack.
137 
138    The segment array and segment kinds
139    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
140    The central data structure is the segment array (segments[0
141    .. nsegments_used-1]).  This covers the entire address space in
142    order, giving account of every byte of it.  Free spaces are
143    represented explicitly as this makes many operations simpler.
144    Mergeable adjacent segments are aggressively merged so as to create
145    a "normalised" representation (preen_nsegments).
146 
147    There are 7 (mutually-exclusive) segment kinds, the meaning of
148    which is important:
149 
150    SkFree: a free space, which may be allocated either to Valgrind (V)
151       or the client (C).
152 
153    SkAnonC: an anonymous mapping belonging to C.  For these, aspacem
154       tracks a boolean indicating whether or not it is part of the
155       client's heap area (can't remember why).
156 
157    SkFileC: a file mapping belonging to C.
158 
159    SkShmC: a shared memory segment belonging to C.
160 
161    SkAnonV: an anonymous mapping belonging to V.  These cover all V's
162       dynamic memory needs, including non-client malloc/free areas,
163       shadow memory, and the translation cache.
164 
165    SkFileV: a file mapping belonging to V.  As far as I know these are
166       only created transiently for the purposes of reading debug info.
167 
168    SkResvn: a reservation segment.
169 
170    These are mostly straightforward.  Reservation segments have some
171    subtlety, however.
172 
173    A reservation segment is unmapped from the kernel's point of view,
174    but is an area in which aspacem will not create anonymous maps
175    (either Vs or Cs).  The idea is that we will try to keep it clear
176    when the choice to do so is ours.  Reservation segments are
177    'invisible' from the client's point of view: it may choose to park
178    a fixed mapping in the middle of one, and that's just tough -- we
179    can't do anything about that.  From the client's perspective
180    reservations are semantically equivalent to (although
181    distinguishable from, if it makes enquiries) free areas.
182 
183    Reservations are a primitive mechanism provided for whatever
184    purposes the rest of the system wants.  Currently they are used to
185    reserve the expansion space into which a growdown stack is
186    expanded, and into which the data segment is extended.  Note,
187    though, those uses are entirely external to this module, which only
188    supplies the primitives.
189 
190    Reservations may be shrunk in order that an adjoining anonymous
191    mapping may be extended.  This makes dataseg/stack expansion work.
192    A reservation may not be shrunk below one page.
193 
194    The advise/notify concept
195    ~~~~~~~~~~~~~~~~~~~~~~~~~
196    All mmap-related calls must be routed via aspacem.  Calling
197    sys_mmap directly from the rest of the system is very dangerous
198    because aspacem's data structures will become out of date.
199 
200    The fundamental mode of operation of aspacem is to support client
201    mmaps.  Here's what happens (in ML_(generic_PRE_sys_mmap)):
202 
203    * m_syswrap intercepts the mmap call.  It examines the parameters
204      and identifies the requested placement constraints.  There are
205      three possibilities: no constraint (MAny), hinted (MHint, "I
206      prefer X but will accept anything"), and fixed (MFixed, "X or
207      nothing").
208 
209    * This request is passed to VG_(am_get_advisory).  This decides on
210      a placement as described in detail in Strategy above.  It may
211      also indicate that the map should fail, because it would trash
212      one of Valgrind's areas, which would probably kill the system.
213 
214    * Control returns to the wrapper.  If VG_(am_get_advisory) has
215      declared that the map should fail, then it must be made to do so.
216      Usually, though, the request is considered acceptable, in which
217      case an "advised" address is supplied.  The advised address
218      replaces the original address supplied by the client, and
219      MAP_FIXED is set.
220 
221      Note at this point that although aspacem has been asked for
222      advice on where to place the mapping, no commitment has yet been
223      made by either it or the kernel.
224 
225    * The adjusted request is handed off to the kernel.
226 
227    * The kernel's result is examined.  If the map succeeded, aspacem
228      is told of the outcome (VG_(am_notify_client_mmap)), so it can
229      update its records accordingly.
230 
231   This then is the central advise-notify idiom for handling client
232   mmap/munmap/mprotect/shmat:
233 
234   * ask aspacem for an advised placement (or a veto)
235 
236   * if not vetoed, hand request to kernel, using the advised placement
237 
238   * examine result, and if successful, notify aspacem of the result.
239 
240   There are also many convenience functions, eg
241   VG_(am_mmap_anon_fixed_client), which do both phases entirely within
242   aspacem.
243 
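  As a purely illustrative sketch (not the actual m_syswrap code;
  names and signatures are simplified), the idiom amounts to:

     fill in a MapRequest req (MAny, MHint or MFixed, addr, len);
     advised = VG_(am_get_advisory)( &req, True, &ok );
     if (!ok)
        return failure to the client         -- the map was vetoed
     sres = hand the request to the kernel, at 'advised', MAP_FIXED;
     if (sres succeeded)
        VG_(am_notify_client_mmap)( advised, len, prot, flags,
                                    fd, offset );
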
244   To debug all this, a sync-checker is provided.  It reads
245   /proc/self/maps, compares what it sees with aspacem's records, and
246   complains if there is a difference.  --sanity-level=3 runs it before
247   and after each syscall, which is a powerful, if slow way of finding
248   buggy syscall wrappers.
249 
250   Loss of pointercheck
251   ~~~~~~~~~~~~~~~~~~~~
252   Up to and including Valgrind 2.4.1, x86 segmentation was used to
253   enforce separation of V and C, so that wild writes by C could not
254   trash V.  This got called "pointercheck".  Unfortunately, the new
255   more flexible memory layout, plus the need to be portable across
256   different architectures, means doing this in hardware is no longer
257   viable, and doing it in software is expensive.  So at the moment we
258   don't do it at all.
259 */
260 
261 
262 /*-----------------------------------------------------------------*/
263 /*---                                                           ---*/
264 /*--- The Address Space Manager's state.                        ---*/
265 /*---                                                           ---*/
266 /*-----------------------------------------------------------------*/
267 
268 /* ------ start of STATE for the address-space manager ------ */
269 
270 /* Max number of segments we can track.  On Android, virtual address
271    space is limited, so keep a low limit -- 5000 x sizeof(NSegment) is
272    360KB. */
273 #if defined(VGPV_arm_linux_android) || defined(VGPV_x86_linux_android)
274 # define VG_N_SEGMENTS 5000
275 #else
276 # define VG_N_SEGMENTS 30000
277 #endif
278 
279 /* Max number of segment file names we can track.  These are big (1002
280    bytes) so on Android limit the space usage to ~1MB. */
281 #if defined(VGPV_arm_linux_android) || defined(VGPV_x86_linux_android)
282 # define VG_N_SEGNAMES 1000
283 #else
284 # define VG_N_SEGNAMES 6000
285 #endif
286 
287 /* Max length of a segment file name. */
288 #define VG_MAX_SEGNAMELEN 1000
289 
290 
291 typedef
292    struct {
293       Bool  inUse;
294       Bool  mark;
295       HChar fname[VG_MAX_SEGNAMELEN];
296    }
297    SegName;
298 
299 /* Filename table.  _used is the high water mark; an entry is only
300    valid if its index >= 0, < _used, and its .inUse field == True.
301    The .mark field is used to garbage-collect dead entries.
302 */
303 static SegName segnames[VG_N_SEGNAMES];
304 static Int     segnames_used = 0;
305 
306 
307 /* Array [0 .. nsegments_used-1] of all mappings. */
308 /* Sorted by .addr field. */
309 /* I: len may not be zero. */
310 /* I: overlapping segments are not allowed. */
311 /* I: the segments cover the entire address space precisely. */
312 /* Each segment can optionally hold an index into the filename table. */
313 
314 static NSegment nsegments[VG_N_SEGMENTS];
315 static Int      nsegments_used = 0;
316 
317 #define Addr_MIN ((Addr)0)
318 #define Addr_MAX ((Addr)(-1ULL))
319 
320 /* Limits etc */
321 
322 
323 Addr VG_(clo_aspacem_minAddr)
324 #if defined(VGO_darwin)
325 # if VG_WORDSIZE == 4
326    = (Addr) 0x00001000;
327 # else
328    = (Addr) 0x100000000;  // 4GB page zero
329 # endif
330 #else
331    = (Addr) 0x04000000; // 64M
332 #endif
333 
334 
335 // The smallest address that aspacem will try to allocate
336 static Addr aspacem_minAddr = 0;
337 
338 // The largest address that aspacem will try to allocate
339 static Addr aspacem_maxAddr = 0;
340 
341 // Where aspacem will start looking for client space
342 static Addr aspacem_cStart = 0;
343 
344 // Where aspacem will start looking for Valgrind space
345 static Addr aspacem_vStart = 0;
346 
347 
348 #define AM_SANITY_CHECK                                      \
349    do {                                                      \
350       if (VG_(clo_sanity_level) >= 3)                        \
351          aspacem_assert(VG_(am_do_sync_check)                \
352             (__PRETTY_FUNCTION__,__FILE__,__LINE__));        \
353    } while (0)
354 
355 /* ------ end of STATE for the address-space manager ------ */
356 
357 /* ------ Forwards decls ------ */
358 inline
359 static Int  find_nsegment_idx ( Addr a );
360 
361 static void parse_procselfmaps (
362       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
363                               ULong dev, ULong ino, Off64T offset,
364                               const HChar* filename ),
365       void (*record_gap)( Addr addr, SizeT len )
366    );
367 
368 /* ----- Hacks to do with the "commpage" on arm-linux ----- */
369 /* Not that I have anything against the commpage per se.  It's just
370    that it's not listed in /proc/self/maps, which is a royal PITA --
371    we have to fake it up, in parse_procselfmaps.
372 
373    But note also bug 254556 comment #2: this is now fixed in newer
374    kernels -- it is listed as a "[vectors]" entry.  Presumably the
375    fake entry made here duplicates the [vectors] entry, and so, if at
376    some point in the future, we can stop supporting buggy kernels,
377    then this kludge can be removed entirely, since the procmap parser
378    below will read that entry in the normal way. */
379 #if defined(VGP_arm_linux)
380 #  define ARM_LINUX_FAKE_COMMPAGE_START 0xFFFF0000
381 #  define ARM_LINUX_FAKE_COMMPAGE_END1  0xFFFF1000
382 #endif
383 
384 
385 /*-----------------------------------------------------------------*/
386 /*---                                                           ---*/
387 /*--- SegName array management.                                 ---*/
388 /*---                                                           ---*/
389 /*-----------------------------------------------------------------*/
390 
391 /* Searches the filename table to find an index for the given name.
392    If none is found, an index is allocated and the name stored.  If no
393    space is available we just give up.  If the string is too long to
394    store, return -1.
395 */
396 static Int allocate_segname ( const HChar* name )
397 {
398    Int i, j, len;
399 
400    aspacem_assert(name);
401 
402    if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
403 
404    len = VG_(strlen)(name);
405    if (len >= VG_MAX_SEGNAMELEN-1) {
406       return -1;
407    }
408 
409    /* first see if we already have the name. */
410    for (i = 0; i < segnames_used; i++) {
411       if (!segnames[i].inUse)
412          continue;
413       if (0 == VG_(strcmp)(name, &segnames[i].fname[0])) {
414          return i;
415       }
416    }
417 
418    /* no we don't.  So look for a free slot. */
419    for (i = 0; i < segnames_used; i++)
420       if (!segnames[i].inUse)
421          break;
422 
423    if (i == segnames_used) {
424       /* no free slots .. advance the high-water mark. */
425       if (segnames_used+1 < VG_N_SEGNAMES) {
426          i = segnames_used;
427          segnames_used++;
428       } else {
429          ML_(am_barf_toolow)("VG_N_SEGNAMES");
430       }
431    }
432 
433    /* copy it in */
434    segnames[i].inUse = True;
435    for (j = 0; j < len; j++)
436       segnames[i].fname[j] = name[j];
437    aspacem_assert(len < VG_MAX_SEGNAMELEN);
438    segnames[i].fname[len] = 0;
439    return i;
440 }
441 
442 
443 /*-----------------------------------------------------------------*/
444 /*---                                                           ---*/
445 /*--- Displaying the segment array.                             ---*/
446 /*---                                                           ---*/
447 /*-----------------------------------------------------------------*/
448 
449 static const HChar* show_SegKind ( SegKind sk )
450 {
451    switch (sk) {
452       case SkFree:  return "    ";
453       case SkAnonC: return "anon";
454       case SkAnonV: return "ANON";
455       case SkFileC: return "file";
456       case SkFileV: return "FILE";
457       case SkShmC:  return "shm ";
458       case SkResvn: return "RSVN";
459       default:      return "????";
460    }
461 }
462 
463 static const HChar* show_ShrinkMode ( ShrinkMode sm )
464 {
465    switch (sm) {
466       case SmLower: return "SmLower";
467       case SmUpper: return "SmUpper";
468       case SmFixed: return "SmFixed";
469       default: return "Sm?????";
470    }
471 }
472 
473 static void show_len_concisely ( /*OUT*/HChar* buf, Addr start, Addr end )
474 {
475    const HChar* fmt;
476    ULong len = ((ULong)end) - ((ULong)start) + 1;
477 
478    if (len < 10*1000*1000ULL) {
479       fmt = "%7llu";
480    }
481    else if (len < 999999ULL * (1ULL<<20)) {
482       fmt = "%6llum";
483       len >>= 20;
484    }
485    else if (len < 999999ULL * (1ULL<<30)) {
486       fmt = "%6llug";
487       len >>= 30;
488    }
489    else if (len < 999999ULL * (1ULL<<40)) {
490       fmt = "%6llut";
491       len >>= 40;
492    }
493    else {
494       fmt = "%6llue";
495       len >>= 50;
496    }
497    ML_(am_sprintf)(buf, fmt, len);
498 }
499 
500 
501 /* Show full details of an NSegment */
502 
503 static void __attribute__ ((unused))
504             show_nsegment_full ( Int logLevel, Int segNo, NSegment* seg )
505 {
506    HChar len_buf[20];
507    const HChar* name = "(none)";
508 
509    if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
510                        && segnames[seg->fnIdx].inUse
511                        && segnames[seg->fnIdx].fname[0] != 0)
512       name = segnames[seg->fnIdx].fname;
513 
514    show_len_concisely(len_buf, seg->start, seg->end);
515 
516    VG_(debugLog)(
517       logLevel, "aspacem",
518       "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s "
519       "d=0x%03llx i=%-7lld o=%-7lld (%d) m=%d %s\n",
520       segNo, show_SegKind(seg->kind),
521       (ULong)seg->start, (ULong)seg->end, len_buf,
522       seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
523       seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
524       seg->isCH ? 'H' : '-',
525       show_ShrinkMode(seg->smode),
526       seg->dev, seg->ino, seg->offset, seg->fnIdx,
527       (Int)seg->mark, name
528    );
529 }
530 
531 
532 /* Show an NSegment in a user-friendly-ish way. */
533 
534 static void show_nsegment ( Int logLevel, Int segNo, NSegment* seg )
535 {
536    HChar len_buf[20];
537    show_len_concisely(len_buf, seg->start, seg->end);
538 
539    switch (seg->kind) {
540 
541       case SkFree:
542          VG_(debugLog)(
543             logLevel, "aspacem",
544             "%3d: %s %010llx-%010llx %s\n",
545             segNo, show_SegKind(seg->kind),
546             (ULong)seg->start, (ULong)seg->end, len_buf
547          );
548          break;
549 
550       case SkAnonC: case SkAnonV: case SkShmC:
551          VG_(debugLog)(
552             logLevel, "aspacem",
553             "%3d: %s %010llx-%010llx %s %c%c%c%c%c\n",
554             segNo, show_SegKind(seg->kind),
555             (ULong)seg->start, (ULong)seg->end, len_buf,
556             seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
557             seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
558             seg->isCH ? 'H' : '-'
559          );
560          break;
561 
562       case SkFileC: case SkFileV:
563          VG_(debugLog)(
564             logLevel, "aspacem",
565             "%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
566             "i=%-7lld o=%-7lld (%d)\n",
567             segNo, show_SegKind(seg->kind),
568             (ULong)seg->start, (ULong)seg->end, len_buf,
569             seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
570             seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
571             seg->isCH ? 'H' : '-',
572             seg->dev, seg->ino, seg->offset, seg->fnIdx
573          );
574          break;
575 
576       case SkResvn:
577          VG_(debugLog)(
578             logLevel, "aspacem",
579             "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
580             segNo, show_SegKind(seg->kind),
581             (ULong)seg->start, (ULong)seg->end, len_buf,
582             seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
583             seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
584             seg->isCH ? 'H' : '-',
585             show_ShrinkMode(seg->smode)
586          );
587          break;
588 
589       default:
590          VG_(debugLog)(
591             logLevel, "aspacem",
592             "%3d: ???? UNKNOWN SEGMENT KIND\n",
593             segNo
594          );
595          break;
596    }
597 }
598 
599 /* Print out the segment array (debugging only!). */
600 void VG_(am_show_nsegments) ( Int logLevel, const HChar* who )
601 {
602    Int i;
603    VG_(debugLog)(logLevel, "aspacem",
604                  "<<< SHOW_SEGMENTS: %s (%d segments, %d segnames)\n",
605                  who, nsegments_used, segnames_used);
606    for (i = 0; i < segnames_used; i++) {
607       if (!segnames[i].inUse)
608          continue;
609       VG_(debugLog)(logLevel, "aspacem",
610                     "(%2d) %s\n", i, segnames[i].fname);
611    }
612    for (i = 0; i < nsegments_used; i++)
613      show_nsegment( logLevel, i, &nsegments[i] );
614    VG_(debugLog)(logLevel, "aspacem",
615                  ">>>\n");
616 }
617 
618 
619 /* Get the filename corresponding to this segment, if known and if it
620    has one.  The returned name's storage cannot be assumed to be
621    persistent, so the caller should immediately copy the name
622    elsewhere. */
623 HChar* VG_(am_get_filename)( NSegment const * seg )
624 {
625    Int i;
626    aspacem_assert(seg);
627    i = seg->fnIdx;
628    if (i < 0 || i >= segnames_used || !segnames[i].inUse)
629       return NULL;
630    else
631       return &segnames[i].fname[0];
632 }
633 
634 /* Collect up the start addresses of all non-free, non-resvn segments.
635    The interface is a bit strange in order to avoid potential
636    segment-creation races caused by dynamic allocation of the result
637    buffer *starts.
638 
639    The function first computes how many entries in the result
640    buffer *starts will be needed.  If this number <= nStarts,
641    they are placed in starts[0..], and the number is returned.
642    If nStarts is not large enough, nothing is written to
643    starts[0..], and the negation of the size is returned.
644 
645    Correct use of this function may mean calling it multiple times in
646    order to establish a suitably-sized buffer. */
647 
648 Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
649 {
650    Int i, j, nSegs;
651 
652    /* don't pass dumbass arguments */
653    aspacem_assert(nStarts >= 0);
654 
655    nSegs = 0;
656    for (i = 0; i < nsegments_used; i++) {
657       if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
658          continue;
659       nSegs++;
660    }
661 
662    if (nSegs > nStarts) {
663       /* The buffer isn't big enough.  Tell the caller how big it needs
664          to be. */
665       return -nSegs;
666    }
667 
668    /* There's enough space.  So write into the result buffer. */
669    aspacem_assert(nSegs <= nStarts);
670 
671    j = 0;
672    for (i = 0; i < nsegments_used; i++) {
673       if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
674          continue;
675       starts[j] = nsegments[i].start;
676       j++;
677    }
678 
679    aspacem_assert(j == nSegs); /* this should not fail */
680    return nSegs;
681 }
682 
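/* The following is an illustrative sketch only, added for exposition;
   it is not part of the original aspacem code and the function name is
   hypothetical.  It shows the intended size-then-retry protocol for
   VG_(am_get_segment_starts), albeit with a fixed-size buffer and no
   actual retry. */
static Int __attribute__ ((unused))
           example_count_mapped_segments ( void )
{
   Addr starts[100];
   Int  nSegs = VG_(am_get_segment_starts)( starts, 100 );
   if (nSegs < 0) {
      /* Buffer too small; -nSegs entries would have been needed.  A
         real caller would obtain a big enough buffer and call again;
         here we just report the required count. */
      return -nSegs;
   }
   /* starts[0 .. nSegs-1] now hold the start addresses of all
      non-free, non-reservation segments. */
   return nSegs;
}
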
683 
684 /*-----------------------------------------------------------------*/
685 /*---                                                           ---*/
686 /*--- Sanity checking and preening of the segment array.        ---*/
687 /*---                                                           ---*/
688 /*-----------------------------------------------------------------*/
689 
690 /* Check representational invariants for NSegments. */
691 
692 static Bool sane_NSegment ( NSegment* s )
693 {
694    if (s == NULL) return False;
695 
696    /* No zero sized segments and no wraparounds. */
697    if (s->start >= s->end) return False;
698 
699    /* .mark is used for admin purposes only. */
700    if (s->mark) return False;
701 
702    /* require page alignment */
703    if (!VG_IS_PAGE_ALIGNED(s->start)) return False;
704    if (!VG_IS_PAGE_ALIGNED(s->end+1)) return False;
705 
706    switch (s->kind) {
707 
708       case SkFree:
709          return
710             s->smode == SmFixed
711             && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
712             && !s->hasR && !s->hasW && !s->hasX && !s->hasT
713             && !s->isCH;
714 
715       case SkAnonC: case SkAnonV: case SkShmC:
716          return
717             s->smode == SmFixed
718             && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
719             && (s->kind==SkAnonC ? True : !s->isCH);
720 
721       case SkFileC: case SkFileV:
722          return
723             s->smode == SmFixed
724             && (s->fnIdx == -1 ||
725                 (s->fnIdx >= 0 && s->fnIdx < segnames_used
726                                && segnames[s->fnIdx].inUse))
727             && !s->isCH;
728 
729       case SkResvn:
730          return
731             s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
732             && !s->hasR && !s->hasW && !s->hasX && !s->hasT
733             && !s->isCH;
734 
735       default:
736          return False;
737    }
738 }
739 
740 
741 /* Try merging s2 into s1, if possible.  If successful, s1 is
742    modified, and True is returned.  Otherwise s1 is unchanged and
743    False is returned. */
744 
745 static Bool maybe_merge_nsegments ( NSegment* s1, NSegment* s2 )
746 {
747    if (s1->kind != s2->kind)
748       return False;
749 
750    if (s1->end+1 != s2->start)
751       return False;
752 
753    /* reject cases which would cause wraparound */
754    if (s1->start > s2->end)
755       return False;
756 
757    switch (s1->kind) {
758 
759       case SkFree:
760          s1->end = s2->end;
761          return True;
762 
763       case SkAnonC: case SkAnonV:
764          if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
765              && s1->hasX == s2->hasX && s1->isCH == s2->isCH) {
766             s1->end = s2->end;
767             s1->hasT |= s2->hasT;
768             return True;
769          }
770          break;
771 
772       case SkFileC: case SkFileV:
773          if (s1->hasR == s2->hasR
774              && s1->hasW == s2->hasW && s1->hasX == s2->hasX
775              && s1->dev == s2->dev && s1->ino == s2->ino
776              && s2->offset == s1->offset
777                               + ((ULong)s2->start) - ((ULong)s1->start) ) {
778             s1->end = s2->end;
779             s1->hasT |= s2->hasT;
780             return True;
781          }
782          break;
783 
784       case SkShmC:
785          return False;
786 
787       case SkResvn:
788          if (s1->smode == SmFixed && s2->smode == SmFixed) {
789             s1->end = s2->end;
790             return True;
791          }
792 
793       default:
794          break;
795 
796    }
797 
798    return False;
799 }
800 
801 
802 /* Sanity-check and canonicalise the segment array (merge mergable
803    segments).  Returns True if any segments were merged. */
804 
805 static Bool preen_nsegments ( void )
806 {
807    Int i, j, r, w, nsegments_used_old = nsegments_used;
808 
809    /* Pass 1: check the segment array covers the entire address space
810       exactly once, and also that each segment is sane. */
811    aspacem_assert(nsegments_used > 0);
812    aspacem_assert(nsegments[0].start == Addr_MIN);
813    aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
814 
815    aspacem_assert(sane_NSegment(&nsegments[0]));
816    for (i = 1; i < nsegments_used; i++) {
817       aspacem_assert(sane_NSegment(&nsegments[i]));
818       aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
819    }
820 
821    /* Pass 2: merge as much as possible, using
822       maybe_merge_segments. */
823    w = 0;
824    for (r = 1; r < nsegments_used; r++) {
825       if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
826          /* nothing */
827       } else {
828          w++;
829          if (w != r)
830             nsegments[w] = nsegments[r];
831       }
832    }
833    w++;
834    aspacem_assert(w > 0 && w <= nsegments_used);
835    nsegments_used = w;
836 
837    /* Pass 3: free up unused string table slots */
838    /* clear mark bits */
839    for (i = 0; i < segnames_used; i++)
840       segnames[i].mark = False;
841    /* mark */
842    for (i = 0; i < nsegments_used; i++) {
843      j = nsegments[i].fnIdx;
844       aspacem_assert(j >= -1 && j < segnames_used);
845       if (j >= 0) {
846          aspacem_assert(segnames[j].inUse);
847          segnames[j].mark = True;
848       }
849    }
850    /* release */
851    for (i = 0; i < segnames_used; i++) {
852       if (segnames[i].mark == False) {
853          segnames[i].inUse = False;
854          segnames[i].fname[0] = 0;
855       }
856    }
857 
858    return nsegments_used != nsegments_used_old;
859 }
860 
861 
862 /* Check the segment array corresponds with the kernel's view of
863    memory layout.  sync_check_ok returns True if no anomalies were
864    found, else False.  In the latter case the mismatching segments are
865    displayed.
866 
867    The general idea is: we get the kernel to show us all its segments
868    and also the gaps in between.  For each such interval, try and find
869    a sequence of appropriate intervals in our segment array which
870    cover or more than cover the kernel's interval, and which all have
871    suitable kinds/permissions etc.
872 
873    Although any specific kernel interval is not matched exactly to a
874    valgrind interval or sequence thereof, eventually any disagreement
875    on mapping boundaries will be detected.  This is because, if for
876    example valgrind's intervals cover a greater range than the current
877    kernel interval, it must be the case that a neighbouring free-space
878    interval belonging to valgrind cannot cover the neighbouring
879    free-space interval belonging to the kernel.  So the disagreement
880    is detected.
881 
882    In other words, we examine each kernel interval in turn, and check
883    we do not disagree over the range of that interval.  Because all of
884    the address space is examined, any disagreements must eventually be
885    detected.
886 */
887 
888 static Bool sync_check_ok = False;
889 
890 static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
891                                           ULong dev, ULong ino, Off64T offset,
892                                           const HChar* filename )
893 {
894    Int  iLo, iHi, i;
895    Bool sloppyXcheck;
896 
897    /* If a problem has already been detected, don't continue comparing
898       segments, so as to avoid flooding the output with error
899       messages. */
900 #if !defined(VGO_darwin)
901    /* GrP fixme not */
902    if (!sync_check_ok)
903       return;
904 #endif
905    if (len == 0)
906       return;
907 
908    /* The kernel should not give us wraparounds. */
909    aspacem_assert(addr <= addr + len - 1);
910 
911    iLo = find_nsegment_idx( addr );
912    iHi = find_nsegment_idx( addr + len - 1 );
913 
914    /* These 5 should be guaranteed by find_nsegment_idx. */
915    aspacem_assert(0 <= iLo && iLo < nsegments_used);
916    aspacem_assert(0 <= iHi && iHi < nsegments_used);
917    aspacem_assert(iLo <= iHi);
918    aspacem_assert(nsegments[iLo].start <= addr );
919    aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
920 
921    /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
922       most recent NX-bit enabled CPUs) and so recent kernels attempt
923       to provide execute protection by placing all executable mappings
924       low down in the address space and then reducing the size of the
925       code segment to prevent code at higher addresses being executed.
926 
927       These kernels report which mappings are really executable in
928       the /proc/self/maps output rather than mirroring what was asked
929       for when each mapping was created. In order to cope with this we
930       have a sloppyXcheck mode which we enable on x86 and s390 - in this
931       mode we allow the kernel to report execute permission when we weren't
932       expecting it but not vice versa. */
933 #  if defined(VGA_x86) || defined (VGA_s390x)
934    sloppyXcheck = True;
935 #  else
936    sloppyXcheck = False;
937 #  endif
938 
939    /* NSegments iLo .. iHi inclusive should agree with the presented
940       data. */
941    for (i = iLo; i <= iHi; i++) {
942 
943       Bool same, cmp_offsets, cmp_devino;
944       UInt seg_prot;
945 
946       /* compare the kernel's offering against ours. */
947       same = nsegments[i].kind == SkAnonC
948              || nsegments[i].kind == SkAnonV
949              || nsegments[i].kind == SkFileC
950              || nsegments[i].kind == SkFileV
951              || nsegments[i].kind == SkShmC;
952 
953       seg_prot = 0;
954       if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
955       if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
956       if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
957 
958       cmp_offsets
959          = nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
960 
961       cmp_devino
962          = nsegments[i].dev != 0 || nsegments[i].ino != 0;
963 
964       /* Consider other reasons to not compare dev/inode */
965 #if defined(VGO_linux)
966       /* bproc does some godawful hack on /dev/zero at process
967          migration, which changes the name of it, and its dev & ino */
968       if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
969          cmp_devino = False;
970 
971       /* hack apparently needed on MontaVista Linux */
972       if (filename && VG_(strstr)(filename, "/.lib-ro/"))
973          cmp_devino = False;
974 #endif
975 
976 #if defined(VGO_darwin)
977       // GrP fixme kernel info doesn't have dev/inode
978       cmp_devino = False;
979 
980       // GrP fixme V and kernel don't agree on offsets
981       cmp_offsets = False;
982 #endif
983 
984       /* If we are doing sloppy execute permission checks then we
985          allow segment to have X permission when we weren't expecting
986          it (but not vice versa) so if the kernel reported execute
987          permission then pretend that this segment has it regardless
988          of what we were expecting. */
989       if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
990          seg_prot |= VKI_PROT_EXEC;
991       }
992 
993       same = same
994              && seg_prot == prot
995              && (cmp_devino
996                    ? (nsegments[i].dev == dev && nsegments[i].ino == ino)
997                    : True)
998              && (cmp_offsets
999                    ? nsegments[i].start-nsegments[i].offset == addr-offset
1000                    : True);
1001       if (!same) {
1002          Addr start = addr;
1003          Addr end = start + len - 1;
1004          HChar len_buf[20];
1005          show_len_concisely(len_buf, start, end);
1006 
1007          sync_check_ok = False;
1008 
1009          VG_(debugLog)(
1010             0,"aspacem",
1011               "segment mismatch: V's seg 1st, kernel's 2nd:\n");
1012          show_nsegment_full( 0, i, &nsegments[i] );
1013          VG_(debugLog)(0,"aspacem",
1014             "...: .... %010llx-%010llx %s %c%c%c.. ....... "
1015             "d=0x%03llx i=%-7lld o=%-7lld (.) m=. %s\n",
1016             (ULong)start, (ULong)end, len_buf,
1017             prot & VKI_PROT_READ  ? 'r' : '-',
1018             prot & VKI_PROT_WRITE ? 'w' : '-',
1019             prot & VKI_PROT_EXEC  ? 'x' : '-',
1020             dev, ino, offset, filename ? filename : "(none)" );
1021 
1022          return;
1023       }
1024    }
1025 
1026    /* Looks harmless.  Keep going. */
1027    return;
1028 }
1029 
1030 static void sync_check_gap_callback ( Addr addr, SizeT len )
1031 {
1032    Int iLo, iHi, i;
1033 
1034    /* If a problem has already been detected, don't continue comparing
1035       segments, so as to avoid flooding the output with error
1036       messages. */
1037 #if !defined(VGO_darwin)
1038    /* GrP fixme not */
1039    if (!sync_check_ok)
1040       return;
1041 #endif
1042    if (len == 0)
1043       return;
1044 
1045    /* The kernel should not give us wraparounds. */
1046    aspacem_assert(addr <= addr + len - 1);
1047 
1048    iLo = find_nsegment_idx( addr );
1049    iHi = find_nsegment_idx( addr + len - 1 );
1050 
1051    /* These 5 should be guaranteed by find_nsegment_idx. */
1052    aspacem_assert(0 <= iLo && iLo < nsegments_used);
1053    aspacem_assert(0 <= iHi && iHi < nsegments_used);
1054    aspacem_assert(iLo <= iHi);
1055    aspacem_assert(nsegments[iLo].start <= addr );
1056    aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
1057 
1058    /* NSegments iLo .. iHi inclusive should agree with the presented
1059       data. */
1060    for (i = iLo; i <= iHi; i++) {
1061 
1062       Bool same;
1063 
1064       /* compare the kernel's offering against ours. */
1065       same = nsegments[i].kind == SkFree
1066              || nsegments[i].kind == SkResvn;
1067 
1068       if (!same) {
1069          Addr start = addr;
1070          Addr end = start + len - 1;
1071          HChar len_buf[20];
1072          show_len_concisely(len_buf, start, end);
1073 
1074          sync_check_ok = False;
1075 
1076          VG_(debugLog)(
1077             0,"aspacem",
1078               "segment mismatch: V's gap 1st, kernel's 2nd:\n");
1079          show_nsegment_full( 0, i, &nsegments[i] );
1080          VG_(debugLog)(0,"aspacem",
1081             "   : .... %010llx-%010llx %s\n",
1082             (ULong)start, (ULong)end, len_buf);
1083          return;
1084       }
1085    }
1086 
1087    /* Looks harmless.  Keep going. */
1088    return;
1089 }
1090 
1091 
1092 /* Sanity check: check that Valgrind and the kernel agree on the
1093    address space layout.  Prints offending segments and call point if
1094    a discrepancy is detected, but does not abort the system.  Returned
1095    Bool is False if a discrepancy was found. */
1096 
1097 Bool VG_(am_do_sync_check) ( const HChar* fn,
1098                              const HChar* file, Int line )
1099 {
1100    sync_check_ok = True;
1101    if (0)
1102       VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
1103    parse_procselfmaps( sync_check_mapping_callback,
1104                        sync_check_gap_callback );
1105    if (!sync_check_ok) {
1106       VG_(debugLog)(0,"aspacem",
1107                       "sync check at %s:%d (%s): FAILED\n",
1108                       file, line, fn);
1109       VG_(debugLog)(0,"aspacem", "\n");
1110 
1111 #     if 0
1112       {
1113          HChar buf[100];
1114          VG_(am_show_nsegments)(0,"post syncheck failure");
1115          VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
1116          VG_(system)(buf);
1117       }
1118 #     endif
1119 
1120    }
1121    return sync_check_ok;
1122 }
1123 
1124 /* Hook to allow sanity checks to be done from aspacemgr-common.c. */
1125 void ML_(am_do_sanity_check)( void )
1126 {
1127    AM_SANITY_CHECK;
1128 }
1129 
1130 
1131 /*-----------------------------------------------------------------*/
1132 /*---                                                           ---*/
1133 /*--- Low level access / modification of the segment array.     ---*/
1134 /*---                                                           ---*/
1135 /*-----------------------------------------------------------------*/
1136 
1137 /* Binary search the interval array for a given address.  Since the
1138    array covers the entire address space the search cannot fail.  The
1139    _WRK function does the real work.  Its caller (just below) caches
1140    the results thereof, to save time.  With N_CACHE of 63 we get a hit
1141    rate exceeding 90% when running OpenOffice.
1142 
1143    Re ">> 12", it doesn't matter that the page size of some targets
1144    might be different from 12.  Really "(a >> 12) % N_CACHE" is merely
1145    a hash function, and the actual cache entry is always validated
1146    correctly against the selected cache entry before use.
1147 */
1148 /* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
1149 __attribute__((noinline))
1150 static Int find_nsegment_idx_WRK ( Addr a )
1151 {
1152    Addr a_mid_lo, a_mid_hi;
1153    Int  mid,
1154         lo = 0,
1155         hi = nsegments_used-1;
1156    while (True) {
1157       /* current unsearched space is from lo to hi, inclusive. */
1158       if (lo > hi) {
1159          /* Not found.  This can't happen. */
1160          ML_(am_barf)("find_nsegment_idx: not found");
1161       }
1162       mid      = (lo + hi) / 2;
1163       a_mid_lo = nsegments[mid].start;
1164       a_mid_hi = nsegments[mid].end;
1165 
1166       if (a < a_mid_lo) { hi = mid-1; continue; }
1167       if (a > a_mid_hi) { lo = mid+1; continue; }
1168       aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
1169       aspacem_assert(0 <= mid && mid < nsegments_used);
1170       return mid;
1171    }
1172 }
1173 
1174 inline static Int find_nsegment_idx ( Addr a )
1175 {
1176 #  define N_CACHE 131 /*prime*/
1177    static Addr cache_pageno[N_CACHE];
1178    static Int  cache_segidx[N_CACHE];
1179    static Bool cache_inited = False;
1180 
1181    static UWord n_q = 0;
1182    static UWord n_m = 0;
1183 
1184    UWord ix;
1185 
1186    if (LIKELY(cache_inited)) {
1187       /* do nothing */
1188    } else {
1189       for (ix = 0; ix < N_CACHE; ix++) {
1190          cache_pageno[ix] = 0;
1191          cache_segidx[ix] = -1;
1192       }
1193       cache_inited = True;
1194    }
1195 
1196    ix = (a >> 12) % N_CACHE;
1197 
1198    n_q++;
1199    if (0 && 0 == (n_q & 0xFFFF))
1200       VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
1201 
1202    if ((a >> 12) == cache_pageno[ix]
1203        && cache_segidx[ix] >= 0
1204        && cache_segidx[ix] < nsegments_used
1205        && nsegments[cache_segidx[ix]].start <= a
1206        && a <= nsegments[cache_segidx[ix]].end) {
1207       /* hit */
1208       /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
1209       return cache_segidx[ix];
1210    }
1211    /* miss */
1212    n_m++;
1213    cache_segidx[ix] = find_nsegment_idx_WRK(a);
1214    cache_pageno[ix] = a >> 12;
1215    return cache_segidx[ix];
1216 #  undef N_CACHE
1217 }
1218 
1219 
1220 
1221 /* Finds the segment containing 'a'.  Only returns file/anon/resvn
1222    segments.  This returns a 'NSegment const *' - a pointer to
1223    readonly data. */
1224 NSegment const * VG_(am_find_nsegment) ( Addr a )
1225 {
1226    Int i = find_nsegment_idx(a);
1227    aspacem_assert(i >= 0 && i < nsegments_used);
1228    aspacem_assert(nsegments[i].start <= a);
1229    aspacem_assert(a <= nsegments[i].end);
1230    if (nsegments[i].kind == SkFree)
1231       return NULL;
1232    else
1233       return &nsegments[i];
1234 }
1235 
1236 
1237 /* Given a pointer to a seg, tries to figure out which one it is in
1238    nsegments[..].  Very paranoid. */
1239 static Int segAddr_to_index ( const NSegment* seg )
1240 {
1241    Int i;
1242    if (seg < &nsegments[0] || seg >= &nsegments[nsegments_used])
1243       return -1;
1244    i = ((const UChar*)seg - (const UChar*)(&nsegments[0])) / sizeof(NSegment);
1245    if (i < 0 || i >= nsegments_used)
1246       return -1;
1247    if (seg == &nsegments[i])
1248       return i;
1249    return -1;
1250 }
1251 
1252 
1253 /* Find the next segment along from 'here', if it is a file/anon/resvn
1254    segment. */
1255 NSegment const * VG_(am_next_nsegment) ( const NSegment* here, Bool fwds )
1256 {
1257    Int i = segAddr_to_index(here);
1258    if (i < 0 || i >= nsegments_used)
1259       return NULL;
1260    if (fwds) {
1261       i++;
1262       if (i >= nsegments_used)
1263          return NULL;
1264    } else {
1265       i--;
1266       if (i < 0)
1267          return NULL;
1268    }
1269    switch (nsegments[i].kind) {
1270       case SkFileC: case SkFileV: case SkShmC:
1271       case SkAnonC: case SkAnonV: case SkResvn:
1272          return &nsegments[i];
1273       default:
1274          break;
1275    }
1276    return NULL;
1277 }
1278 
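/* Illustrative sketch only, added for exposition; not part of the
   original aspacem code, and the function name is hypothetical.  It
   shows the usual lookup-then-walk pattern built from
   VG_(am_find_nsegment) and VG_(am_next_nsegment). */
static Bool __attribute__ ((unused))
            example_segment_has_next ( Addr a )
{
   NSegment const* seg = VG_(am_find_nsegment)(a);
   if (seg == NULL)
      return False;   /* 'a' lies in a free area */
   /* Walk forwards to the next file/anon/shm/resvn segment, if any. */
   seg = VG_(am_next_nsegment)( seg, True/*fwds*/ );
   return seg != NULL;
}
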
1279 
1280 /* Trivial fn: return the total amount of space in anonymous mappings,
1281    both for V and the client.  Is used for printing stats in
1282    out-of-memory messages. */
1283 ULong VG_(am_get_anonsize_total)( void )
1284 {
1285    Int   i;
1286    ULong total = 0;
1287    for (i = 0; i < nsegments_used; i++) {
1288       if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV) {
1289          total += (ULong)nsegments[i].end
1290                   - (ULong)nsegments[i].start + 1ULL;
1291       }
1292    }
1293    return total;
1294 }
1295 
1296 
1297 /* Test if a piece of memory is addressable by client or by valgrind with at
1298    least the "prot" protection permissions by examining the underlying
1299    segments.  If client && freeOk is True then SkFree areas are also allowed.
1300 */
1301 static
1302 Bool is_valid_for( Bool client, Addr start, SizeT len, UInt prot, Bool freeOk )
1303 {
1304    Int  i, iLo, iHi;
1305    Bool needR, needW, needX;
1306 
1307    if (len == 0)
1308       return True; /* somewhat dubious case */
1309    if (start + len < start)
1310       return False; /* reject wraparounds */
1311 
1312    needR = toBool(prot & VKI_PROT_READ);
1313    needW = toBool(prot & VKI_PROT_WRITE);
1314    needX = toBool(prot & VKI_PROT_EXEC);
1315 
1316    iLo = find_nsegment_idx(start);
1317    aspacem_assert(start >= nsegments[iLo].start);
1318 
1319    if (start+len-1 <= nsegments[iLo].end) {
1320       /* This is a speedup hack which avoids calling find_nsegment_idx
1321          a second time when possible.  It is always correct to just
1322          use the "else" clause below, but is_valid_for_client is
1323          called a lot by the leak checker, so avoiding pointless calls
1324          to find_nsegment_idx, which can be expensive, is helpful. */
1325       iHi = iLo;
1326    } else {
1327       iHi = find_nsegment_idx(start + len - 1);
1328    }
1329 
1330    if (client) {
1331       for (i = iLo; i <= iHi; i++) {
1332          if ( (nsegments[i].kind == SkFileC
1333                || nsegments[i].kind == SkAnonC
1334                || nsegments[i].kind == SkShmC
1335                || (nsegments[i].kind == SkFree  && freeOk)
1336                || (nsegments[i].kind == SkResvn && freeOk))
1337               && (needR ? nsegments[i].hasR : True)
1338               && (needW ? nsegments[i].hasW : True)
1339               && (needX ? nsegments[i].hasX : True) ) {
1340             /* ok */
1341          } else {
1342             return False;
1343          }
1344       }
1345    } else {
1346       for (i = iLo; i <= iHi; i++) {
1347          if ( (nsegments[i].kind == SkFileV
1348                || nsegments[i].kind == SkAnonV)
1349               && (needR ? nsegments[i].hasR : True)
1350               && (needW ? nsegments[i].hasW : True)
1351               && (needX ? nsegments[i].hasX : True) ) {
1352             /* ok */
1353          } else {
1354             return False;
1355          }
1356       }
1357    }
1358    return True;
1359 }
1360 
1361 /* Test if a piece of memory is addressable by the client with at
1362    least the "prot" protection permissions by examining the underlying
1363    segments. */
1364 Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
1365                                   UInt prot )
1366 {
1367    return is_valid_for(/* client */ True,
1368                        start, len, prot, False/*free not OK*/ );
1369 }
1370 
1371 /* Variant of VG_(am_is_valid_for_client) which allows free areas to
1372    be consider part of the client's addressable space.  It also
1373    considers reservations to be allowable, since from the client's
1374    point of view they don't exist. */
1375 Bool VG_(am_is_valid_for_client_or_free_or_resvn)
1376    ( Addr start, SizeT len, UInt prot )
1377 {
1378    return is_valid_for(/* client */ True,
1379                         start, len, prot, True/*free is OK*/ );
1380 }
1381 
1382 
1383 Bool VG_(am_is_valid_for_valgrind) ( Addr start, SizeT len, UInt prot )
1384 {
1385    return is_valid_for(/* client */ False,
1386                         start, len, prot, False/*irrelevant*/ );
1387 }
1388 
1389 
1390 /* Returns True if any part of the address range is marked as having
1391    translations made from it.  This is used to determine when to
1392    discard code, so if in doubt return True. */
1393 
1394 static Bool any_Ts_in_range ( Addr start, SizeT len )
1395 {
1396    Int iLo, iHi, i;
1397    aspacem_assert(len > 0);
1398    aspacem_assert(start + len > start);
1399    iLo = find_nsegment_idx(start);
1400    iHi = find_nsegment_idx(start + len - 1);
1401    for (i = iLo; i <= iHi; i++) {
1402       if (nsegments[i].hasT)
1403          return True;
1404    }
1405    return False;
1406 }
1407 
1408 
1409 /*-----------------------------------------------------------------*/
1410 /*---                                                           ---*/
1411 /*--- Modifying the segment array, and constructing segments.   ---*/
1412 /*---                                                           ---*/
1413 /*-----------------------------------------------------------------*/
1414 
1415 /* Split the segment containing 'a' into two, so that 'a' is
1416    guaranteed to be the start of a new segment.  If 'a' is already the
1417    start of a segment, do nothing. */
1418 
1419 static void split_nsegment_at ( Addr a )
1420 {
1421    Int i, j;
1422 
1423    aspacem_assert(a > 0);
1424    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
1425 
1426    i = find_nsegment_idx(a);
1427    aspacem_assert(i >= 0 && i < nsegments_used);
1428 
1429    if (nsegments[i].start == a)
1430       /* 'a' is already the start point of a segment, so nothing to be
1431          done. */
1432       return;
1433 
1434    /* else we have to slide the segments upwards to make a hole */
1435    if (nsegments_used >= VG_N_SEGMENTS)
1436       ML_(am_barf_toolow)("VG_N_SEGMENTS");
1437    for (j = nsegments_used-1; j > i; j--)
1438       nsegments[j+1] = nsegments[j];
1439    nsegments_used++;
1440 
1441    nsegments[i+1]       = nsegments[i];
1442    nsegments[i+1].start = a;
1443    nsegments[i].end     = a-1;
1444 
1445    if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
1446       nsegments[i+1].offset
1447          += ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);
1448 
1449    aspacem_assert(sane_NSegment(&nsegments[i]));
1450    aspacem_assert(sane_NSegment(&nsegments[i+1]));
1451 }
1452 
1453 
1454 /* Do the minimum amount of segment splitting necessary to ensure that
1455    sLo is the first address denoted by some segment and sHi is the
1456    highest address denoted by some other segment.  Returns the indices
1457    of the lowest and highest segments in the range. */
1458 
1459 static
1460 void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
1461                                  /*OUT*/Int* iLo,
1462                                  /*OUT*/Int* iHi )
1463 {
1464    aspacem_assert(sLo < sHi);
1465    aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
1466    aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
1467 
1468    if (sLo > 0)
1469       split_nsegment_at(sLo);
1470    if (sHi < sHi+1)
1471       split_nsegment_at(sHi+1);
1472 
1473    *iLo = find_nsegment_idx(sLo);
1474    *iHi = find_nsegment_idx(sHi);
1475    aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
1476    aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
1477    aspacem_assert(*iLo <= *iHi);
1478    aspacem_assert(nsegments[*iLo].start == sLo);
1479    aspacem_assert(nsegments[*iHi].end == sHi);
1480    /* Not that I'm overly paranoid or anything, definitely not :-) */
1481 }
1482 
1483 
1484 /* Add SEG to the collection, deleting/truncating any it overlaps.
1485    This deals with all the tricky cases of splitting up segments as
1486    needed. */
1487 
1488 static void add_segment ( NSegment* seg )
1489 {
1490    Int  i, iLo, iHi, delta;
1491    Bool segment_is_sane;
1492 
1493    Addr sStart = seg->start;
1494    Addr sEnd   = seg->end;
1495 
1496    aspacem_assert(sStart <= sEnd);
1497    aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
1498    aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
1499 
1500    segment_is_sane = sane_NSegment(seg);
1501    if (!segment_is_sane) show_nsegment_full(0,-1,seg);
1502    aspacem_assert(segment_is_sane);
1503 
1504    split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
1505 
1506    /* Now iLo .. iHi inclusive is the range of segment indices which
1507       seg will replace.  If we're replacing more than one segment,
1508       slide those above the range down to fill the hole. */
1509    delta = iHi - iLo;
1510    aspacem_assert(delta >= 0);
1511    if (delta > 0) {
1512       for (i = iLo; i < nsegments_used-delta; i++)
1513          nsegments[i] = nsegments[i+delta];
1514       nsegments_used -= delta;
1515    }
1516 
1517    nsegments[iLo] = *seg;
1518 
1519    (void)preen_nsegments();
1520    if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
1521 }
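
/* Illustrative sketch (assumed addresses, for exposition only): adding
   a segment that overlaps two existing ones.

      before:  [A: 0x1000-0x3fff] [B: 0x4000-0x7fff]
      call:    seg->start = 0x2000; seg->end = 0x5fff; add_segment(seg);
      after:   [A: 0x1000-0x1fff] [seg: 0x2000-0x5fff] [B: 0x6000-0x7fff]

   split_nsegments_lo_and_hi() first splits A at 0x2000 and B at 0x6000,
   the fully-covered pieces are slid out of the way, *seg is written
   over the hole, and preen_nsegments() re-merges any neighbours that
   have become mergeable. */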
1522 
1523 
1524 /* Clear out an NSegment record. */
1525 
1526 static void init_nsegment ( /*OUT*/NSegment* seg )
1527 {
1528    seg->kind     = SkFree;
1529    seg->start    = 0;
1530    seg->end      = 0;
1531    seg->smode    = SmFixed;
1532    seg->dev      = 0;
1533    seg->ino      = 0;
1534    seg->mode     = 0;
1535    seg->offset   = 0;
1536    seg->fnIdx    = -1;
1537    seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False;
1538    seg->mark = False;
1539 }
1540 
1541 /* Make an NSegment which holds a reservation. */
1542 
1543 static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
1544 {
1545    aspacem_assert(start < end);
1546    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
1547    aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
1548    init_nsegment(seg);
1549    seg->kind  = SkResvn;
1550    seg->start = start;
1551    seg->end   = end;
1552 }
1553 
1554 
1555 /*-----------------------------------------------------------------*/
1556 /*---                                                           ---*/
1557 /*--- Startup, including reading /proc/self/maps.               ---*/
1558 /*---                                                           ---*/
1559 /*-----------------------------------------------------------------*/
1560 
1561 static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
1562                                  ULong dev, ULong ino, Off64T offset,
1563                                  const HChar* filename )
1564 {
1565    NSegment seg;
1566    init_nsegment( &seg );
1567    seg.start  = addr;
1568    seg.end    = addr+len-1;
1569    seg.dev    = dev;
1570    seg.ino    = ino;
1571    seg.offset = offset;
1572    seg.hasR   = toBool(prot & VKI_PROT_READ);
1573    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
1574    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
1575    seg.hasT   = False;
1576 
1577    /* Don't use the presence of a filename to decide whether a segment
1578       in the initial /proc/self/maps is an AnonV or FileV segment, as
1579       some systems don't report the filename.  Use the device and inode
1580       numbers instead.  Fixes bug #124528. */
1581    seg.kind = SkAnonV;
1582    if (dev != 0 && ino != 0)
1583       seg.kind = SkFileV;
1584 
1585 #  if defined(VGO_darwin)
1586    // GrP fixme no dev/ino on darwin
1587    if (offset != 0)
1588       seg.kind = SkFileV;
1589 #  endif // defined(VGO_darwin)
1590 
1591 #  if defined(VGP_arm_linux)
1592    /* The standard handling of entries read from /proc/self/maps will
1593       cause the faked up commpage segment to have type SkAnonV, which
1594       is a problem because it contains code we want the client to
1595       execute, and so later m_translate will segfault the client when
1596       it tries to go in there.  Hence change the ownership of it here
1597       to the client (SkAnonC).  The least-worst kludge I could think
1598       of. */
1599    if (addr == ARM_LINUX_FAKE_COMMPAGE_START
1600        && addr + len == ARM_LINUX_FAKE_COMMPAGE_END1
1601        && seg.kind == SkAnonV)
1602       seg.kind = SkAnonC;
1603 #  endif // defined(VGP_arm_linux)
1604 
1605    if (filename)
1606       seg.fnIdx = allocate_segname( filename );
1607 
1608    if (0) show_nsegment( 2,0, &seg );
1609    add_segment( &seg );
1610 }
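
/* Illustrative example (assumed values, not taken from a real system):
   a /proc/self/maps line such as

      08048000-08056000 r-xp 00000000 08:01 123456  /bin/prog

   would arrive here roughly as

      read_maps_callback( 0x08048000, 0xE000,
                          VKI_PROT_READ | VKI_PROT_EXEC,
                          dev, ino, 0, "/bin/prog" );

   where dev and ino are decoded from "08:01" and "123456".  Since both
   are nonzero, the segment is recorded as SkFileV, spanning
   0x08048000 .. 0x08055FFF. */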
1611 
1612 /* Initialise the address space manager, setting up the initial
1613    segment list, and reading /proc/self/maps into it.  This must
1614    be called before any other function.
1615 
1616    Takes the SP value at the time V gained control.  This is
1617    taken to be the highest usable address (more or less).  Based on
1618    that (and general consultation of tea leaves, etc) return a
1619    suggested end address for the client's stack. */
1620 
1621 Addr VG_(am_startup) ( Addr sp_at_startup )
1622 {
1623    NSegment seg;
1624    Addr     suggested_clstack_top;
1625 
1626    aspacem_assert(sizeof(Word)   == sizeof(void*));
1627    aspacem_assert(sizeof(Addr)   == sizeof(void*));
1628    aspacem_assert(sizeof(SizeT)  == sizeof(void*));
1629    aspacem_assert(sizeof(SSizeT) == sizeof(void*));
1630 
1631    /* Check that we can store the largest imaginable dev, ino and
1632       offset numbers in an NSegment. */
1633    aspacem_assert(sizeof(seg.dev)    == 8);
1634    aspacem_assert(sizeof(seg.ino)    == 8);
1635    aspacem_assert(sizeof(seg.offset) == 8);
1636    aspacem_assert(sizeof(seg.mode)   == 4);
1637 
1638    /* Add a single interval covering the entire address space. */
1639    init_nsegment(&seg);
1640    seg.kind        = SkFree;
1641    seg.start       = Addr_MIN;
1642    seg.end         = Addr_MAX;
1643    nsegments[0]    = seg;
1644    nsegments_used  = 1;
1645 
1646    aspacem_minAddr = VG_(clo_aspacem_minAddr);
1647 
1648 #if defined(VGO_darwin)
1649 
1650 # if VG_WORDSIZE == 4
1651    aspacem_maxAddr = (Addr) 0xffffffff;
1652 
1653    aspacem_cStart = aspacem_minAddr;
1654    aspacem_vStart = 0xf0000000;  // 0xc0000000..0xf0000000 available
1655 # else
1656    aspacem_maxAddr = (Addr) 0x7fffffffffff;
1657 
1658    aspacem_cStart = aspacem_minAddr;
1659    aspacem_vStart = 0x700000000000; // 0x7000:00000000..0x7fff:5c000000 avail
1660    // 0x7fff:5c000000..0x7fff:ffe00000? is stack, dyld, shared cache
1661 # endif
1662 
1663    suggested_clstack_top = -1; // ignored; Mach-O specifies its stack
1664 
1665 #else /* !defined(VGO_darwin) */
1666 
1667    /* Establish address limits and block out unusable parts
1668       accordingly. */
1669 
1670    VG_(debugLog)(2, "aspacem",
1671                     "        sp_at_startup = 0x%010llx (supplied)\n",
1672                     (ULong)sp_at_startup );
1673 
1674 #  if VG_WORDSIZE == 8
1675      aspacem_maxAddr = (Addr)0x1000000000ULL - 1; // 64G
1676 #    ifdef ENABLE_INNER
1677      { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
1678        if (aspacem_maxAddr > cse)
1679           aspacem_maxAddr = cse;
1680      }
1681 #    endif
1682 #  else
1683      aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
1684 #  endif
1685 
1686    aspacem_cStart = aspacem_minAddr;
1687    aspacem_vStart = VG_PGROUNDUP(aspacem_minAddr
1688                                  + (aspacem_maxAddr - aspacem_minAddr + 1) / 2);
1689 #  ifdef ENABLE_INNER
1690    aspacem_vStart -= 0x10000000; // 256M
1691 #  endif
1692 
1693    suggested_clstack_top = aspacem_maxAddr - 16*1024*1024ULL
1694                                            + VKI_PAGE_SIZE;
1695 
1696 #endif /* #else of 'defined(VGO_darwin)' */
1697 
1698    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
1699    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
1700    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
1701    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
1702    aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_top + 1));
1703 
1704    VG_(debugLog)(2, "aspacem",
1705                     "              minAddr = 0x%010llx (computed)\n",
1706                     (ULong)aspacem_minAddr);
1707    VG_(debugLog)(2, "aspacem",
1708                     "              maxAddr = 0x%010llx (computed)\n",
1709                     (ULong)aspacem_maxAddr);
1710    VG_(debugLog)(2, "aspacem",
1711                     "               cStart = 0x%010llx (computed)\n",
1712                     (ULong)aspacem_cStart);
1713    VG_(debugLog)(2, "aspacem",
1714                     "               vStart = 0x%010llx (computed)\n",
1715                     (ULong)aspacem_vStart);
1716    VG_(debugLog)(2, "aspacem",
1717                     "suggested_clstack_top = 0x%010llx (computed)\n",
1718                     (ULong)suggested_clstack_top);
1719 
1720    if (aspacem_cStart > Addr_MIN) {
1721       init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
1722       add_segment(&seg);
1723    }
1724    if (aspacem_maxAddr < Addr_MAX) {
1725       init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
1726       add_segment(&seg);
1727    }
1728 
1729    /* Create a 1-page reservation at the notional initial
1730       client/valgrind boundary.  This isn't strictly necessary, but
1731       because the advisor does first-fit and starts searches for
1732       valgrind allocations at the boundary, this is kind of necessary
1733       in order to get it to start allocating in the right place. */
1734    init_resvn(&seg, aspacem_vStart,  aspacem_vStart + VKI_PAGE_SIZE - 1);
1735    add_segment(&seg);
1736 
1737    VG_(am_show_nsegments)(2, "Initial layout");
1738 
1739    VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
1740    parse_procselfmaps( read_maps_callback, NULL );
1741    /* NB: on arm-linux, parse_procselfmaps automagically kludges up
1742       (iow, hands to its callbacks) a description of the ARM Commpage,
1743       since that's not listed in /proc/self/maps (kernel bug IMO).  We
1744       have to fake up its existence in parse_procselfmaps and not
1745       merely add it here as an extra segment, because doing the latter
1746       causes sync checking to fail: we see we have an extra segment in
1747       the segments array, which isn't listed in /proc/self/maps.
1748       Hence we must make it appear that /proc/self/maps contained this
1749       segment all along.  Sigh. */
1750 
1751    VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
1752 
1753    AM_SANITY_CHECK;
1754    return suggested_clstack_top;
1755 }
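
/* Illustrative picture (Linux case, using the values computed above;
   Darwin uses fixed constants instead): the initial carve-up of the
   address space performed by VG_(am_startup).

      Addr_MIN .. aspacem_minAddr-1      SkResvn (kept out of use)
      aspacem_cStart (== minAddr) ..     client mappings advised here
      aspacem_vStart ..                  V's own mappings advised here
                                         (a 1-page SkResvn marks the
                                          boundary)
      aspacem_maxAddr+1 .. Addr_MAX      SkResvn (kept out of use)
*/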
1756 
1757 
1758 /*-----------------------------------------------------------------*/
1759 /*---                                                           ---*/
1760 /*--- The core query-notify mechanism.                          ---*/
1761 /*---                                                           ---*/
1762 /*-----------------------------------------------------------------*/
1763 
1764 /* Query aspacem to ask where a mapping should go. */
1765 
1766 Addr VG_(am_get_advisory) ( MapRequest*  req,
1767                             Bool         forClient,
1768                             /*OUT*/Bool* ok )
1769 {
1770    /* This function implements allocation policy.
1771 
1772       The nature of the allocation request is determined by req, which
1773       specifies the start and length of the request and indicates
1774       whether the start address is mandatory, a hint, or irrelevant,
1775       and by forClient, which says whether this is for the client or
1776       for V.
1777 
1778       Return values: the request can be vetoed (*ok is set to False),
1779       in which case the caller should not attempt to proceed with
1780       making the mapping.  Otherwise, *ok is set to True, the caller
1781       may proceed, and the preferred address at which the mapping
1782       should happen is returned.
1783 
1784       Note that this is an advisory system only: the kernel can in
1785       fact do whatever it likes as far as placement goes, and we have
1786       no absolute control over it.
1787 
1788       Allocations will never be granted in a reserved area.
1789 
1790       The Default Policy is:
1791 
1792         Search the address space for two free intervals: one of them
1793         big enough to contain the request without regard to the
1794         specified address (viz, as if it were a floating request) and
1795         the other being able to contain the request at the specified
1796         address (viz, as if it were a fixed request).  Then, depending on
1797         the outcome of the search and the kind of request made, decide
1798         whether the request is allowable and what address to advise.
1799 
1800       The Default Policy is overridden by Policy Exception #1:
1801 
1802         If the request is for a fixed client map, we are prepared to
1803         grant it providing all areas inside the request are either
1804         free, reservations, or mappings belonging to the client.  In
1805         other words we are prepared to let the client trash its own
1806         mappings if it wants to.
1807 
1808       The Default Policy is overridden by Policy Exception #2:
1809 
1810         If the request is for a hinted client map, we are prepared to
1811         grant it providing all areas inside the request are either
1812         free or reservations.  In other words we are prepared to let
1813         the client have a hinted mapping anywhere it likes provided
1814         it does not trash either any of its own mappings or any of
1815         valgrind's mappings.
1816    */
1817    Int  i, j;
1818    Addr holeStart, holeEnd, holeLen;
1819    Bool fixed_not_required;
1820 
1821    Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
1822 
1823    Addr reqStart = req->rkind==MAny ? 0 : req->start;
1824    Addr reqEnd   = reqStart + req->len - 1;
1825    Addr reqLen   = req->len;
1826 
1827    /* These hold indices for segments found during search, or -1 if not
1828       found. */
1829    Int floatIdx = -1;
1830    Int fixedIdx = -1;
1831 
1832    aspacem_assert(nsegments_used > 0);
1833 
1834    if (0) {
1835       VG_(am_show_nsegments)(0,"getAdvisory");
1836       VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %lld\n",
1837                       (ULong)req->start, (ULong)req->len);
1838    }
1839 
1840    /* Reject zero-length requests */
1841    if (req->len == 0) {
1842       *ok = False;
1843       return 0;
1844    }
1845 
1846    /* Reject wraparounds */
1847    if ((req->rkind==MFixed || req->rkind==MHint)
1848        && req->start + req->len < req->start) {
1849       *ok = False;
1850       return 0;
1851    }
1852 
1853    /* ------ Implement Policy Exception #1 ------ */
1854 
1855    if (forClient && req->rkind == MFixed) {
1856       Int  iLo   = find_nsegment_idx(reqStart);
1857       Int  iHi   = find_nsegment_idx(reqEnd);
1858       Bool allow = True;
1859       for (i = iLo; i <= iHi; i++) {
1860          if (nsegments[i].kind == SkFree
1861              || nsegments[i].kind == SkFileC
1862              || nsegments[i].kind == SkAnonC
1863              || nsegments[i].kind == SkShmC
1864              || nsegments[i].kind == SkResvn) {
1865             /* ok */
1866          } else {
1867             allow = False;
1868             break;
1869          }
1870       }
1871       if (allow) {
1872          /* Acceptable.  Granted. */
1873          *ok = True;
1874          return reqStart;
1875       }
1876       /* Not acceptable.  Fail. */
1877       *ok = False;
1878       return 0;
1879    }
1880 
1881    /* ------ Implement Policy Exception #2 ------ */
1882 
1883    if (forClient && req->rkind == MHint) {
1884       Int  iLo   = find_nsegment_idx(reqStart);
1885       Int  iHi   = find_nsegment_idx(reqEnd);
1886       Bool allow = True;
1887       for (i = iLo; i <= iHi; i++) {
1888          if (nsegments[i].kind == SkFree
1889              || nsegments[i].kind == SkResvn) {
1890             /* ok */
1891          } else {
1892             allow = False;
1893             break;
1894          }
1895       }
1896       if (allow) {
1897          /* Acceptable.  Granted. */
1898          *ok = True;
1899          return reqStart;
1900       }
1901       /* Not acceptable.  Fall through to the default policy. */
1902    }
1903 
1904    /* ------ Implement the Default Policy ------ */
1905 
1906    /* Don't waste time looking for a fixed match if not requested to. */
1907    fixed_not_required = req->rkind == MAny;
1908 
1909    i = find_nsegment_idx(startPoint);
1910 
1911    /* Examine holes from index i back round to i-1.  Record the
1912       index of the first fixed hole and of the first floating hole
1913       which would satisfy the request. */
1914    for (j = 0; j < nsegments_used; j++) {
1915 
1916       if (nsegments[i].kind != SkFree) {
1917          i++;
1918          if (i >= nsegments_used) i = 0;
1919          continue;
1920       }
1921 
1922       holeStart = nsegments[i].start;
1923       holeEnd   = nsegments[i].end;
1924 
1925       /* Stay sane .. */
1926       aspacem_assert(holeStart <= holeEnd);
1927       aspacem_assert(aspacem_minAddr <= holeStart);
1928       aspacem_assert(holeEnd <= aspacem_maxAddr);
1929 
1930       /* See if it's any use to us. */
1931       holeLen = holeEnd - holeStart + 1;
1932 
1933       if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
1934          fixedIdx = i;
1935 
1936       if (floatIdx == -1 && holeLen >= reqLen)
1937          floatIdx = i;
1938 
1939       /* Don't waste time searching once we've found what we wanted. */
1940       if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
1941          break;
1942 
1943       i++;
1944       if (i >= nsegments_used) i = 0;
1945    }
1946 
1947    aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
1948    if (fixedIdx >= 0)
1949       aspacem_assert(nsegments[fixedIdx].kind == SkFree);
1950 
1951    aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
1952    if (floatIdx >= 0)
1953       aspacem_assert(nsegments[floatIdx].kind == SkFree);
1954 
1955    AM_SANITY_CHECK;
1956 
1957    /* Now see if we found anything which can satisfy the request. */
1958    switch (req->rkind) {
1959       case MFixed:
1960          if (fixedIdx >= 0) {
1961             *ok = True;
1962             return req->start;
1963          } else {
1964             *ok = False;
1965             return 0;
1966          }
1967          break;
1968       case MHint:
1969          if (fixedIdx >= 0) {
1970             *ok = True;
1971             return req->start;
1972          }
1973          if (floatIdx >= 0) {
1974             *ok = True;
1975             return nsegments[floatIdx].start;
1976          }
1977          *ok = False;
1978          return 0;
1979       case MAny:
1980          if (floatIdx >= 0) {
1981             *ok = True;
1982             return nsegments[floatIdx].start;
1983          }
1984          *ok = False;
1985          return 0;
1986       default:
1987          break;
1988    }
1989 
1990    /*NOTREACHED*/
1991    ML_(am_barf)("getAdvisory: unknown request kind");
1992    *ok = False;
1993    return 0;
1994 }
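
/* Illustrative sketch (assumed request values): how a caller typically
   consults the advisory before making a mapping.

      MapRequest req;
      Bool       ok;
      Addr       advised;

      req.rkind = MHint;          // e.g. the client passed a non-fixed hint
      req.start = 0x40000000;
      req.len   = 0x10000;
      advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
      if (ok) {
         // 'advised' is either the hinted address (if acceptable under
         // Policy Exception #2 or the Default Policy) or the start of
         // the first free hole large enough for the request.
      }
*/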
1995 
1996 /* Convenience wrapper for VG_(am_get_advisory) for client floating or
1997    fixed requests.  If start is zero, a floating request is issued; if
1998    nonzero, a fixed request at that address is issued.  Same comments
1999    about return values apply. */
2000 
2001 Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
2002                                           /*OUT*/Bool* ok )
2003 {
2004    MapRequest mreq;
2005    mreq.rkind = start==0 ? MAny : MFixed;
2006    mreq.start = start;
2007    mreq.len   = len;
2008    return VG_(am_get_advisory)( &mreq, True/*forClient*/, ok );
2009 }
2010 
2011 /* Similar to VG_(am_find_nsegment) but only returns free segments. */
2012 static NSegment const * VG_(am_find_free_nsegment) ( Addr a )
2013 {
2014    Int i = find_nsegment_idx(a);
2015    aspacem_assert(i >= 0 && i < nsegments_used);
2016    aspacem_assert(nsegments[i].start <= a);
2017    aspacem_assert(a <= nsegments[i].end);
2018    if (nsegments[i].kind == SkFree)
2019       return &nsegments[i];
2020    else
2021       return NULL;
2022 }
2023 
2024 Bool VG_(am_covered_by_single_free_segment)
2025    ( Addr start, SizeT len)
2026 {
2027    NSegment const* segLo = VG_(am_find_free_nsegment)( start );
2028    NSegment const* segHi = VG_(am_find_free_nsegment)( start + len - 1 );
2029 
2030    return segLo != NULL && segHi != NULL && segLo == segHi;
2031 }
2032 
2033 
2034 /* Notifies aspacem that the client completed an mmap successfully.
2035    The segment array is updated accordingly.  If the returned Bool is
2036    True, the caller should immediately discard translations from the
2037    specified address range. */
2038 
2039 Bool
2040 VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
2041                             Int fd, Off64T offset )
2042 {
2043    HChar    buf[VKI_PATH_MAX];
2044    ULong    dev, ino;
2045    UInt     mode;
2046    NSegment seg;
2047    Bool     needDiscard;
2048 
2049    aspacem_assert(len > 0);
2050    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2051    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2052    aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
2053 
2054    /* Discard is needed if any of the just-trashed range had T. */
2055    needDiscard = any_Ts_in_range( a, len );
2056 
2057    init_nsegment( &seg );
2058    seg.kind   = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
2059    seg.start  = a;
2060    seg.end    = a + len - 1;
2061    seg.hasR   = toBool(prot & VKI_PROT_READ);
2062    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
2063    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
2064    if (!(flags & VKI_MAP_ANONYMOUS)) {
2065       // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
2066       seg.offset = offset;
2067       if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2068          seg.dev = dev;
2069          seg.ino = ino;
2070          seg.mode = mode;
2071       }
2072       if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2073          seg.fnIdx = allocate_segname( buf );
2074       }
2075    }
2076    add_segment( &seg );
2077    AM_SANITY_CHECK;
2078    return needDiscard;
2079 }
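
/* Illustrative sketch (assumed caller; the real wrapper code lives in
   the syscall-handling modules): the query-notify protocol as seen
   from an mmap syscall wrapper, once the kernel call has succeeded at
   address 'res'.

      Bool d = VG_(am_notify_client_mmap)( res, len, prot, flags,
                                           fd, offset );
      if (d)
         VG_(discard_translations)( res, len, "mmap wrapper" );
*/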
2080 
2081 /* Notifies aspacem that the client completed a shmat successfully.
2082    The segment array is updated accordingly.  If the returned Bool is
2083    True, the caller should immediately discard translations from the
2084    specified address range. */
2085 
2086 Bool
2087 VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
2088 {
2089    NSegment seg;
2090    Bool     needDiscard;
2091 
2092    aspacem_assert(len > 0);
2093    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2094    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2095 
2096    /* Discard is needed if any of the just-trashed range had T. */
2097    needDiscard = any_Ts_in_range( a, len );
2098 
2099    init_nsegment( &seg );
2100    seg.kind   = SkShmC;
2101    seg.start  = a;
2102    seg.end    = a + len - 1;
2103    seg.offset = 0;
2104    seg.hasR   = toBool(prot & VKI_PROT_READ);
2105    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
2106    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
2107    add_segment( &seg );
2108    AM_SANITY_CHECK;
2109    return needDiscard;
2110 }
2111 
2112 /* Notifies aspacem that an mprotect was completed successfully.  The
2113    segment array is updated accordingly.  Note, as with
2114    VG_(am_notify_munmap), it is not the job of this function to reject
2115    stupid mprotects, for example the client doing mprotect of
2116    non-client areas.  Such requests should be intercepted earlier, by
2117    the syscall wrapper for mprotect.  This function merely records
2118    whatever it is told.  If the returned Bool is True, the caller
2119    should immediately discard translations from the specified address
2120    range. */
2121 
2122 Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
2123 {
2124    Int  i, iLo, iHi;
2125    Bool newR, newW, newX, needDiscard;
2126 
2127    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2128    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2129 
2130    if (len == 0)
2131       return False;
2132 
2133    newR = toBool(prot & VKI_PROT_READ);
2134    newW = toBool(prot & VKI_PROT_WRITE);
2135    newX = toBool(prot & VKI_PROT_EXEC);
2136 
2137    /* Discard is needed if we're dumping X permission */
2138    needDiscard = any_Ts_in_range( start, len ) && !newX;
2139 
2140    split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2141 
2142    iLo = find_nsegment_idx(start);
2143    iHi = find_nsegment_idx(start + len - 1);
2144 
2145    for (i = iLo; i <= iHi; i++) {
2146       /* Apply the permissions to all relevant segments. */
2147       switch (nsegments[i].kind) {
2148          case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
2149             nsegments[i].hasR = newR;
2150             nsegments[i].hasW = newW;
2151             nsegments[i].hasX = newX;
2152             aspacem_assert(sane_NSegment(&nsegments[i]));
2153             break;
2154          default:
2155             break;
2156       }
2157    }
2158 
2159    /* Changing permissions could have made previously un-mergable
2160       segments mergeable.  Therefore have to re-preen them. */
2161    (void)preen_nsegments();
2162    AM_SANITY_CHECK;
2163    return needDiscard;
2164 }
2165 
2166 
2167 /* Notifies aspacem that an munmap completed successfully.  The
2168    segment array is updated accordingly.  As with
2169    VG_(am_notify_mprotect), we merely record the given info, and don't
2170    check it for sensibleness.  If the returned Bool is True, the
2171    caller should immediately discard translations from the specified
2172    address range. */
2173 
2174 Bool VG_(am_notify_munmap)( Addr start, SizeT len )
2175 {
2176    NSegment seg;
2177    Bool     needDiscard;
2178    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2179    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2180 
2181    if (len == 0)
2182       return False;
2183 
2184    needDiscard = any_Ts_in_range( start, len );
2185 
2186    init_nsegment( &seg );
2187    seg.start = start;
2188    seg.end   = start + len - 1;
2189 
2190    /* The segment becomes unused (free).  Segments from above
2191       aspacem_maxAddr were originally SkResvn and so we make them so
2192       again.  Note, this isn't really right when the segment straddles
2193       the aspacem_maxAddr boundary - then really it should be split in
2194       two, the lower part marked as SkFree and the upper part as
2195       SkResvn.  Ah well. */
2196    if (start > aspacem_maxAddr
2197        && /* check previous comparison is meaningful */
2198           aspacem_maxAddr < Addr_MAX)
2199       seg.kind = SkResvn;
2200    else
2201    /* Ditto for segments from below aspacem_minAddr. */
2202    if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
2203       seg.kind = SkResvn;
2204    else
2205       seg.kind = SkFree;
2206 
2207    add_segment( &seg );
2208 
2209    /* Unmapping could create two adjacent free segments, so a preen is
2210       needed.  add_segment() will do that, so no need to here. */
2211    AM_SANITY_CHECK;
2212    return needDiscard;
2213 }
2214 
2215 
2216 /*-----------------------------------------------------------------*/
2217 /*---                                                           ---*/
2218 /*--- Handling mappings which do not arise directly from the    ---*/
2219 /*--- simulation of the client.                                 ---*/
2220 /*---                                                           ---*/
2221 /*-----------------------------------------------------------------*/
2222 
2223 /* --- --- --- map, unmap, protect  --- --- --- */
2224 
2225 /* Map a file at a fixed address for the client, and update the
2226    segment array accordingly. */
2227 
2228 SysRes VG_(am_mmap_file_fixed_client)
2229      ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
2230 {
2231    return VG_(am_mmap_named_file_fixed_client)(start, length, prot, fd, offset, NULL);
2232 }
2233 
2234 SysRes VG_(am_mmap_named_file_fixed_client)
2235      ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset, const HChar *name )
2236 {
2237    SysRes     sres;
2238    NSegment   seg;
2239    Addr       advised;
2240    Bool       ok;
2241    MapRequest req;
2242    ULong      dev, ino;
2243    UInt       mode;
2244    HChar      buf[VKI_PATH_MAX];
2245 
2246    /* Not allowable. */
2247    if (length == 0
2248        || !VG_IS_PAGE_ALIGNED(start)
2249        || !VG_IS_PAGE_ALIGNED(offset))
2250       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2251 
2252    /* Ask for an advisory.  If it's negative, fail immediately. */
2253    req.rkind = MFixed;
2254    req.start = start;
2255    req.len   = length;
2256    advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
2257    if (!ok || advised != start)
2258       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2259 
2260    /* We have been advised that the mapping is allowable at the
2261       specified address.  So hand it off to the kernel, and propagate
2262       any resulting failure immediately. */
2263    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2264    sres = VG_(am_do_mmap_NO_NOTIFY)(
2265              start, length, prot,
2266              VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2267              fd, offset
2268           );
2269    if (sr_isError(sres))
2270       return sres;
2271 
2272    if (sr_Res(sres) != start) {
2273       /* I don't think this can happen.  It means the kernel made a
2274          fixed map succeed but not at the requested location.  Try to
2275          repair the damage, then return saying the mapping failed. */
2276       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2277       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2278    }
2279 
2280    /* Ok, the mapping succeeded.  Now notify the interval map. */
2281    init_nsegment( &seg );
2282    seg.kind   = SkFileC;
2283    seg.start  = start;
2284    seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
2285    seg.offset = offset;
2286    seg.hasR   = toBool(prot & VKI_PROT_READ);
2287    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
2288    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
2289    if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2290       seg.dev = dev;
2291       seg.ino = ino;
2292       seg.mode = mode;
2293    }
2294    if (name) {
2295       seg.fnIdx = allocate_segname( name );
2296    } else if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2297       seg.fnIdx = allocate_segname( buf );
2298    }
2299    add_segment( &seg );
2300 
2301    AM_SANITY_CHECK;
2302    return sres;
2303 }
2304 
2305 
2306 /* Map anonymously at a fixed address for the client, and update
2307    the segment array accordingly. */
2308 
2309 SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
2310 {
2311    SysRes     sres;
2312    NSegment   seg;
2313    Addr       advised;
2314    Bool       ok;
2315    MapRequest req;
2316 
2317    /* Not allowable. */
2318    if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
2319       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2320 
2321    /* Ask for an advisory.  If it's negative, fail immediately. */
2322    req.rkind = MFixed;
2323    req.start = start;
2324    req.len   = length;
2325    advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
2326    if (!ok || advised != start)
2327       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2328 
2329    /* We have been advised that the mapping is allowable at the
2330       specified address.  So hand it off to the kernel, and propagate
2331       any resulting failure immediately. */
2332    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2333    sres = VG_(am_do_mmap_NO_NOTIFY)(
2334              start, length, prot,
2335              VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2336              0, 0
2337           );
2338    if (sr_isError(sres))
2339       return sres;
2340 
2341    if (sr_Res(sres) != start) {
2342       /* I don't think this can happen.  It means the kernel made a
2343          fixed map succeed but not at the requested location.  Try to
2344          repair the damage, then return saying the mapping failed. */
2345       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2346       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2347    }
2348 
2349    /* Ok, the mapping succeeded.  Now notify the interval map. */
2350    init_nsegment( &seg );
2351    seg.kind  = SkAnonC;
2352    seg.start = start;
2353    seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
2354    seg.hasR  = toBool(prot & VKI_PROT_READ);
2355    seg.hasW  = toBool(prot & VKI_PROT_WRITE);
2356    seg.hasX  = toBool(prot & VKI_PROT_EXEC);
2357    add_segment( &seg );
2358 
2359    AM_SANITY_CHECK;
2360    return sres;
2361 }
2362 
2363 
2364 /* Map anonymously at an unconstrained address for the client, and
2365    update the segment array accordingly.  */
2366 
2367 SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
2368 {
2369    SysRes     sres;
2370    NSegment   seg;
2371    Addr       advised;
2372    Bool       ok;
2373    MapRequest req;
2374 
2375    /* Not allowable. */
2376    if (length == 0)
2377       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2378 
2379    /* Ask for an advisory.  If it's negative, fail immediately. */
2380    req.rkind = MAny;
2381    req.start = 0;
2382    req.len   = length;
2383    advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
2384    if (!ok)
2385       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2386 
2387    /* We have been advised that the mapping is allowable at the
2388       advised address.  So hand it off to the kernel, and propagate
2389       any resulting failure immediately. */
2390    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2391    sres = VG_(am_do_mmap_NO_NOTIFY)(
2392              advised, length, prot,
2393              VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2394              0, 0
2395           );
2396    if (sr_isError(sres))
2397       return sres;
2398 
2399    if (sr_Res(sres) != advised) {
2400       /* I don't think this can happen.  It means the kernel made a
2401          fixed map succeed but not at the requested location.  Try to
2402          repair the damage, then return saying the mapping failed. */
2403       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2404       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2405    }
2406 
2407    /* Ok, the mapping succeeded.  Now notify the interval map. */
2408    init_nsegment( &seg );
2409    seg.kind  = SkAnonC;
2410    seg.start = advised;
2411    seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
2412    seg.hasR  = toBool(prot & VKI_PROT_READ);
2413    seg.hasW  = toBool(prot & VKI_PROT_WRITE);
2414    seg.hasX  = toBool(prot & VKI_PROT_EXEC);
2415    add_segment( &seg );
2416 
2417    AM_SANITY_CHECK;
2418    return sres;
2419 }
2420 
2421 
2422 /* Map anonymously at an unconstrained address for V, and update the
2423    segment array accordingly.  This is fundamentally how V allocates
2424    itself more address space when needed. */
2425 
2426 SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
2427 {
2428    SysRes     sres;
2429    NSegment   seg;
2430    Addr       advised;
2431    Bool       ok;
2432    MapRequest req;
2433 
2434    /* Not allowable. */
2435    if (length == 0)
2436       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2437 
2438    /* Ask for an advisory.  If it's negative, fail immediately. */
2439    req.rkind = MAny;
2440    req.start = 0;
2441    req.len   = length;
2442    advised = VG_(am_get_advisory)( &req, False/*forClient*/, &ok );
2443    if (!ok)
2444       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2445 
2446 // On Darwin, for anonymous maps you can pass in a tag which is used by
2447 // programs like vmmap for statistical purposes.
2448 #ifndef VM_TAG_VALGRIND
2449 #  define VM_TAG_VALGRIND 0
2450 #endif
2451 
2452    /* We have been advised that the mapping is allowable at the
2453       specified address.  So hand it off to the kernel, and propagate
2454       any resulting failure immediately. */
2455    /* GrP fixme darwin: use advisory as a hint only, otherwise syscall in
2456       another thread can pre-empt our spot.  [At one point on the DARWIN
2457       branch the VKI_MAP_FIXED was commented out;  unclear if this is
2458       necessary or not given the second Darwin-only call that immediately
2459       follows if this one fails.  --njn]
2460       Also, an inner valgrind cannot observe the mmap syscalls done by
2461       the outer valgrind. The outer Valgrind might make the mmap
2462       fail here, as the inner valgrind believes that a segment is free,
2463       while it is in fact used by the outer valgrind.
2464       So, for an inner valgrind, similarly to DARWIN, if the fixed mmap
2465       fails, retry the mmap without map fixed.
2466       This is a kludge which on linux is only activated for the inner.
2467       The state of the inner aspacemgr is not made correct by this kludge
2468       and so a.o. VG_(am_do_sync_check) could fail.
2469       A proper solution implies a better collaboration between the
2470       inner and the outer (e.g. inner VG_(am_get_advisory) should do
2471       a client request to call the outer VG_(am_get_advisory). */
2472    sres = VG_(am_do_mmap_NO_NOTIFY)(
2473              advised, length,
2474              VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
2475              VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2476              VM_TAG_VALGRIND, 0
2477           );
2478 #if defined(VGO_darwin) || defined(ENABLE_INNER)
2479    /* Kludge on Darwin and inner linux if the fixed mmap failed. */
2480    if (sr_isError(sres)) {
2481        /* try again, ignoring the advisory */
2482        sres = VG_(am_do_mmap_NO_NOTIFY)(
2483              0, length,
2484              VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
2485              /*VKI_MAP_FIXED|*/VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2486              VM_TAG_VALGRIND, 0
2487           );
2488    }
2489 #endif
2490    if (sr_isError(sres))
2491       return sres;
2492 
2493 #if defined(VGO_linux) && !defined(ENABLE_INNER)
2494    /* Do this check only on Linux and not for an inner Valgrind, as
2495       the check below can fail when the kludge above has been used. */
2496    if (sr_Res(sres) != advised) {
2497       /* I don't think this can happen.  It means the kernel made a
2498          fixed map succeed but not at the requested location.  Try to
2499          repair the damage, then return saying the mapping failed. */
2500       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2501       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2502    }
2503 #endif
2504 
2505    /* Ok, the mapping succeeded.  Now notify the interval map. */
2506    init_nsegment( &seg );
2507    seg.kind  = SkAnonV;
2508    seg.start = sr_Res(sres);
2509    seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
2510    seg.hasR  = True;
2511    seg.hasW  = True;
2512    seg.hasX  = True;
2513    add_segment( &seg );
2514 
2515    AM_SANITY_CHECK;
2516    return sres;
2517 }
2518 
2519 /* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
2520 
2521 void* VG_(am_shadow_alloc)(SizeT size)
2522 {
2523    SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
2524    return sr_isError(sres) ? NULL : (void*)sr_Res(sres);
2525 }
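
/* Illustrative sketch (assumed size): a tool obtaining shadow memory.
   The result, if non-NULL, is a fresh read/write/execute SkAnonV
   mapping owned by V.

      void* shadow = VG_(am_shadow_alloc)( 16 * 1024 * 1024 );
      if (shadow == NULL) {
         // out of address space; the tool must report this and give up
      }
*/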
2526 
2527 /* Map a file at an unconstrained address for V, and update the
2528    segment array accordingly, using the provided flags. */
2529 
2530 static SysRes VG_(am_mmap_file_float_valgrind_flags) ( SizeT length, UInt prot,
2531                                                        UInt flags,
2532                                                        Int fd, Off64T offset )
2533 {
2534    SysRes     sres;
2535    NSegment   seg;
2536    Addr       advised;
2537    Bool       ok;
2538    MapRequest req;
2539    ULong      dev, ino;
2540    UInt       mode;
2541    HChar      buf[VKI_PATH_MAX];
2542 
2543    /* Not allowable. */
2544    if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
2545       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2546 
2547    /* Ask for an advisory.  If it's negative, fail immediately. */
2548    req.rkind = MAny;
2549    req.start = 0;
2550    #if defined(VGA_arm) || defined(VGA_arm64) \
2551       || defined(VGA_mips32) || defined(VGA_mips64)
2552    aspacem_assert(VKI_SHMLBA >= VKI_PAGE_SIZE);
2553    #else
2554    aspacem_assert(VKI_SHMLBA == VKI_PAGE_SIZE);
2555    #endif
2556    if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & flags)) {
2557       /* arm-linux only. See ML_(generic_PRE_sys_shmat) and bug 290974 */
2558       req.len = length + VKI_SHMLBA - VKI_PAGE_SIZE;
2559    } else {
2560       req.len = length;
2561    }
2562    advised = VG_(am_get_advisory)( &req, False/*forClient*/, &ok );
2563    if (!ok)
2564       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2565    if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & flags))
2566       advised = VG_ROUNDUP(advised, VKI_SHMLBA);
2567 
2568    /* We have been advised that the mapping is allowable at the
2569       specified address.  So hand it off to the kernel, and propagate
2570       any resulting failure immediately. */
2571    sres = VG_(am_do_mmap_NO_NOTIFY)(
2572              advised, length, prot,
2573              flags,
2574              fd, offset
2575           );
2576    if (sr_isError(sres))
2577       return sres;
2578 
2579    if (sr_Res(sres) != advised) {
2580       /* I don't think this can happen.  It means the kernel made a
2581          fixed map succeed but not at the requested location.  Try to
2582          repair the damage, then return saying the mapping failed. */
2583       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2584       return VG_(mk_SysRes_Error)( VKI_EINVAL );
2585    }
2586 
2587    /* Ok, the mapping succeeded.  Now notify the interval map. */
2588    init_nsegment( &seg );
2589    seg.kind   = SkFileV;
2590    seg.start  = sr_Res(sres);
2591    seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
2592    seg.offset = offset;
2593    seg.hasR   = toBool(prot & VKI_PROT_READ);
2594    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
2595    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
2596    if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2597       seg.dev  = dev;
2598       seg.ino  = ino;
2599       seg.mode = mode;
2600    }
2601    if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2602       seg.fnIdx = allocate_segname( buf );
2603    }
2604    add_segment( &seg );
2605 
2606    AM_SANITY_CHECK;
2607    return sres;
2608 }
2609 /* Privately map a file at an unconstrained address for V, and update the
2610    segment array accordingly.  This is used by V for transiently
2611    mapping in object files to read their debug info.  */
2612 
2613 SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
2614                                           Int fd, Off64T offset )
2615 {
2616    return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
2617                                                   VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2618                                                   fd, offset );
2619 }
2620 
2621 SysRes VG_(am_shared_mmap_file_float_valgrind)
2622    ( SizeT length, UInt prot, Int fd, Off64T offset )
2623 {
2624    return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
2625                                                   VKI_MAP_FIXED|VKI_MAP_SHARED,
2626                                                   fd, offset );
2627 }
2628 
2629 /* --- --- munmap helper --- --- */
2630 
2631 static
2632 SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
2633                             Addr start, SizeT len, Bool forClient )
2634 {
2635    Bool   d;
2636    SysRes sres;
2637 
2638    if (!VG_IS_PAGE_ALIGNED(start))
2639       goto eINVAL;
2640 
2641    if (len == 0) {
2642       *need_discard = False;
2643       return VG_(mk_SysRes_Success)( 0 );
2644    }
2645 
2646    if (start + len < len)
2647       goto eINVAL;
2648 
2649    len = VG_PGROUNDUP(len);
2650    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2651    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2652 
2653    if (forClient) {
2654       if (!VG_(am_is_valid_for_client_or_free_or_resvn)
2655             ( start, len, VKI_PROT_NONE ))
2656          goto eINVAL;
2657    } else {
2658       if (!VG_(am_is_valid_for_valgrind)
2659             ( start, len, VKI_PROT_NONE ))
2660          goto eINVAL;
2661    }
2662 
2663    d = any_Ts_in_range( start, len );
2664 
2665    sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
2666    if (sr_isError(sres))
2667       return sres;
2668 
2669    VG_(am_notify_munmap)( start, len );
2670    AM_SANITY_CHECK;
2671    *need_discard = d;
2672    return sres;
2673 
2674   eINVAL:
2675    return VG_(mk_SysRes_Error)( VKI_EINVAL );
2676 }
2677 
2678 /* Unmap the given address range and update the segment array
2679    accordingly.  This fails if the range isn't valid for the client.
2680    If *need_discard is True after a successful return, the caller
2681    should immediately discard translations from the specified address
2682    range. */
2683 
2684 SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
2685                               Addr start, SizeT len )
2686 {
2687    return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
2688 }
2689 
2690 /* Unmap the given address range and update the segment array
2691    accordingly.  This fails if the range isn't valid for valgrind. */
2692 
2693 SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
2694 {
2695    Bool need_discard;
2696    SysRes r = am_munmap_both_wrk( &need_discard,
2697                                   start, len, False/*valgrind*/ );
2698    /* If this assertion fails, it means we allowed translations to be
2699       made from a V-owned section.  Which shouldn't happen. */
2700    if (!sr_isError(r))
2701       aspacem_assert(!need_discard);
2702    return r;
2703 }
2704 
2705 /* Let (start,len) denote an area within a single Valgrind-owned
2706    segment (anon or file).  Change the ownership of [start, start+len)
2707    to the client instead.  Fails if (start,len) does not denote a
2708    suitable segment. */
2709 
2710 Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
2711 {
2712    Int i, iLo, iHi;
2713 
2714    if (len == 0)
2715       return True;
2716    if (start + len < start)
2717       return False;
2718    if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
2719       return False;
2720 
2721    i = find_nsegment_idx(start);
2722    if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
2723       return False;
2724    if (start+len-1 > nsegments[i].end)
2725       return False;
2726 
2727    aspacem_assert(start >= nsegments[i].start);
2728    aspacem_assert(start+len-1 <= nsegments[i].end);
2729 
2730    /* This scheme is like how mprotect works: split the to-be-changed
2731       range into its own segment(s), then mess with them (it).  There
2732       should be only one. */
2733    split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2734    aspacem_assert(iLo == iHi);
2735    switch (nsegments[iLo].kind) {
2736       case SkFileV: nsegments[iLo].kind = SkFileC; break;
2737       case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
2738       default: aspacem_assert(0); /* can't happen - guarded above */
2739    }
2740 
2741    preen_nsegments();
2742    return True;
2743 }
2744 
2745 /* 'seg' must be NULL or have been obtained from
2746    VG_(am_find_nsegment), and still valid.  If non-NULL, and if it
2747    denotes a SkAnonC (anonymous client mapping) area, set the .isCH
2748    (is-client-heap) flag for that area.  Otherwise do nothing.
2749    (Bizarre interface so that the same code works for both Linux and
2750    AIX and does not impose inefficiencies on the Linux version.) */
2751 void VG_(am_set_segment_isCH_if_SkAnonC)( const NSegment* seg )
2752 {
2753    Int i = segAddr_to_index( seg );
2754    aspacem_assert(i >= 0 && i < nsegments_used);
2755    if (nsegments[i].kind == SkAnonC) {
2756       nsegments[i].isCH = True;
2757    } else {
2758       aspacem_assert(nsegments[i].isCH == False);
2759    }
2760 }
2761 
2762 /* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
2763    segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
2764    segment. */
2765 void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( const NSegment* seg )
2766 {
2767    Int i = segAddr_to_index( seg );
2768    aspacem_assert(i >= 0 && i < nsegments_used);
2769    if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkFileC) {
2770       nsegments[i].hasT = True;
2771    }
2772 }
2773 
2774 
2775 /* --- --- --- reservations --- --- --- */
2776 
2777 /* Create a reservation from START .. START+LENGTH-1, with the given
2778    ShrinkMode.  When checking whether the reservation can be created,
2779    also ensure that at least abs(EXTRA) extra free bytes will remain
2780    above (> 0) or below (< 0) the reservation.
2781 
2782    The reservation will only be created if it, plus the extra-zone,
2783    falls entirely within a single free segment.  The returned Bool
2784    indicates whether the creation succeeded. */
2785 
2786 Bool VG_(am_create_reservation) ( Addr start, SizeT length,
2787                                   ShrinkMode smode, SSizeT extra )
2788 {
2789    Int      startI, endI;
2790    NSegment seg;
2791 
2792    /* start and end, not taking into account the extra space. */
2793    Addr start1 = start;
2794    Addr end1   = start + length - 1;
2795 
2796    /* start and end, taking into account the extra space. */
2797    Addr start2 = start1;
2798    Addr end2   = end1;
2799 
2800    if (extra < 0) start2 += extra; // this moves it down :-)
2801    if (extra > 0) end2 += extra;
2802 
2803    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2804    aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
2805    aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
2806    aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
2807 
2808    startI = find_nsegment_idx( start2 );
2809    endI = find_nsegment_idx( end2 );
2810 
2811    /* If the start and end points don't fall within the same (free)
2812       segment, we're hosed.  This does rely on the assumption that all
2813       mergeable adjacent segments can be merged, but add_segment()
2814       should ensure that. */
2815    if (startI != endI)
2816       return False;
2817 
2818    if (nsegments[startI].kind != SkFree)
2819       return False;
2820 
2821    /* Looks good - make the reservation. */
2822    aspacem_assert(nsegments[startI].start <= start2);
2823    aspacem_assert(end2 <= nsegments[startI].end);
2824 
2825    init_nsegment( &seg );
2826    seg.kind  = SkResvn;
2827    seg.start = start1;  /* NB: extra space is not included in the
2828                            reservation. */
2829    seg.end   = end1;
2830    seg.smode = smode;
2831    add_segment( &seg );
2832 
2833    AM_SANITY_CHECK;
2834    return True;
2835 }
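
/* Illustrative sketch (assumed numbers): reserving address space that a
   mapping may later grow into.  The reservation covers
   [0x50000000, 0x500FFFFF]; because extra is positive, creation also
   requires a further 0x100000 free bytes above it.

      Bool ok = VG_(am_create_reservation)( 0x50000000, 0x100000,
                                            SmLower, 0x100000 );

   SmLower means the reservation may later be eaten from its lower end
   by a mapping growing upwards into it (see the function below). */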
2836 
2837 
2838 /* Let SEG be an anonymous client mapping.  This fn extends the
2839    mapping by DELTA bytes, taking the space from a reservation section
2840    which must be adjacent.  If DELTA is positive, the segment is
2841    extended forwards in the address space, and the reservation must be
2842    the next one along.  If DELTA is negative, the segment is extended
2843    backwards in the address space and the reservation must be the
2844    previous one.  DELTA must be page aligned.  abs(DELTA) must not
2845    exceed the size of the reservation segment minus one page, that is,
2846    the reservation segment after the operation must be at least one
2847    page long. */
2848 
2849 Bool VG_(am_extend_into_adjacent_reservation_client) ( const NSegment* seg,
2850                                                        SSizeT    delta )
2851 {
2852    Int    segA, segR;
2853    UInt   prot;
2854    SysRes sres;
2855 
2856    /* Find the segment array index for SEG.  If the assertion fails it
2857       probably means you passed in a bogus SEG. */
2858    segA = segAddr_to_index( seg );
2859    aspacem_assert(segA >= 0 && segA < nsegments_used);
2860 
2861    if (nsegments[segA].kind != SkAnonC)
2862       return False;
2863 
2864    if (delta == 0)
2865       return True;
2866 
2867    prot =   (nsegments[segA].hasR ? VKI_PROT_READ : 0)
2868           | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
2869           | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
2870 
2871    aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
2872 
2873    if (delta > 0) {
2874 
2875       /* Extending the segment forwards. */
2876       segR = segA+1;
2877       if (segR >= nsegments_used
2878           || nsegments[segR].kind != SkResvn
2879           || nsegments[segR].smode != SmLower
2880           || nsegments[segR].start != nsegments[segA].end + 1
2881           || delta + VKI_PAGE_SIZE
2882                 > (nsegments[segR].end - nsegments[segR].start + 1))
2883         return False;
2884 
2885       /* Extend the kernel's mapping. */
2886       // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2887       sres = VG_(am_do_mmap_NO_NOTIFY)(
2888                 nsegments[segR].start, delta,
2889                 prot,
2890                 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2891                 0, 0
2892              );
2893       if (sr_isError(sres))
2894          return False; /* kernel bug if this happens? */
2895       if (sr_Res(sres) != nsegments[segR].start) {
2896          /* kernel bug if this happens? */
2897         (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2898         return False;
2899       }
2900 
2901       /* Ok, success with the kernel.  Update our structures. */
2902       nsegments[segR].start += delta;
2903       nsegments[segA].end += delta;
2904       aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2905 
2906    } else {
2907 
2908       /* Extending the segment backwards. */
2909       delta = -delta;
2910       aspacem_assert(delta > 0);
2911 
2912       segR = segA-1;
2913       if (segR < 0
2914           || nsegments[segR].kind != SkResvn
2915           || nsegments[segR].smode != SmUpper
2916           || nsegments[segR].end + 1 != nsegments[segA].start
2917           || delta + VKI_PAGE_SIZE
2918                 > (nsegments[segR].end - nsegments[segR].start + 1))
2919         return False;
2920 
2921       /* Extend the kernel's mapping. */
2922       // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2923       sres = VG_(am_do_mmap_NO_NOTIFY)(
2924                 nsegments[segA].start-delta, delta,
2925                 prot,
2926                 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2927                 0, 0
2928              );
2929       if (sr_isError(sres))
2930          return False; /* kernel bug if this happens? */
2931       if (sr_Res(sres) != nsegments[segA].start-delta) {
2932          /* kernel bug if this happens? */
2933         (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2934         return False;
2935       }
2936 
2937       /* Ok, success with the kernel.  Update our structures. */
2938       nsegments[segR].end -= delta;
2939       nsegments[segA].start -= delta;
2940       aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2941 
2942    }
2943 
2944    AM_SANITY_CHECK;
2945    return True;
2946 }
2947 
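/* Illustration (not part of the original source): a minimal sketch of
   how a caller might use the function above, e.g. to let a client
   stack segment grow downwards into the SkResvn segment sitting just
   below it.  'seg' is assumed to point at the stack's SkAnonC segment:

      SSizeT grow = 4 * VKI_PAGE_SIZE;   // for example
      if (!VG_(am_extend_into_adjacent_reservation_client)( seg, -grow ))
         return False;   // reservation exhausted, or the kernel refused

   A positive delta instead extends the segment forwards, eating into
   an SmLower reservation that immediately follows it. */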
2948 
2949 /* --- --- --- resizing/move a mapping --- --- --- */
2950 
2951 #if HAVE_MREMAP
2952 
2953 /* Let SEG be a client mapping (anonymous or file).  This fn extends
2954    the mapping forwards only by DELTA bytes, and trashes whatever was
2955    in the new area.  Fails if SEG is not a single client mapping or if
2956    the new area is not accessible to the client.  Fails if DELTA is
2957    not page aligned.  *seg is invalid after a successful return.  If
2958    *need_discard is True after a successful return, the caller should
2959    immediately discard translations from the new area. */
2960 
2961 Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
2962                                 const NSegment* seg, SizeT delta )
2963 {
2964    Addr     xStart;
2965    SysRes   sres;
2966    NSegment seg_copy = *seg;
2967    SizeT    seg_old_len = seg->end + 1 - seg->start;
2968 
2969    if (0)
2970       VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
2971 
2972    if (seg->kind != SkFileC && seg->kind != SkAnonC)
2973       return False;
2974 
2975    if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
2976       return False;
2977 
2978    xStart = seg->end+1;
2979    if (xStart + delta < delta)
2980       return False;
2981 
2982    if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
2983                                                       VKI_PROT_NONE ))
2984       return False;
2985 
2986    AM_SANITY_CHECK;
2987    sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
2988                                                seg_old_len,
2989                                                seg_old_len + delta );
2990    if (sr_isError(sres)) {
2991       AM_SANITY_CHECK;
2992       return False;
2993    } else {
2994       /* the area must not have moved */
2995       aspacem_assert(sr_Res(sres) == seg->start);
2996    }
2997 
2998    *need_discard = any_Ts_in_range( seg_copy.end+1, delta );
2999 
3000    seg_copy.end += delta;
3001    add_segment( &seg_copy );
3002 
3003    if (0)
3004       VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
3005 
3006    AM_SANITY_CHECK;
3007    return True;
3008 }
3009 
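/* Illustration (not part of the original source): a sketch of the
   intended calling pattern for VG_(am_extend_map_client), assuming
   'seg' describes an existing client mapping and 'delta' is a
   page-aligned, caller-supplied size:

      Bool need_discard;
      Addr new_area = seg->end + 1;   // note: *seg is invalid on success
      if (VG_(am_extend_map_client)( &need_discard, seg, delta )) {
         if (need_discard)
            // Drop translations made from the newly mapped area.
            // VG_(discard_translations) is what the rest of Valgrind
            // uses for this; treat the exact signature as an assumption.
            VG_(discard_translations)( new_area, delta, "am_extend(sketch)" );
      }
*/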
3010 
3011 /* Remap the old address range to the new address range.  Fails if any
3012    parameter is not page aligned, if either size is zero, if any
3013    wraparound is implied, if the old address range does not fall
3014    entirely within a single segment, if the new address range overlaps
3015    with the old one, or if the old address range is not a valid client
3016    mapping.  If *need_discard is True after a successful return, the
3017    caller should immediately discard translations from both specified
3018    address ranges.  */
3019 
3020 Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
3021                                         Addr old_addr, SizeT old_len,
3022                                         Addr new_addr, SizeT new_len )
3023 {
3024    Int      iLo, iHi;
3025    SysRes   sres;
3026    NSegment seg;
3027 
3028    if (old_len == 0 || new_len == 0)
3029       return False;
3030 
3031    if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
3032        || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
3033       return False;
3034 
3035    if (old_addr + old_len < old_addr
3036        || new_addr + new_len < new_addr)
3037       return False;
3038 
3039    if (old_addr + old_len - 1 < new_addr
3040        || new_addr + new_len - 1 < old_addr) {
3041       /* no overlap */
3042    } else
3043       return False;
3044 
3045    iLo = find_nsegment_idx( old_addr );
3046    iHi = find_nsegment_idx( old_addr + old_len - 1 );
3047    if (iLo != iHi)
3048       return False;
3049 
3050    if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
3051       return False;
3052 
3053    sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
3054              ( old_addr, old_len, new_addr, new_len );
3055    if (sr_isError(sres)) {
3056       AM_SANITY_CHECK;
3057       return False;
3058    } else {
3059       aspacem_assert(sr_Res(sres) == new_addr);
3060    }
3061 
3062    *need_discard = any_Ts_in_range( old_addr, old_len )
3063                    || any_Ts_in_range( new_addr, new_len );
3064 
3065    seg = nsegments[iLo];
3066 
3067    /* Mark the new area based on the old seg. */
3068    if (seg.kind == SkFileC) {
3069       seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
3070    } else {
3071       aspacem_assert(seg.kind == SkAnonC);
3072       aspacem_assert(seg.offset == 0);
3073    }
3074    seg.start = new_addr;
3075    seg.end   = new_addr + new_len - 1;
3076    add_segment( &seg );
3077 
3078    /* Create a free hole in the old location. */
3079    init_nsegment( &seg );
3080    seg.start = old_addr;
3081    seg.end   = old_addr + old_len - 1;
3082    /* See comments in VG_(am_notify_munmap) about this SkResvn vs
3083       SkFree thing. */
3084    if (old_addr > aspacem_maxAddr
3085        && /* check previous comparison is meaningful */
3086           aspacem_maxAddr < Addr_MAX)
3087       seg.kind = SkResvn;
3088    else
3089       seg.kind = SkFree;
3090 
3091    add_segment( &seg );
3092 
3093    AM_SANITY_CHECK;
3094    return True;
3095 }
3096 
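/* Illustration (not part of the original source): the matching sketch
   for a moving remap.  'old_addr', 'old_len', 'new_addr' and 'new_len'
   are caller-supplied, page-aligned values, and the two ranges must
   not overlap:

      Bool need_discard;
      if (VG_(am_relocate_nooverlap_client)( &need_discard,
                                             old_addr, old_len,
                                             new_addr, new_len )
          && need_discard) {
         VG_(discard_translations)( old_addr, old_len, "relocate(sketch)" );
         VG_(discard_translations)( new_addr, new_len, "relocate(sketch)" );
      }

   As the comment above says, translations must be dropped from both
   the old and the new ranges. */
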
3097 #endif // HAVE_MREMAP
3098 
3099 
3100 #if defined(VGO_linux)
3101 
3102 /*-----------------------------------------------------------------*/
3103 /*---                                                           ---*/
3104 /*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
3105 /*--- Almost completely independent of the stuff above.  The    ---*/
3106 /*--- only function it 'exports' to the code above this comment ---*/
3107 /*--- is parse_procselfmaps.                                    ---*/
3108 /*---                                                           ---*/
3109 /*-----------------------------------------------------------------*/
3110 
3111 /*------BEGIN-procmaps-parser-for-Linux--------------------------*/
3112 
3113 /* Size of a smallish table used to read /proc/self/maps entries. */
3114 #define M_PROCMAP_BUF 100000
3115 
3116 /* static ... to keep it out of the stack frame. */
3117 static HChar procmap_buf[M_PROCMAP_BUF];
3118 
3119 /* Records length of /proc/self/maps read into procmap_buf. */
3120 static Int  buf_n_tot;
3121 
3122 /* Helper fns. */
3123 
3124 static Int hexdigit ( HChar c )
3125 {
3126    if (c >= '0' && c <= '9') return (Int)(c - '0');
3127    if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
3128    if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
3129    return -1;
3130 }
3131 
3132 static Int decdigit ( HChar c )
3133 {
3134    if (c >= '0' && c <= '9') return (Int)(c - '0');
3135    return -1;
3136 }
3137 
3138 static Int readchar ( const HChar* buf, HChar* ch )
3139 {
3140    if (*buf == 0) return 0;
3141    *ch = *buf;
3142    return 1;
3143 }
3144 
3145 static Int readhex ( const HChar* buf, UWord* val )
3146 {
3147    /* Read a word-sized hex number. */
3148    Int n = 0;
3149    *val = 0;
3150    while (hexdigit(*buf) >= 0) {
3151       *val = (*val << 4) + hexdigit(*buf);
3152       n++; buf++;
3153    }
3154    return n;
3155 }
3156 
3157 static Int readhex64 ( const HChar* buf, ULong* val )
3158 {
3159    /* Read a potentially 64-bit hex number. */
3160    Int n = 0;
3161    *val = 0;
3162    while (hexdigit(*buf) >= 0) {
3163       *val = (*val << 4) + hexdigit(*buf);
3164       n++; buf++;
3165    }
3166    return n;
3167 }
3168 
3169 static Int readdec64 ( const HChar* buf, ULong* val )
3170 {
3171    Int n = 0;
3172    *val = 0;
3173    while (decdigit(*buf) >= 0) {
3174       *val = (*val * 10) + decdigit(*buf);
3175       n++; buf++;
3176    }
3177    return n;
3178 }
3179 
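/* Illustration (not part of the original source): the helpers above
   each return the number of characters consumed, so the parser below
   simply advances its index by the return value.  For a hypothetical
   buffer "08048000-08056000":

      UWord start, end;
      HChar dash;
      Int   i = 0;
      i += readhex( &buf[i], &start );   // start == 0x08048000, i == 8
      i += readchar( &buf[i], &dash );   // dash  == '-',        i == 9
      i += readhex( &buf[i], &end );     // end   == 0x08056000, i == 17
*/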
3180 
3181 /* Get the contents of /proc/self/maps into a static buffer.  If
3182    there's a syntax error, if it won't fit, or if any other failure
3183    occurs, just abort. */
3184 
3185 static void read_procselfmaps_into_buf ( void )
3186 {
3187    Int    n_chunk;
3188    SysRes fd;
3189 
3190    /* Read the initial memory mapping from the /proc filesystem. */
3191    fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
3192    if (sr_isError(fd))
3193       ML_(am_barf)("can't open /proc/self/maps");
3194 
3195    buf_n_tot = 0;
3196    do {
3197       n_chunk = ML_(am_read)( sr_Res(fd), &procmap_buf[buf_n_tot],
3198                               M_PROCMAP_BUF - buf_n_tot );
3199       if (n_chunk >= 0)
3200          buf_n_tot += n_chunk;
3201    } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
3202 
3203    ML_(am_close)(sr_Res(fd));
3204 
3205    if (buf_n_tot >= M_PROCMAP_BUF-5)
3206       ML_(am_barf_toolow)("M_PROCMAP_BUF");
3207    if (buf_n_tot == 0)
3208       ML_(am_barf)("I/O error on /proc/self/maps");
3209 
3210    procmap_buf[buf_n_tot] = 0;
3211 }
3212 
3213 /* Parse /proc/self/maps.  For each map entry, call
3214    record_mapping, passing it, in this order:
3215 
3216       start address in memory
3217       length
3218       page protections (using the VKI_PROT_* flags)
3219       mapped file device and inode
3220       offset in file, or zero if no file
3221       filename, zero terminated, or NULL if no file
3222 
3223    So the sig of the called fn might be
3224 
3225       void (*record_mapping)( Addr start, SizeT size, UInt prot,
3226                               ULong dev, ULong ino,
3227                               Off64T foffset, const HChar* filename )
3228 
3229    Note that the supplied filename is transiently stored; record_mapping
3230    should make a copy if it wants to keep it.
3231 
3232    Nb: it is important that this function does not alter the contents of
3233        procmap_buf!
3234 */
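
/* Illustration (not part of the original source): for a hypothetical
   /proc/self/maps line such as

      08048000-08056000 r-xp 00000000 03:01 12345      /bin/prog

   the parser below would call

      record_mapping( 0x08048000, 0xE000,
                      VKI_PROT_READ|VKI_PROT_EXEC,
                      0x301, 12345, 0, "/bin/prog" );

   having packed maj:min 03:01 into 0x301 using the scheme described
   further down, and would call record_gap first if the previous entry
   ended below 0x08048000. */
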
3235 static void parse_procselfmaps (
3236       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3237                               ULong dev, ULong ino, Off64T offset,
3238                               const HChar* filename ),
3239       void (*record_gap)( Addr addr, SizeT len )
3240    )
3241 {
3242    Int    i, j, i_eol;
3243    Addr   start, endPlusOne, gapStart;
3244    HChar* filename;
3245    HChar  rr, ww, xx, pp, ch, tmp;
3246    UInt	  prot;
3247    UWord  maj, min;
3248    ULong  foffset, dev, ino;
3249 
3250    foffset = ino = 0; /* keep gcc-4.1.0 happy */
3251 
3252    read_procselfmaps_into_buf();
3253 
3254    aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);
3255 
3256    if (0)
3257       VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);
3258 
3259    /* Ok, it's safely aboard.  Parse the entries. */
3260    i = 0;
3261    gapStart = Addr_MIN;
3262    while (True) {
3263       if (i >= buf_n_tot) break;
3264 
3265       /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
3266       j = readhex(&procmap_buf[i], &start);
3267       if (j > 0) i += j; else goto syntaxerror;
3268       j = readchar(&procmap_buf[i], &ch);
3269       if (j == 1 && ch == '-') i += j; else goto syntaxerror;
3270       j = readhex(&procmap_buf[i], &endPlusOne);
3271       if (j > 0) i += j; else goto syntaxerror;
3272 
3273       j = readchar(&procmap_buf[i], &ch);
3274       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3275 
3276       j = readchar(&procmap_buf[i], &rr);
3277       if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
3278       j = readchar(&procmap_buf[i], &ww);
3279       if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
3280       j = readchar(&procmap_buf[i], &xx);
3281       if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
3282       /* This field is the shared/private flag */
3283       j = readchar(&procmap_buf[i], &pp);
3284       if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
3285                                               i += j; else goto syntaxerror;
3286 
3287       j = readchar(&procmap_buf[i], &ch);
3288       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3289 
3290       j = readhex64(&procmap_buf[i], &foffset);
3291       if (j > 0) i += j; else goto syntaxerror;
3292 
3293       j = readchar(&procmap_buf[i], &ch);
3294       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3295 
3296       j = readhex(&procmap_buf[i], &maj);
3297       if (j > 0) i += j; else goto syntaxerror;
3298       j = readchar(&procmap_buf[i], &ch);
3299       if (j == 1 && ch == ':') i += j; else goto syntaxerror;
3300       j = readhex(&procmap_buf[i], &min);
3301       if (j > 0) i += j; else goto syntaxerror;
3302 
3303       j = readchar(&procmap_buf[i], &ch);
3304       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3305 
3306       j = readdec64(&procmap_buf[i], &ino);
3307       if (j > 0) i += j; else goto syntaxerror;
3308 
3309       goto read_line_ok;
3310 
3311     syntaxerror:
3312       VG_(debugLog)(0, "Valgrind:",
3313                        "FATAL: syntax error reading /proc/self/maps\n");
3314       { Int k, m;
3315         HChar buf50[51];
3316         m = 0;
3317         buf50[m] = 0;
3318         k = i - 50;
3319         if (k < 0) k = 0;
3320         for (; k <= i; k++) {
3321            buf50[m] = procmap_buf[k];
3322            buf50[m+1] = 0;
3323            if (m < 50-1) m++;
3324         }
3325         VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
3326       }
3327       ML_(am_exit)(1);
3328 
3329     read_line_ok:
3330 
3331       aspacem_assert(i < buf_n_tot);
3332 
3333       /* Try and find the name of the file mapped to this segment, if
3334          it exists.  Note that file names can contain spaces. */
3335 
3336       // Move i to the next non-space char, which should be either a '/',
3337       // a '[', or a newline.
3338       while (procmap_buf[i] == ' ') i++;
3339 
3340       // Move i_eol to the end of the line.
3341       i_eol = i;
3342       while (procmap_buf[i_eol] != '\n') i_eol++;
3343 
3344       // If there's a filename...
3345       if (procmap_buf[i] == '/') {
3346          /* Minor hack: put a '\0' at the filename end for the call to
3347             'record_mapping', then restore the old char with 'tmp'. */
3348          filename = &procmap_buf[i];
3349          tmp = filename[i_eol - i];
3350          filename[i_eol - i] = '\0';
3351       } else {
3352 	 tmp = 0;
3353          filename = NULL;
3354          foffset = 0;
3355       }
3356 
3357       prot = 0;
3358       if (rr == 'r') prot |= VKI_PROT_READ;
3359       if (ww == 'w') prot |= VKI_PROT_WRITE;
3360       if (xx == 'x') prot |= VKI_PROT_EXEC;
3361 
3362       /* Linux has two ways to encode a device number when it
3363          is exposed to user space (via fstat etc). The old way
3364          is the traditional unix scheme that produces a 16 bit
3365          device number with the top 8 being the major number and
3366          the bottom 8 the minor number.
3367 
3368          The new scheme allows for a 12 bit major number and
3369          a 20 bit minor number by using a 32 bit device number
3370          and putting the top 12 bits of the minor number into
3371          the top 12 bits of the device number thus leaving an
3372          extra 4 bits for the major number.
3373 
3374          If the minor and major number are both single byte
3375          values then both schemes give the same result so we
3376          use the new scheme here in case either number is
3377          outside the 0-255 range and then use fstat64 when
3378          available (or fstat on 64 bit systems) so that we
3379          should always have a new style device number and
3380          everything should match. */
3381       dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);
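      /* Worked example (illustrative only): maj = 0x08, min = 0x12345
         gives dev = 0x45 | 0x800 | (0x12300 << 12) = 0x12300845.  For
         minors that fit in one byte the third term is zero and the
         result coincides with the old 16-bit major:minor scheme. */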
3382 
3383       if (record_gap && gapStart < start)
3384          (*record_gap) ( gapStart, start-gapStart );
3385 
3386       if (record_mapping && start < endPlusOne)
3387          (*record_mapping) ( start, endPlusOne-start,
3388                              prot, dev, ino,
3389                              foffset, filename );
3390 
3391       if ('\0' != tmp) {
3392          filename[i_eol - i] = tmp;
3393       }
3394 
3395       i = i_eol + 1;
3396       gapStart = endPlusOne;
3397    }
3398 
3399 #  if defined(VGP_arm_linux)
3400    /* ARM puts code at the end of memory that contains processor
3401       specific stuff (cmpxchg, getting the thread local storage, etc.)
3402       This isn't specified in /proc/self/maps, so do it here.  This
3403       kludgery causes the view of memory, as presented to
3404       record_gap/record_mapping, to actually reflect reality.  IMO
3405       (JRS, 2010-Jan-03) the fact that /proc/.../maps does not list
3406       the commpage should be regarded as a bug in the kernel. */
3407    { const Addr commpage_start = ARM_LINUX_FAKE_COMMPAGE_START;
3408      const Addr commpage_end1  = ARM_LINUX_FAKE_COMMPAGE_END1;
3409      if (gapStart < commpage_start) {
3410         if (record_gap)
3411            (*record_gap)( gapStart, commpage_start - gapStart );
3412         if (record_mapping)
3413            (*record_mapping)( commpage_start, commpage_end1 - commpage_start,
3414                               VKI_PROT_READ|VKI_PROT_EXEC,
3415                               0/*dev*/, 0/*ino*/, 0/*foffset*/,
3416                               NULL);
3417         gapStart = commpage_end1;
3418      }
3419    }
3420 #  endif
3421 
3422    if (record_gap && gapStart < Addr_MAX)
3423       (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
3424 }
3425 
3426 /*------END-procmaps-parser-for-Linux----------------------------*/
3427 
3428 /*------BEGIN-procmaps-parser-for-Darwin-------------------------*/
3429 
3430 #elif defined(VGO_darwin)
3431 #include <mach/mach.h>
3432 #include <mach/mach_vm.h>
3433 
3434 static unsigned int mach2vki(unsigned int vm_prot)
3435 {
3436    return
3437       ((vm_prot & VM_PROT_READ)    ? VKI_PROT_READ    : 0) |
3438       ((vm_prot & VM_PROT_WRITE)   ? VKI_PROT_WRITE   : 0) |
3439       ((vm_prot & VM_PROT_EXECUTE) ? VKI_PROT_EXEC    : 0) ;
3440 }
3441 
3442 static UInt stats_machcalls = 0;
3443 
3444 static void parse_procselfmaps (
3445       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3446                               ULong dev, ULong ino, Off64T offset,
3447                               const HChar* filename ),
3448       void (*record_gap)( Addr addr, SizeT len )
3449    )
3450 {
3451    vm_address_t iter;
3452    unsigned int depth;
3453    vm_address_t last;
3454 
3455    iter = 0;
3456    depth = 0;
3457    last = 0;
3458    while (1) {
3459       mach_vm_address_t addr = iter;
3460       mach_vm_size_t size;
3461       vm_region_submap_short_info_data_64_t info;
3462       kern_return_t kr;
3463 
3464       while (1) {
3465          mach_msg_type_number_t info_count
3466             = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
3467          stats_machcalls++;
3468          kr = mach_vm_region_recurse(mach_task_self(), &addr, &size, &depth,
3469                                      (vm_region_info_t)&info, &info_count);
3470          if (kr)
3471             return;
3472          if (info.is_submap) {
3473             depth++;
3474             continue;
3475          }
3476          break;
3477       }
3478       iter = addr + size;
3479 
3480       if (addr > last  &&  record_gap) {
3481          (*record_gap)(last, addr - last);
3482       }
3483       if (record_mapping) {
3484          (*record_mapping)(addr, size, mach2vki(info.protection),
3485                            0, 0, info.offset, NULL);
3486       }
3487       last = addr + size;
3488    }
3489 
3490    if ((Addr)-1 > last  &&  record_gap)
3491       (*record_gap)(last, (Addr)-1 - last);
3492 }
3493 
3494 // Urr.  So much for thread safety.
3495 static Bool        css_overflowed;
3496 static ChangedSeg* css_local;
3497 static Int         css_size_local;
3498 static Int         css_used_local;
3499 
3500 static Addr Addr__max ( Addr a, Addr b ) { return a > b ? a : b; }
3501 static Addr Addr__min ( Addr a, Addr b ) { return a < b ? a : b; }
3502 
3503 static void add_mapping_callback(Addr addr, SizeT len, UInt prot,
3504                                  ULong dev, ULong ino, Off64T offset,
3505                                  const HChar *filename)
3506 {
3507    // derived from sync_check_mapping_callback()
3508 
3509    /* JRS 2012-Mar-07: this all seems very dubious to me.  It would be
3510       safer to see if we can find, in V's segment collection, one
3511       single segment that completely covers the range [addr, +len)
3512       (and possibly more), and that has the exact same other
3513       properties (prot, dev, ino, offset, etc) as the data presented
3514       here.  If found, we just skip.  Otherwise add the data presented
3515       here into css_local[]. */
3516 
3517    Int iLo, iHi, i;
3518 
3519    if (len == 0) return;
3520 
3521    /* The kernel should not give us wraparounds. */
3522    aspacem_assert(addr <= addr + len - 1);
3523 
3524    iLo = find_nsegment_idx( addr );
3525    iHi = find_nsegment_idx( addr + len - 1 );
3526 
3527    /* NSegments iLo .. iHi inclusive should agree with the presented
3528       data. */
3529    for (i = iLo; i <= iHi; i++) {
3530 
3531       UInt seg_prot;
3532 
3533       if (nsegments[i].kind == SkAnonV  ||  nsegments[i].kind == SkFileV) {
3534          /* Ignore V regions */
3535          continue;
3536       }
3537       else if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn) {
3538          /* Add mapping for SkResvn regions */
3539          ChangedSeg* cs = &css_local[css_used_local];
3540          if (css_used_local < css_size_local) {
3541             cs->is_added = True;
3542             cs->start    = addr;
3543             cs->end      = addr + len - 1;
3544             cs->prot     = prot;
3545             cs->offset   = offset;
3546             css_used_local++;
3547          } else {
3548             css_overflowed = True;
3549          }
3550          return;
3551 
3552       }
3553       else if (nsegments[i].kind == SkAnonC ||
3554                nsegments[i].kind == SkFileC ||
3555                nsegments[i].kind == SkShmC)
3556       {
3557          /* Check permissions on client regions */
3558          // GrP fixme
3559          seg_prot = 0;
3560          if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
3561          if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
3562 #        if defined(VGA_x86)
3563          // GrP fixme sloppyXcheck
3564          // darwin: kernel X ignored and spuriously changes? (vm_copy)
3565          seg_prot |= (prot & VKI_PROT_EXEC);
3566 #        else
3567          if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
3568 #        endif
3569          if (seg_prot != prot) {
3570              if (VG_(clo_trace_syscalls))
3571                  VG_(debugLog)(0,"aspacem","region %p..%p permission "
3572                                  "mismatch (kernel %x, V %x)\n",
3573                                  (void*)nsegments[i].start,
3574                                  (void*)(nsegments[i].end+1), prot, seg_prot);
3575             /* Add mapping for regions with protection changes */
3576             ChangedSeg* cs = &css_local[css_used_local];
3577             if (css_used_local < css_size_local) {
3578                cs->is_added = True;
3579                cs->start    = addr;
3580                cs->end      = addr + len - 1;
3581                cs->prot     = prot;
3582                cs->offset   = offset;
3583                css_used_local++;
3584             } else {
3585                css_overflowed = True;
3586             }
3587 	    return;
3588 
3589          }
3590 
3591       } else {
3592          aspacem_assert(0);
3593       }
3594    }
3595 }
3596 
3597 static void remove_mapping_callback(Addr addr, SizeT len)
3598 {
3599    // derived from sync_check_gap_callback()
3600 
3601    Int iLo, iHi, i;
3602 
3603    if (len == 0)
3604       return;
3605 
3606    /* The kernel should not give us wraparounds. */
3607    aspacem_assert(addr <= addr + len - 1);
3608 
3609    iLo = find_nsegment_idx( addr );
3610    iHi = find_nsegment_idx( addr + len - 1 );
3611 
3612    /* NSegments iLo .. iHi inclusive should agree with the presented data. */
3613    for (i = iLo; i <= iHi; i++) {
3614       if (nsegments[i].kind != SkFree && nsegments[i].kind != SkResvn) {
3615          /* V has a mapping, kernel doesn't.  Add to css_local[],
3616             directives to chop off the part of the V mapping that
3617             falls within the gap that the kernel tells us is
3618             present. */
3619          ChangedSeg* cs = &css_local[css_used_local];
3620          if (css_used_local < css_size_local) {
3621             cs->is_added = False;
3622             cs->start    = Addr__max(nsegments[i].start, addr);
3623             cs->end      = Addr__min(nsegments[i].end,   addr + len - 1);
3624             aspacem_assert(VG_IS_PAGE_ALIGNED(cs->start));
3625             aspacem_assert(VG_IS_PAGE_ALIGNED(cs->end+1));
3626             /* I don't think the following should fail.  But if it
3627                does, just omit the css_used_local++ in the cases where
3628                it doesn't hold. */
3629             aspacem_assert(cs->start < cs->end);
3630             cs->prot     = 0;
3631             cs->offset   = 0;
3632             css_used_local++;
3633          } else {
3634             css_overflowed = True;
3635          }
3636       }
3637    }
3638 }
3639 
3640 
3641 // Returns False if 'css' wasn't big enough.
3642 Bool VG_(get_changed_segments)(
3643       const HChar* when, const HChar* where, /*OUT*/ChangedSeg* css,
3644       Int css_size, /*OUT*/Int* css_used)
3645 {
3646    static UInt stats_synccalls = 1;
3647    aspacem_assert(when && where);
3648 
3649    if (0)
3650       VG_(debugLog)(0,"aspacem",
3651          "[%u,%u] VG_(get_changed_segments)(%s, %s)\n",
3652          stats_synccalls++, stats_machcalls, when, where
3653       );
3654 
3655    css_overflowed = False;
3656    css_local = css;
3657    css_size_local = css_size;
3658    css_used_local = 0;
3659 
3660    // Get the list of segs that need to be added/removed.
3661    parse_procselfmaps(&add_mapping_callback, &remove_mapping_callback);
3662 
3663    *css_used = css_used_local;
3664 
3665    if (css_overflowed) {
3666       aspacem_assert(css_used_local == css_size_local);
3667    }
3668 
3669    return !css_overflowed;
3670 }
3671 
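/* Illustration (not part of the original source): a sketch of how a
   caller might consume VG_(get_changed_segments).  'CSS_MAX' and the
   surrounding names are hypothetical:

      ChangedSeg css[CSS_MAX];
      Int        css_used, i;
      if (!VG_(get_changed_segments)( "sync", "after mach call",
                                      css, CSS_MAX, &css_used )) {
         // css was too small: css_used == CSS_MAX and further changes
         // were dropped, so the caller needs a bigger array.
      }
      for (i = 0; i < css_used; i++) {
         // css[i].is_added says whether [start,end] was mapped or
         // unmapped behind Valgrind's back; resync accordingly.
      }
*/
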
3672 #endif // defined(VGO_darwin)
3673 
3674 /*------END-procmaps-parser-for-Darwin---------------------------*/
3675 
3676 #endif // defined(VGO_linux) || defined(VGO_darwin)
3677 
3678 /*--------------------------------------------------------------------*/
3679 /*--- end                                                          ---*/
3680 /*--------------------------------------------------------------------*/
3681