1
2 /*--------------------------------------------------------------------*/
3 /*--- The address space manager: stuff common to all platforms ---*/
4 /*--- ---*/
5 /*--- m_aspacemgr-common.c ---*/
6 /*--------------------------------------------------------------------*/
7
8 /*
9 This file is part of Valgrind, a dynamic binary instrumentation
10 framework.
11
12 Copyright (C) 2006-2012 OpenWorks LLP
13 info@open-works.co.uk
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31 */
32
33 /* *************************************************************
34 DO NOT INCLUDE ANY OTHER FILES HERE.
35 ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
36 AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
37 ************************************************************* */
38
39 #include "priv_aspacemgr.h"
40 #include "config.h"
41
42
43 /*-----------------------------------------------------------------*/
44 /*--- ---*/
45 /*--- Stuff to make aspacem almost completely independent of ---*/
46 /*--- the rest of Valgrind. ---*/
47 /*--- ---*/
48 /*-----------------------------------------------------------------*/
49
50 //--------------------------------------------------------------
51 // Simple assert and assert-like fns, which avoid dependence on
52 // m_libcassert, and hence on the entire debug-info reader swamp
53
__attribute__ ((noreturn))
void ML_(am_exit)( Int status )
{
   /* Terminate the whole process with 'status', using raw syscalls so
      as not to depend on m_libcassert / the normal exit machinery. */
#  if defined(VGO_linux)
   /* On Linux, take down all threads, not just the calling one. */
   (void)VG_(do_syscall1)(__NR_exit_group, status);
#  endif
   (void)VG_(do_syscall1)(__NR_exit, status);
   /* Why are we still alive here? */
   /*NOTREACHED*/
   /* Last-ditch attempts to die: fault on a null store, then fail an
      assertion. */
   *(volatile Int *)0 = 'x';
   aspacem_assert(2+2 == 5);
}
66
void ML_(am_barf) ( HChar* what )
{
   /* Report a fatal aspacem error described by 'what' and exit.
      Logs via VG_(debugLog) directly, avoiding m_libcprint. */
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}
73
void ML_(am_barf_toolow) ( HChar* what )
{
   /* Report that the compile-time limit named by 'what' is too small,
      then exit.  The message tells the user to increase it and
      rebuild, since this cannot be fixed at runtime. */
   VG_(debugLog)(0, "aspacem",
                 "Valgrind: FATAL: %s is too low.\n", what);
   VG_(debugLog)(0, "aspacem", " Increase it and rebuild. "
                 "Exiting now.\n");
   ML_(am_exit)(1);
}
82
void ML_(am_assert_fail)( const HChar* expr,
                          const Char* file,
                          Int line,
                          const Char* fn )
{
   /* aspacem's private assertion-failure handler: log the failed
      expression and its source location, then exit.  Kept separate
      from m_libcassert to avoid dragging in the debug-info reader
      (see the note at the top of this section). */
   VG_(debugLog)(0, "aspacem",
                 "Valgrind: FATAL: aspacem assertion failed:\n");
   VG_(debugLog)(0, "aspacem", " %s\n", expr);
   VG_(debugLog)(0, "aspacem", " at %s:%d (%s)\n", file,line,fn);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}
95
ML_(am_getpid)96 Int ML_(am_getpid)( void )
97 {
98 SysRes sres = VG_(do_syscall0)(__NR_getpid);
99 aspacem_assert(!sr_isError(sres));
100 return sr_Res(sres);
101 }
102
103
104 //--------------------------------------------------------------
105 // A simple sprintf implementation, so as to avoid dependence on
106 // m_libcprint.
107
static void local_add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
   /* Emit one character through the buffer cursor at *p and advance
      the cursor.  Used as the output callback for
      VG_(debugLog_vprintf) in local_vsprintf below. */
   HChar** cursor = p;
   **cursor = c;
   (*cursor)++;
}
113
114 static
local_vsprintf(HChar * buf,const HChar * format,va_list vargs)115 UInt local_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
116 {
117 Int ret;
118 Char *aspacem_sprintf_ptr = buf;
119
120 ret = VG_(debugLog_vprintf)
121 ( local_add_to_aspacem_sprintf_buf,
122 &aspacem_sprintf_ptr, format, vargs );
123 local_add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);
124
125 return ret;
126 }
127
ML_(am_sprintf)128 UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... )
129 {
130 UInt ret;
131 va_list vargs;
132
133 va_start(vargs,format);
134 ret = local_vsprintf(buf, format, vargs);
135 va_end(vargs);
136
137 return ret;
138 }
139
140
141 //--------------------------------------------------------------
142 // Direct access to a handful of syscalls. This avoids dependence on
143 // m_libc*. THESE DO NOT UPDATE THE aspacem-internal DATA
144 // STRUCTURES (SEGMENT ARRAY). DO NOT USE THEM UNLESS YOU KNOW WHAT
145 // YOU ARE DOING.
146
147 /* --- Pertaining to mappings --- */
148
149 /* Note: this is VG_, not ML_. */
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot,
                                  UInt flags, Int fd, Off64T offset)
{
   /* Raw mmap, bypassing aspacem's segment bookkeeping entirely
      (hence NO_NOTIFY).  Per-platform quirks handled here:
      32-bit Linux uses mmap2 (offset in 4096-byte units), 64-bit
      Linux uses plain mmap, and Darwin needs fd == -1 (not 0) for
      anonymous maps and, on x86, the 64-bit offset split across two
      argument words. */
   SysRes res;
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
#  if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
      || defined(VGP_arm_linux)
   /* mmap2 uses 4096 chunks even if actual page size is bigger. */
   aspacem_assert((offset % 4096) == 0);
   res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
                          prot, flags, fd, offset / 4096);
#  elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
        || defined(VGP_s390x_linux) || defined(VGP_mips32_linux)
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_darwin)
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1; // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_darwin)
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1; // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, (UInt)fd, offset);
#  else
#    error Unknown platform
#  endif
   return res;
}
182
183 static
local_do_mprotect_NO_NOTIFY(Addr start,SizeT length,UInt prot)184 SysRes local_do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
185 {
186 return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
187 }
188
ML_(am_do_munmap_NO_NOTIFY)189 SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length)
190 {
191 return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
192 }
193
194 #if HAVE_MREMAP
195 /* The following are used only to implement mremap(). */
196
ML_(am_do_extend_mapping_NO_NOTIFY)197 SysRes ML_(am_do_extend_mapping_NO_NOTIFY)(
198 Addr old_addr,
199 SizeT old_len,
200 SizeT new_len
201 )
202 {
203 /* Extend the mapping old_addr .. old_addr+old_len-1 to have length
204 new_len, WITHOUT moving it. If it can't be extended in place,
205 fail. */
206 # if defined(VGO_linux)
207 return VG_(do_syscall5)(
208 __NR_mremap,
209 old_addr, old_len, new_len,
210 0/*flags, meaning: must be at old_addr, else FAIL */,
211 0/*new_addr, is ignored*/
212 );
213 # else
214 # error Unknown OS
215 # endif
216 }
217
ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)218 SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
219 Addr old_addr, Addr old_len,
220 Addr new_addr, Addr new_len
221 )
222 {
223 /* Move the mapping old_addr .. old_addr+old_len-1 to the new
224 location and with the new length. Only needs to handle the case
225 where the two areas do not overlap, neither length is zero, and
226 all args are page aligned. */
227 # if defined(VGO_linux)
228 return VG_(do_syscall5)(
229 __NR_mremap,
230 old_addr, old_len, new_len,
231 VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
232 new_addr
233 );
234 # else
235 # error Unknown OS
236 # endif
237 }
238
239 #endif
240
241 /* --- Pertaining to files --- */
242
ML_(am_open)243 SysRes ML_(am_open) ( const Char* pathname, Int flags, Int mode )
244 {
245 SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
246 return res;
247 }
248
ML_(am_read)249 Int ML_(am_read) ( Int fd, void* buf, Int count)
250 {
251 SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
252 return sr_isError(res) ? -1 : sr_Res(res);
253 }
254
ML_(am_close)255 void ML_(am_close) ( Int fd )
256 {
257 (void)VG_(do_syscall1)(__NR_close, fd);
258 }
259
ML_(am_readlink)260 Int ML_(am_readlink)(HChar* path, HChar* buf, UInt bufsiz)
261 {
262 SysRes res;
263 res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
264 return sr_isError(res) ? -1 : sr_Res(res);
265 }
266
ML_(am_fcntl)267 Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
268 {
269 # if defined(VGO_linux)
270 SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
271 # elif defined(VGO_darwin)
272 SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
273 # else
274 # error "Unknown OS"
275 # endif
276 return sr_isError(res) ? -1 : sr_Res(res);
277 }
278
279 /* Get the dev, inode and mode info for a file descriptor, if
280 possible. Returns True on success. */
Bool ML_(am_get_fd_d_i_m)( Int fd,
                           /*OUT*/ULong* dev,
                           /*OUT*/ULong* ino, /*OUT*/UInt* mode )
{
   /* Stat 'fd' and deliver its device, inode and mode numbers through
      the OUT parameters.  Returns True on success; on failure the OUT
      parameters are left untouched. */
   SysRes res;
   struct vki_stat buf;
#  if defined(VGO_linux) && defined(__NR_fstat64)
   /* Try fstat64 first as it can cope with minor and major device
      numbers outside the 0-255 range and it works properly for x86
      binaries on amd64 systems where fstat seems to be broken. */
   struct vki_stat64 buf64;
   res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf64);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
#  endif
   /* Fall back to plain fstat (also the only path on non-Linux or
      when fstat64 is unavailable/failed). */
   res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf.st_dev;
      *ino  = (ULong)buf.st_ino;
      *mode = (UInt) buf.st_mode;
      return True;
   }
   return False;
}
309
ML_(am_resolve_filename)310 Bool ML_(am_resolve_filename) ( Int fd, /*OUT*/HChar* buf, Int nbuf )
311 {
312 #if defined(VGO_linux)
313 Int i;
314 HChar tmp[64];
315 for (i = 0; i < nbuf; i++) buf[i] = 0;
316 ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
317 if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
318 return True;
319 else
320 return False;
321
322 #elif defined(VGO_darwin)
323 HChar tmp[VKI_MAXPATHLEN+1];
324 if (0 == ML_(am_fcntl)(fd, VKI_F_GETPATH, (UWord)tmp)) {
325 if (nbuf > 0) {
326 VG_(strncpy)( buf, tmp, nbuf < sizeof(tmp) ? nbuf : sizeof(tmp) );
327 buf[nbuf-1] = 0;
328 }
329 if (tmp[0] == '/') return True;
330 }
331 return False;
332
333 # else
334 # error Unknown OS
335 # endif
336 }
337
338
339
340
341 /*-----------------------------------------------------------------*/
342 /*--- ---*/
343 /*--- Manage stacks for Valgrind itself. ---*/
344 /*--- ---*/
345 /*-----------------------------------------------------------------*/
346
347 /* Allocate and initialise a VgStack (anonymous valgrind space).
348 Protect the stack active area and the guard areas appropriately.
349 Returns NULL on failure, else the address of the bottom of the
350 stack. On success, also sets *initial_sp to what the stack pointer
351 should be set to. */
352
VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack: lower guard, active area, upper guard. */
   szB = VG_STACK_GUARD_SZB
         + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(AddrH)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas.  For each one: first revoke access with
      a raw mprotect, then tell aspacem what happened so its segment
      records stay in sync.  Lower guard first; note &stack[0] and
      &stack->bytes[0] are the same address. */
   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack[0],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[0],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   /* Upper guard, immediately past the active area. */
   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used (see VG_(am_get_VgStack_unused_szB)). */

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   /* Initial SP: top of the active area, pulled down 8 bytes and
      rounded down to a 32-byte boundary. */
   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1,"aspacem","allocated thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
418
419
420 /* Figure out how many bytes of the stack's active area have not
421 been used. Used for estimating if we are close to overflowing it. */
422
VG_(am_get_VgStack_unused_szB)423 SizeT VG_(am_get_VgStack_unused_szB)( VgStack* stack, SizeT limit )
424 {
425 SizeT i;
426 UInt* p;
427
428 p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
429 for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++) {
430 if (p[i] != 0xDEADBEEF)
431 break;
432 if (i * sizeof(UInt) >= limit)
433 break;
434 }
435
436 return i * sizeof(UInt);
437 }
438
439
440 /*--------------------------------------------------------------------*/
441 /*--- end ---*/
442 /*--------------------------------------------------------------------*/
443