/*--------------------------------------------------------------------*/
/*--- The address space manager: stuff common to all platforms    ---*/
/*---                                                              ---*/
/*---                                        m_aspacemgr-common.c  ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2006-2013 OpenWorks LLP
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* *************************************************************
   DO NOT INCLUDE ANY OTHER FILES HERE.
   ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
   AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
   ************************************************************* */

#include "priv_aspacemgr.h"
#include "config.h"


/*-----------------------------------------------------------------*/
/*---                                                           ---*/
/*--- Stuff to make aspacem almost completely independent of    ---*/
/*--- the rest of Valgrind.                                     ---*/
/*---                                                           ---*/
/*-----------------------------------------------------------------*/

//--------------------------------------------------------------
// Simple assert and assert-like fns, which avoid dependence on
// m_libcassert, and hence on the entire debug-info reader swamp

__attribute__ ((noreturn))
void ML_(am_exit)( Int status )
{
#  if defined(VGO_linux)
   (void)VG_(do_syscall1)(__NR_exit_group, status);
#  endif
   (void)VG_(do_syscall1)(__NR_exit, status);
   /* Why are we still alive here? */
   /*NOTREACHED*/
   *(volatile Int *)0 = 'x';
   aspacem_assert(2+2 == 5);
}

void ML_(am_barf) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_barf_toolow) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem",
                 "Valgrind: FATAL: %s is too low.\n", what);
   VG_(debugLog)(0, "aspacem", "  Increase it and rebuild.  "
                               "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_assert_fail)( const HChar* expr,
                          const HChar* file,
                          Int line,
                          const HChar* fn )
{
   VG_(debugLog)(0, "aspacem",
                 "Valgrind: FATAL: aspacem assertion failed:\n");
   VG_(debugLog)(0, "aspacem", "  %s\n", expr);
   VG_(debugLog)(0, "aspacem", "  at %s:%d (%s)\n", file, line, fn);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}
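
/* Note: aspacem_assert is a macro declared in priv_aspacemgr.h which
   funnels failures into ML_(am_assert_fail) above, keeping this module
   free of m_libcassert.  As a rough, illustrative sketch only (the real
   definition lives in the header), it behaves along these lines:

      #define aspacem_assert(expr)                                 \
        ((void)((expr) ? 0                                         \
                       : (ML_(am_assert_fail)(#expr,               \
                                              __FILE__, __LINE__,  \
                                              __PRETTY_FUNCTION__), 0)))
*/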

Int ML_(am_getpid)( void )
{
   SysRes sres = VG_(do_syscall0)(__NR_getpid);
   aspacem_assert(!sr_isError(sres));
   return sr_Res(sres);
}


//--------------------------------------------------------------
// A simple sprintf implementation, so as to avoid dependence on
// m_libcprint.

static void local_add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
   HChar** aspacem_sprintf_ptr = p;
   *(*aspacem_sprintf_ptr)++ = c;
}

static
UInt local_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
{
   Int ret;
   HChar *aspacem_sprintf_ptr = buf;

   ret = VG_(debugLog_vprintf)
            ( local_add_to_aspacem_sprintf_buf,
              &aspacem_sprintf_ptr, format, vargs );
   local_add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);

   return ret;
}

UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... )
{
   UInt ret;
   va_list vargs;

   va_start(vargs, format);
   ret = local_vsprintf(buf, format, vargs);
   va_end(vargs);

   return ret;
}
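
/* Illustrative usage (a sketch; as with plain sprintf, the caller is
   responsible for making the buffer big enough):

      HChar tmp[64];
      ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);

   This is exactly how ML_(am_resolve_filename) below builds its
   /proc path. */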


//--------------------------------------------------------------
// Direct access to a handful of syscalls.  This avoids dependence on
// m_libc*.  THESE DO NOT UPDATE THE aspacem-internal DATA
// STRUCTURES (SEGMENT ARRAY).  DO NOT USE THEM UNLESS YOU KNOW WHAT
// YOU ARE DOING.

/* --- Pertaining to mappings --- */

/* Note: this is VG_, not ML_. */
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot,
                                  UInt flags, Int fd, Off64T offset)
{
   SysRes res;
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));

#  if defined(VGP_arm64_linux)
   res = VG_(do_syscall6)(__NR3264_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
        || defined(VGP_arm_linux)
   /* mmap2 takes its offset in 4096-byte units even if the actual page
      size is bigger, so convert the byte offset accordingly. */
   aspacem_assert((offset % 4096) == 0);
   res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
                          prot, flags, fd, offset / 4096);
#  elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
        || defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
        || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, (UInt)fd, offset);
#  else
#    error Unknown platform
#  endif
   return res;
}
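
/* Illustrative use (a sketch only): requesting an anonymous, private
   mapping without updating the segment array.  'length' is assumed to
   be a page multiple; the VKI_* constants are the usual ones from the
   vki headers.

      SysRes r = VG_(am_do_mmap_NO_NOTIFY)(
                    0, length,
                    VKI_PROT_READ | VKI_PROT_WRITE,
                    VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS,
                    -1, 0 );
      if (sr_isError(r)) { /* handle failure */ }

   Remember the warning above: a caller doing this must notify aspacem
   itself, or know exactly why it need not. */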

static
SysRes local_do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
{
   return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
}

SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length)
{
   return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
}

#if HAVE_MREMAP
/* The following are used only to implement mremap(). */

SysRes ML_(am_do_extend_mapping_NO_NOTIFY)(
          Addr old_addr,
          SizeT old_len,
          SizeT new_len
       )
{
   /* Extend the mapping old_addr .. old_addr+old_len-1 to have length
      new_len, WITHOUT moving it.  If it can't be extended in place,
      fail. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             0/*flags, meaning: must be at old_addr, else FAIL */,
             0/*new_addr, is ignored*/
          );
#  else
#    error Unknown OS
#  endif
}

SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
          Addr old_addr, Addr old_len,
          Addr new_addr, Addr new_len
       )
{
   /* Move the mapping old_addr .. old_addr+old_len-1 to the new
      location and with the new length.  Only needs to handle the case
      where the two areas do not overlap, neither length is zero, and
      all args are page aligned. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
             new_addr
          );
#  else
#    error Unknown OS
#  endif
}
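
/* To illustrate the distinction between the two wrappers (sketch only;
   the addresses and lengths here are hypothetical):

      // Grow [old_addr, old_addr+old_len) in place to new_len, or fail:
      SysRes r1 = ML_(am_do_extend_mapping_NO_NOTIFY)(
                     old_addr, old_len, new_len );

      // Move it to a non-overlapping new_addr with length new_len, or fail:
      SysRes r2 = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
                     old_addr, old_len, new_addr, new_len );

   Both rely on Linux mremap() flag semantics: flags==0 forbids moving,
   while MREMAP_MAYMOVE|MREMAP_FIXED demands a move to exactly new_addr. */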

#endif

/* --- Pertaining to files --- */

SysRes ML_(am_open) ( const HChar* pathname, Int flags, Int mode )
{
#  if defined(VGP_arm64_linux)
   /* ARM64 wants to use __NR_openat rather than __NR_open. */
   SysRes res = VG_(do_syscall4)(__NR_openat,
                                 VKI_AT_FDCWD, (UWord)pathname, flags, mode);
#  else
   SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
#  endif
   return res;
}

Int ML_(am_read) ( Int fd, void* buf, Int count)
{
   SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
   return sr_isError(res) ? -1 : sr_Res(res);
}

void ML_(am_close) ( Int fd )
{
   (void)VG_(do_syscall1)(__NR_close, fd);
}

Int ML_(am_readlink)(HChar* path, HChar* buf, UInt bufsiz)
{
   SysRes res;
#  if defined(VGP_arm64_linux)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
                          (UWord)path, (UWord)buf, bufsiz);
#  else
   res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}

Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
{
#  if defined(VGO_linux)
   SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
#  elif defined(VGO_darwin)
   SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
#  else
#    error "Unknown OS"
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}

/* Get the dev, inode and mode info for a file descriptor, if
   possible.  Returns True on success. */
Bool ML_(am_get_fd_d_i_m)( Int fd,
                           /*OUT*/ULong* dev,
                           /*OUT*/ULong* ino, /*OUT*/UInt* mode )
{
   SysRes res;
   struct vki_stat buf;
#  if defined(VGO_linux) && defined(__NR_fstat64)
   /* Try fstat64 first as it can cope with minor and major device
      numbers outside the 0-255 range and it works properly for x86
      binaries on amd64 systems where fstat seems to be broken. */
   struct vki_stat64 buf64;
   res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf64);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
#  endif
   res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf.st_dev;
      *ino  = (ULong)buf.st_ino;
      *mode = (UInt) buf.st_mode;
      return True;
   }
   return False;
}
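
/* Typical use (sketch): identifying the file behind a descriptor, e.g.
   so a mapping can later be matched against it by device and inode.

      ULong dev, ino;
      UInt  mode;
      if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
         // dev/ino identify the file; mode gives its type and permissions
      }
*/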

Bool ML_(am_resolve_filename) ( Int fd, /*OUT*/HChar* buf, Int nbuf )
{
#  if defined(VGO_linux)
   Int i;
   HChar tmp[64];
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;

#  elif defined(VGO_darwin)
   HChar tmp[VKI_MAXPATHLEN+1];
   if (0 == ML_(am_fcntl)(fd, VKI_F_GETPATH, (UWord)tmp)) {
      if (nbuf > 0) {
         VG_(strncpy)( buf, tmp, nbuf < sizeof(tmp) ? nbuf : sizeof(tmp) );
         buf[nbuf-1] = 0;
      }
      if (tmp[0] == '/') return True;
   }
   return False;

#  else
#    error Unknown OS
#  endif
}
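
/* Usage sketch: on Linux this reads the /proc/self/fd/<fd> symlink; on
   Darwin it asks the kernel via fcntl(F_GETPATH).  Either way the caller
   supplies the buffer, and only an absolute path counts as success.

      HChar name[512];   // illustrative size, chosen by the caller
      if (ML_(am_resolve_filename)(fd, name, sizeof(name))) {
         // 'name' now holds an absolute path for fd
      }
*/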



/*-----------------------------------------------------------------*/
/*---                                                           ---*/
/*--- Manage stacks for Valgrind itself.                        ---*/
/*---                                                           ---*/
/*-----------------------------------------------------------------*/

/* Allocate and initialise a VgStack (anonymous valgrind space).
   Protect the stack active area and the guard areas appropriately.
   Returns NULL on failure, else the address of the bottom of the
   stack.  On success, also sets *initial_sp to what the stack pointer
   should be set to. */

VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack. */
   szB = VG_STACK_GUARD_SZB
         + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(AddrH)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas. */
   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack->bytes[0],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[0],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used. */

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1, "aspacem",
                  "allocated thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
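
/* Usage sketch: how a caller might set up a stack for Valgrind's own use
   (variable names here are illustrative, not taken from this file):

      Addr sp_init;
      VgStack* stk = VG_(am_alloc_VgStack)( &sp_init );
      if (stk == NULL)
         ML_(am_barf)("couldn't allocate a VgStack");
      // sp_init is 32-byte aligned, a little below the upper guard page,
      // and is what the new thread's stack pointer should start at.
*/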


/* Figure out how many bytes of the stack's active area have not
   been used.  Used for estimating if we are close to overflowing it. */

SizeT VG_(am_get_VgStack_unused_szB)( VgStack* stack, SizeT limit )
{
   SizeT i;
   UInt* p;

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++) {
      if (p[i] != 0xDEADBEEF)
         break;
      if (i * sizeof(UInt) >= limit)
         break;
   }

   return i * sizeof(UInt);
}
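
/* Usage sketch: paired with the 0xDEADBEEF fill above, this scans from
   the low-address end of the active area and reports how many bytes were
   never written, capped at 'limit' so the whole stack need not be walked:

      SizeT unused = VG_(am_get_VgStack_unused_szB)(stk, 4096);
      if (unused < 4096) {
         // less than a page of headroom remains -- close to overflow
      }
*/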


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/