//===-- sanitizer_posix.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_posix.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>

#if SANITIZER_FREEBSD
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented.  So just define it to zero.
#undef  MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

namespace __sanitizer {

// ------------- sanitizer_common.h
uptr GetMmapGranularity() {
  return GetPageSize();
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, mem_type);
  int reserrno;
  if (UNLIKELY(internal_iserror(res, &reserrno)))
    ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
  IncreaseTotalMmap(size);
  return (void *)res;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!addr || !size) return;
  uptr res = internal_munmap(addr, size);
  if (UNLIKELY(internal_iserror(res))) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }
  DecreaseTotalMmap(size);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, mem_type);
  int reserrno;
  if (UNLIKELY(internal_iserror(res, &reserrno))) {
    if (reserrno == ENOMEM)
      return nullptr;
    ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
  }
  IncreaseTotalMmap(size);
  return (void *)res;
}

// We want to map a chunk of address space aligned to 'alignment'.
// We do it by mapping a bit more and then unmapping redundant pieces.
// We probably can do it with fewer syscalls in some OS-dependent way.
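// Worked example of the trimming below (the address is purely illustrative):
// with size = alignment = 0x10000 we map map_size = 0x20000 bytes. If mmap
// returns map_res = 0x7f0000003000, res is rounded up to 0x7f0000010000, the
// 0xd000-byte head [map_res, res) is unmapped, and then the 0x3000-byte tail
// [res + size, map_end) is unmapped, leaving exactly 0x10000 aligned bytes.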
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));
  uptr map_size = size + alignment;
  uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
  if (UNLIKELY(!map_res))
    return nullptr;
  uptr map_end = map_res + map_size;
  uptr res = map_res;
  if (!IsAligned(res, alignment)) {
    res = (map_res + alignment - 1) & ~(alignment - 1);
    UnmapOrDie((void*)map_res, res - map_res);
  }
  uptr end = res + size;
  if (end != map_end)
    UnmapOrDie((void*)end, map_end - end);
  return (void*)res;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr p = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, mem_type);
  int reserrno;
  if (UNLIKELY(internal_iserror(p, &reserrno)))
    ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
  IncreaseTotalMmap(size);
  return (void *)p;
}

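// Shared helper for the MmapFixed* entry points below; they differ only in
// whether ENOMEM is tolerated (returning nullptr) or treated as fatal.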
static void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem,
                           const char *name) {
  size = RoundUpTo(size, GetPageSizeCached());
  fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
  uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON | MAP_FIXED, name);
  int reserrno;
  if (UNLIKELY(internal_iserror(p, &reserrno))) {
    if (tolerate_enomem && reserrno == ENOMEM)
      return nullptr;
    char mem_type[40];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
  }
  IncreaseTotalMmap(size);
  return (void *)p;
}

void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
  return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/, name);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
  return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/, name);
}

bool MprotectNoAccess(uptr addr, uptr size) {
  return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
}

bool MprotectReadOnly(uptr addr, uptr size) {
  return 0 == internal_mprotect((void *)addr, size, PROT_READ);
}

#if !SANITIZER_MAC
void MprotectMallocZones(void *addr, int prot) {}
#endif

fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
  if (ShouldMockFailureToOpen(filename))
    return kInvalidFd;
  int flags;
  switch (mode) {
    case RdOnly: flags = O_RDONLY; break;
    case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
    case RdWr: flags = O_RDWR | O_CREAT; break;
  }
  fd_t res = internal_open(filename, flags, 0660);
  if (internal_iserror(res, errno_p))
    return kInvalidFd;
  return ReserveStandardFds(res);
}

void CloseFile(fd_t fd) {
  internal_close(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  uptr res = internal_read(fd, buff, buff_size);
  if (internal_iserror(res, error_p))
    return false;
  if (bytes_read)
    *bytes_read = res;
  return true;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  uptr res = internal_write(fd, buff, buff_size);
  if (internal_iserror(res, error_p))
    return false;
  if (bytes_written)
    *bytes_written = res;
  return true;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  fd_t fd = OpenFile(file_name, RdOnly);
  CHECK(fd != kInvalidFd);
  uptr fsize = internal_filesize(fd);
  CHECK_NE(fsize, (uptr)-1);
  CHECK_GT(fsize, 0);
  *buff_size = RoundUpTo(fsize, GetPageSizeCached());
  uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
  return internal_iserror(map) ? nullptr : (void *)map;
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  uptr flags = MAP_SHARED;
  if (addr) flags |= MAP_FIXED;
  uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
  int mmap_errno = 0;
  if (internal_iserror(p, &mmap_errno)) {
    Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
           fd, (long long)offset, size, p, mmap_errno);
    return nullptr;
  }
  return (void *)p;
}

static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
                                        uptr start2, uptr end2) {
  CHECK(start1 <= end1);
  CHECK(start2 <= end2);
  return (end1 < start2) || (end2 < start1);
}

// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  if (proc_maps.Error())
    return true; // and hope for the best
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (segment.start == segment.end) continue;  // Empty range.
    CHECK_NE(0, segment.end);
    if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,
                              range_end))
      return false;
  }
  return true;
}

#if !SANITIZER_MAC
void DumpProcessMap() {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  const sptr kBufSize = 4095;
  char *filename = (char*)MmapOrDie(kBufSize, __func__);
  MemoryMappedSegment segment(filename, kBufSize);
  Report("Process memory map follows:\n");
  while (proc_maps.Next(&segment)) {
    Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end,
           segment.filename);
  }
  Report("End of process memory map.\n");
  UnmapOrDie(filename, kBufSize);
}
#endif

const char *GetPwd() {
  return GetEnv("PWD");
}

bool IsPathSeparator(const char c) {
  return c == '/';
}

bool IsAbsolutePath(const char *path) {
  return path != nullptr && IsPathSeparator(path[0]);
}

void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  internal_write(fd, buffer, length);
}

bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/false);
  InternalScopedString buff(kMaxPathLength);
  MemoryMappedSegment segment(buff.data(), kMaxPathLength);
  while (proc_maps.Next(&segment)) {
    if (segment.IsExecutable() &&
        internal_strcmp(module, segment.filename) == 0) {
      *start = segment.start;
      *end = segment.end;
      return true;
    }
  }
  return false;
}

uptr SignalContext::GetAddress() const {
  auto si = static_cast<const siginfo_t *>(siginfo);
  return (uptr)si->si_addr;
}

bool SignalContext::IsMemoryAccess() const {
  auto si = static_cast<const siginfo_t *>(siginfo);
  return si->si_signo == SIGSEGV || si->si_signo == SIGBUS;
}

int SignalContext::GetType() const {
  return static_cast<const siginfo_t *>(siginfo)->si_signo;
}

const char *SignalContext::Describe() const {
  switch (GetType()) {
    case SIGFPE:
      return "FPE";
    case SIGILL:
      return "ILL";
    case SIGABRT:
      return "ABRT";
    case SIGSEGV:
      return "SEGV";
    case SIGBUS:
      return "BUS";
    case SIGTRAP:
      return "TRAP";
  }
  return "UNKNOWN SIGNAL";
}

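// Moves 'fd' above the standard descriptors (0-2) by dup()-ing it until it no
// longer aliases stdin/stdout/stderr, then closes the low descriptors it
// temporarily occupied, so the returned descriptor cannot collide with the
// standard streams.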
fd_t ReserveStandardFds(fd_t fd) {
  CHECK_GE(fd, 0);
  if (fd > 2)
    return fd;
  bool used[3];
  internal_memset(used, 0, sizeof(used));
  while (fd <= 2) {
    used[fd] = true;
    fd = internal_dup(fd);
  }
  for (int i = 0; i <= 2; ++i)
    if (used[i])
      internal_close(i);
  return fd;
}

bool ShouldMockFailureToOpen(const char *path) {
  return common_flags()->test_only_emulate_no_memorymap &&
         internal_strncmp(path, "/proc/", 6) == 0;
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
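// When decorate_proc_maps is set, back the mapping with a file created under
// /dev/shm whose name embeds the pid and the requested label, so the label
// shows up in /proc/self/maps. The file is unlinked immediately, and MAP_ANON
// is cleared from *flags so the caller maps this fd instead.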
int GetNamedMappingFd(const char *name, uptr size, int *flags) {
  if (!common_flags()->decorate_proc_maps || !name)
    return -1;
  char shmname[200];
  CHECK(internal_strlen(name) < sizeof(shmname) - 10);
  internal_snprintf(shmname, sizeof(shmname), "/dev/shm/%zu [%s]",
                    internal_getpid(), name);
  int o_cloexec = 0;
#if defined(O_CLOEXEC)
  o_cloexec = O_CLOEXEC;
#endif
  int fd = ReserveStandardFds(
      internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU));
  CHECK_GE(fd, 0);
  int res = internal_ftruncate(fd, size);
#if !defined(O_CLOEXEC)
  res = fcntl(fd, F_SETFD, FD_CLOEXEC);
  CHECK_EQ(0, res);
#endif
  CHECK_EQ(0, res);
  res = internal_unlink(shmname);
  CHECK_EQ(0, res);
  *flags &= ~(MAP_ANON | MAP_ANONYMOUS);
  return fd;
}
#else
int GetNamedMappingFd(const char *name, uptr size, int *flags) {
  return -1;
}
#endif

#if SANITIZER_ANDROID
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
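// On Android, name anonymous mappings via prctl(PR_SET_VMA,
// PR_SET_VMA_ANON_NAME, ...) so the label appears in /proc/self/maps when
// decorate_proc_maps is enabled.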
void DecorateMapping(uptr addr, uptr size, const char *name) {
  if (!common_flags()->decorate_proc_maps || !name)
    return;
  internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, (uptr)name);
}
#else
void DecorateMapping(uptr addr, uptr size, const char *name) {
}
#endif

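// Common mmap wrapper: obtains a name-carrying fd where the platform supports
// it, performs the mapping, and decorates the resulting region so labelled
// ranges are visible in the process maps.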
uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name) {
  int fd = GetNamedMappingFd(name, length, &flags);
  uptr res = internal_mmap(addr, length, prot, flags, fd, 0);
  if (!internal_iserror(res))
    DecorateMapping(res, length, name);
  return res;
}


} // namespace __sanitizer

#endif // SANITIZER_POSIX