//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif

#ifdef __APPLE__
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void*);
#endif

namespace {

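// Map the Memory::MF_* protection flags used by the LLVM API onto the
// corresponding POSIX PROT_* bits for mmap()/mprotect(). Only the flag
// combinations enumerated below are legal; anything else is a caller error.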
int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
      llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences. The function
    // InvalidateInstructionCache uses the instructions dcbf and icbi, both
    // of which are treated by the processor as loads. If the page has no
    // read permissions, executing these instructions will result in a
    // segmentation fault. Somehow, this problem is not present on Linux,
    // but it does happen on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // anonymous namespace

namespace llvm {
namespace sys {

MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
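  // On configurations without anonymous mmap support, mmap needs a real file
  // descriptor, so map /dev/zero instead; the descriptor is opened once and
  // reused across calls.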
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);

    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}
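
// A minimal usage sketch (illustrative only, not part of this file): allocate
// a read/write block, use it, then release it. Errors are reported through
// `EC`, exactly as the functions above do.
//
//   std::error_code EC;
//   sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
//       4096, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
//   if (EC)
//     report_fatal_error("mmap failed: " + EC.message());
//   memset(MB.base(), 0, MB.size());  // Size is rounded up to whole pages.
//   if (std::error_code E = sys::Memory::releaseMappedMemory(MB))
//     report_fatal_error("munmap failed: " + E.message());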

std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.Size))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.Size = 0;

  return std::error_code();
}

std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  static const size_t PageSize = Process::getPageSize();
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  // Round the start address down and the length up to page granularity, as
  // mprotect operates on whole pages.
  int Result = ::mprotect((void *)((uintptr_t)M.Address & ~(PageSize - 1)),
                          PageSize * ((M.Size + PageSize - 1) / PageSize),
                          Protect);
  if (Result != 0)
    return std::error_code(errno, std::generic_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return std::error_code();
}
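
// Illustrative sketch of the usual JIT pattern built from the two functions
// above (the buffer `Code` of `CodeSize` bytes is hypothetical): allocate
// writable memory, copy the machine code in, then flip the pages to
// read/execute before running them.
//
//   std::error_code EC;
//   sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
//       CodeSize, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
//   if (EC)
//     return EC;
//   memcpy(MB.base(), Code, CodeSize);
//   // protectMappedMemory invalidates the icache itself when MF_EXEC is set.
//   if ((EC = sys::Memory::protectMappedMemory(
//            MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC)))
//     return EC;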

/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions.  This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it.  Getting this type of memory
/// is very OS specific.
///
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : nullptr;

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return AllocateRWX(NumBytes, nullptr, ErrMsg);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
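  // Apple ARM targets enforce W^X: a page cannot be writable and executable
  // at the same time. First raise the maximum protection to RX (VM_PROT_COPY
  // forces a copy-on-write remap so the change is permitted), then set the
  // current protection to RW so code can be emitted into the block;
  // setExecutable() later switches it back to RX.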
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}
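
// For example (illustrative; `Code` and `CodeSize` are hypothetical), emitting
// code through the legacy RWX interface:
//
//   std::string Err;
//   sys::MemoryBlock MB = sys::Memory::AllocateRWX(CodeSize, nullptr, &Err);
//   if (MB.base() == nullptr)
//     report_fatal_error("AllocateRWX: " + Err);
//   memcpy(MB.base(), Code, CodeSize);    // Emit the code.
//   sys::Memory::setExecutable(MB, &Err); // Also invalidates the icache.
//   // ... run the code, then:
//   sys::Memory::ReleaseRWX(MB, &Err);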

bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}

bool Memory::setWritable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted, it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr, size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  // Flush the data cache lines back to memory, then invalidate the matching
  // instruction cache lines so the newly written code is fetched.
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__) || defined(__mips__)) && \
        defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  endif

#endif  // end apple

  ValgrindDiscardTranslations(Addr, Len);
}
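
// Illustrative call pattern (hypothetical `Buf`/`N` and helper): after
// patching instructions in place, flush before re-entering the code:
//
//   patchBranch(Buf, N);  // Hypothetical in-place code modification.
//   sys::Memory::InvalidateInstructionCache(Buf, N);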

} // namespace sys
} // namespace llvm