//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Process.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions.  This is typically used for JIT applications where we want
/// to emit code into the memory and then jump to it.  Getting this type of
/// memory is very OS specific.
///
llvm::sys::MemoryBlock
llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                               std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  size_t pageSize = Process::GetPageSize();
  // Round the request up to a whole number of pages.
  size_t NumPages = (NumBytes+pageSize-1)/pageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  // Some hosts cannot map anonymous memory and must map /dev/zero instead.
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : 0;

#if defined(__APPLE__) && defined(__arm__)
  // On arm-darwin a page cannot be writable and executable at the same time,
  // so map the region read/execute and toggle its protection below.
  void *pa = ::mmap(start, pageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, pageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return AllocateRWX(NumBytes, 0, ErrMsg);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && defined(__arm__)
  // Raise the region's protection to read/execute first...
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(pageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return sys::MemoryBlock();
  }

  // ...then make it read/write so the caller can emit code into it.
  // setExecutable() switches it back to read/execute before the code is run.
  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(pageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return sys::MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*pageSize;

  return result;
}

bool llvm::sys::Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == 0 || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}
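
// Typical allocate / emit / execute / release cycle (illustrative sketch
// only: emitCodeInto() is a hypothetical stand-in for the caller's code
// emitter, and error handling is abbreviated):
//
//   std::string Err;
//   sys::MemoryBlock MB = sys::Memory::AllocateRWX(4096, 0, &Err);
//   if (MB.base() == 0)
//     return;                                   // allocation failed, Err set
//   emitCodeInto(MB.base(), MB.size());         // hypothetical emitter
//   sys::Memory::setExecutable(MB, &Err);       // needed on arm-darwin only
//   sys::Memory::InvalidateInstructionCache(MB.base(), MB.size());
//   ((void (*)())(intptr_t)MB.base())();        // jump to the emitted code
//   sys::Memory::ReleaseRWX(MB, &Err);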

/// setWritable - On arm-darwin, drop execute permission on the block so the
/// caller can write to it.  Elsewhere the block is already writable and this
/// is a no-op.
bool llvm::sys::Memory::setWritable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// setExecutable - On arm-darwin, switch the block back to read/execute so
/// the code in it can be run.  Elsewhere the block is already executable and
/// this is a no-op.
bool llvm::sys::Memory::setExecutable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}
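
// On arm-darwin, where a block cannot stay writable and executable at once,
// a client that needs to patch already-emitted code can bracket the store
// with these calls (illustrative sketch only, reusing MB and Err from the
// sketch above; patchCallSite() is a hypothetical helper):
//
//   sys::Memory::setWritable(MB, &Err);       // writable again, not executable
//   patchCallSite(MB.base());                 // rewrite an instruction
//   sys::Memory::setExecutable(MB, &Err);     // back to read/execute
//   sys::Memory::InvalidateInstructionCache(MB.base(), MB.size());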

/// setRangeWritable - Like setWritable, but operates on an arbitrary address
/// range rather than a MemoryBlock.
bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// setRangeExecutable - Like setExecutable, but operates on an arbitrary
/// address range rather than a MemoryBlock.
bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}
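
// The range-based variants let a client touch only the pages covering a
// single patch site instead of a whole MemoryBlock.  Illustrative sketch
// only; StubAddr and StubSize are hypothetical values describing one stub:
//
//   sys::Memory::setRangeWritable(StubAddr, StubSize);
//   // ...store the new instruction bytes at StubAddr...
//   sys::Memory::setRangeExecutable(StubAddr, StubSize);
//   sys::Memory::InvalidateInstructionCache(StubAddr, StubSize);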