//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld.
//
//===----------------------------------------------------------------------===//
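//
// Usage sketch (illustrative only, assuming an MCJIT-based setup; the exact
// EngineBuilder configuration varies between LLVM releases): given a
// std::unique_ptr<Module> M,
//
//   llvm::EngineBuilder EB(std::move(M));
//   EB.setMCJITMemoryManager(std::make_unique<llvm::SectionMemoryManager>());
//   std::unique_ptr<llvm::ExecutionEngine> EE(EB.create());
//
//===----------------------------------------------------------------------===//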

#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Config/config.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Process.h"

namespace llvm {

uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName,
                                                   bool IsReadOnly) {
  if (IsReadOnly)
    return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
                           Size, Alignment);
  return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName) {
  return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
  uintptr_t Addr = 0;

  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.size() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.size();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write.  The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper.allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Remember that we allocated this memory
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.size();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // The allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return aligned address
  return (uint8_t *)Addr;
}

bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
  // FIXME: Should in-progress permissions be reverted if an error occurs?
  std::error_code ec;

  // Make code memory executable.
  ec = applyMemoryGroupPermissions(CodeMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Make read-only data memory read-only.
  ec = applyMemoryGroupPermissions(RODataMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Read-write data memory already has the correct permissions

  // Some platforms with separate data cache and instruction cache require
  // explicit cache flush, otherwise JIT code manipulations (like resolved
  // relocations) will get to the data cache but not to the instruction cache.
  invalidateInstructionCache();

  return false;
}

static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
  static const size_t PageSize = sys::Process::getPageSize();

  size_t StartOverlap =
      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;

  size_t TrimmedSize = M.size();
  TrimmedSize -= StartOverlap;
  TrimmedSize -= TrimmedSize % PageSize;

  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
                           TrimmedSize);

  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
  assert((Trimmed.size() % PageSize) == 0);
  assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size());

  return Trimmed;
}

std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                  unsigned Permissions) {
  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
    if (std::error_code EC = MMapper.protectMappedMemory(MB, Permissions))
      return EC;

  MemGroup.PendingMem.clear();

  // Now go through free blocks and trim any of them that don't span the entire
  // page because one of the pending blocks may have overlapped it.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
    // We cleared the PendingMem list, so all these pointers are now invalid
    FreeMB.PendingPrefixIndex = (unsigned)-1;
  }

  // Remove all blocks which are now empty
  MemGroup.FreeMem.erase(
      remove_if(MemGroup.FreeMem,
                [](FreeMemBlock &FreeMB) { return FreeMB.Free.size() == 0; }),
      MemGroup.FreeMem.end());

  return std::error_code();
}

void SectionMemoryManager::invalidateInstructionCache() {
  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
    sys::Memory::InvalidateInstructionCache(Block.base(), Block.size());
}

SectionMemoryManager::~SectionMemoryManager() {
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      MMapper.releaseMappedMemory(Block);
  }
}

SectionMemoryManager::MemoryMapper::~MemoryMapper() {}

void SectionMemoryManager::anchor() {}

namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};

DefaultMMapper DefaultMMapperInstance;
} // namespace

SectionMemoryManager::SectionMemoryManager(MemoryMapper *MM)
    : MMapper(MM ? *MM : DefaultMMapperInstance) {}

} // namespace llvm