// Copyright 2019 The Marl Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "marl/memory.h"

#include "marl/debug.h"
#include "marl/sanitizers.h"

#include <cstring>

#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
#include <sys/mman.h>
#include <unistd.h>
namespace {
// kPageSize was previously a function-local static in pageSize(), but was
// moved out to a global to work around the following TSAN false positive:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68338
const size_t kPageSize = sysconf(_SC_PAGESIZE);
inline size_t pageSize() {
  return kPageSize;
}
inline void* allocatePages(size_t count) {
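  // MAP_PRIVATE | MAP_ANONYMOUS mappings are not backed by a file and are
  // zero-initialized by the kernel.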
  auto mapping = mmap(nullptr, count * pageSize(), PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  MARL_ASSERT(mapping != MAP_FAILED, "Failed to allocate %d pages", int(count));
  if (mapping == MAP_FAILED) {
    mapping = nullptr;
  }
  return mapping;
}
inline void freePages(void* ptr, size_t count) {
  auto res = munmap(ptr, count * pageSize());
  (void)res;
  MARL_ASSERT(res == 0, "Failed to free %d pages at %p", int(count), ptr);
}
inline void protectPage(void* addr) {
  auto res = mprotect(addr, pageSize(), PROT_NONE);
  (void)res;
  MARL_ASSERT(res == 0, "Failed to protect page at %p", addr);
}
} // anonymous namespace
#elif defined(__Fuchsia__)
#include <unistd.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
namespace {
// kPageSize was previously a function-local static in pageSize(), but was
// moved out to a global to work around the following TSAN false positive:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68338
const size_t kPageSize = sysconf(_SC_PAGESIZE);
inline size_t pageSize() {
  return kPageSize;
}
inline void* allocatePages(size_t count) {
  auto length = count * kPageSize;
  zx_handle_t vmo;
  if (zx_vmo_create(length, 0, &vmo) != ZX_OK) {
    return nullptr;
  }
  zx_vaddr_t reservation;
  zx_status_t status =
      zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                  vmo, 0, length, &reservation);
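  // Closing the VMO handle does not tear down the mapping; the mapping holds
  // its own reference to the VMO.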
  zx_handle_close(vmo);
  (void)status;
  MARL_ASSERT(status == ZX_OK, "Failed to allocate %d pages", int(count));
  return reinterpret_cast<void*>(reservation);
}
inline void freePages(void* ptr, size_t count) {
  auto length = count * kPageSize;
  zx_status_t status = zx_vmar_unmap(zx_vmar_root_self(),
                                     reinterpret_cast<zx_vaddr_t>(ptr), length);
  (void)status;
  MARL_ASSERT(status == ZX_OK, "Failed to free %d pages at %p", int(count),
              ptr);
}
inline void protectPage(void* addr) {
  zx_status_t status = zx_vmar_protect(
      zx_vmar_root_self(), 0, reinterpret_cast<zx_vaddr_t>(addr), kPageSize);
  (void)status;
  MARL_ASSERT(status == ZX_OK, "Failed to protect page at %p", addr);
}
} // anonymous namespace
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN 1
#include <Windows.h>
namespace {
inline size_t pageSize() {
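  // C++11 guarantees thread-safe initialization of function-local statics,
  // so a static is safe here; the POSIX paths above use a global instead only
  // to work around the TSAN false positive referenced there.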
  static auto size = [] {
    SYSTEM_INFO systemInfo = {};
    GetSystemInfo(&systemInfo);
    return systemInfo.dwPageSize;
  }();
  return size;
}
inline void* allocatePages(size_t count) {
  auto mapping = VirtualAlloc(nullptr, count * pageSize(),
                              MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
  MARL_ASSERT(mapping != nullptr, "Failed to allocate %d pages", int(count));
  return mapping;
}
inline void freePages(void* ptr, size_t count) {
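  // VirtualFree() with MEM_RELEASE requires a size of 0 and releases the
  // entire reservation, so count is only consulted by the assert below.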
  (void)count;
  auto res = VirtualFree(ptr, 0, MEM_RELEASE);
  (void)res;
  MARL_ASSERT(res != 0, "Failed to free %d pages at %p", int(count), ptr);
}
inline void protectPage(void* addr) {
  DWORD oldVal = 0;
  auto res = VirtualProtect(addr, pageSize(), PAGE_NOACCESS, &oldVal);
  (void)res;
  MARL_ASSERT(res != 0, "Failed to protect page at %p", addr);
}
} // anonymous namespace
#else
#error "Page-based allocation is not implemented for this platform"
#endif

namespace {

// pagedMalloc() allocates size bytes of uninitialized storage with the
// specified minimum byte alignment, using OS-specific page mapping calls.
// If guardLow is true, reads or writes to the page below the returned
// address will cause a page fault.
// If guardHigh is true, reads or writes to the page above the allocated
// block will cause a page fault.
// The returned pointer must be freed with pagedFree().
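//
// Layout with both guards enabled (N = number of requested pages):
//
//   [ guard page ][ page 0 ... page N-1 ][ guard page ]
//                 ^-- returned pointer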
void* pagedMalloc(size_t alignment,
                  size_t size,
                  bool guardLow,
                  bool guardHigh) {
  (void)alignment;
  MARL_ASSERT(alignment < pageSize(),
              "alignment (0x%x) must be less than the page size (0x%x)",
              int(alignment), int(pageSize()));
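  // Page mappings returned by the OS are page-aligned, so any alignment below
  // the page size is satisfied without extra work.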
  auto numRequestedPages = (size + pageSize() - 1) / pageSize();
  auto numTotalPages =
      numRequestedPages + (guardLow ? 1 : 0) + (guardHigh ? 1 : 0);
  auto mem = reinterpret_cast<uint8_t*>(allocatePages(numTotalPages));
  if (guardLow) {
    protectPage(mem);
    mem += pageSize();
  }
  if (guardHigh) {
    protectPage(mem + numRequestedPages * pageSize());
  }
  return mem;
}

// pagedFree() frees the memory allocated with pagedMalloc().
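// The alignment, size and guard flags must match those passed to
// pagedMalloc() so that the same number of pages is unmapped.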
void pagedFree(void* ptr,
               size_t alignment,
               size_t size,
               bool guardLow,
               bool guardHigh) {
  (void)alignment;
  MARL_ASSERT(alignment < pageSize(),
              "alignment (0x%x) must be less than the page size (0x%x)",
              int(alignment), int(pageSize()));
  auto numRequestedPages = (size + pageSize() - 1) / pageSize();
  auto numTotalPages =
      numRequestedPages + (guardLow ? 1 : 0) + (guardHigh ? 1 : 0);
  if (guardLow) {
    ptr = reinterpret_cast<uint8_t*>(ptr) - pageSize();
  }
  freePages(ptr, numTotalPages);
}

// alignedMalloc() allocates size bytes of uninitialized storage with the
// specified minimum byte alignment. The pointer returned must be freed with
// alignedFree().
inline void* alignedMalloc(size_t alignment, size_t size) {
  size_t allocSize = size + alignment + sizeof(void*);
  auto allocation = malloc(allocSize);
  auto aligned = reinterpret_cast<uint8_t*>(marl::alignUp(
      reinterpret_cast<uintptr_t>(allocation), alignment));  // align
  memcpy(aligned + size, &allocation, sizeof(void*));  // pointer-to-allocation
  return aligned;
}
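
// Worked example for alignedMalloc() (hypothetical addresses; alignment = 64,
// size = 100, sizeof(void*) = 8):
//   malloc(100 + 64 + 8) returns, say, 0x1008.
//   aligned = alignUp(0x1008, 64) = 0x1040.
//   The base pointer (0x1008) is stored at 0x1040 + 100 for alignedFree().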

// alignedFree() frees memory allocated by alignedMalloc().
inline void alignedFree(void* ptr, size_t size) {
  void* base;
  memcpy(&base, reinterpret_cast<uint8_t*>(ptr) + size, sizeof(void*));
  free(base);
}

class DefaultAllocator : public marl::Allocator {
 public:
  static DefaultAllocator instance;

  virtual marl::Allocation allocate(
      const marl::Allocation::Request& request) override {
    void* ptr = nullptr;

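    // Pick a strategy based on the request: guarded allocations must come
    // from whole pages, over-aligned allocations use alignedMalloc()'s
    // trailing-pointer scheme, and everything else goes straight to malloc().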
    if (request.useGuards) {
      ptr = ::pagedMalloc(request.alignment, request.size, true, true);
    } else if (request.alignment > 1U) {
      ptr = ::alignedMalloc(request.alignment, request.size);
    } else {
      ptr = ::malloc(request.size);
    }

    MARL_ASSERT(ptr != nullptr, "Allocation failed");
    MARL_ASSERT(reinterpret_cast<uintptr_t>(ptr) % request.alignment == 0,
                "Allocation gave incorrect alignment");

    marl::Allocation allocation;
    allocation.ptr = ptr;
    allocation.request = request;
    return allocation;
  }

  virtual void free(const marl::Allocation& allocation) override {
    if (allocation.request.useGuards) {
      ::pagedFree(allocation.ptr, allocation.request.alignment,
                  allocation.request.size, true, true);
    } else if (allocation.request.alignment > 1U) {
      ::alignedFree(allocation.ptr, allocation.request.size);
    } else {
      ::free(allocation.ptr);
    }
  }
};

DefaultAllocator DefaultAllocator::instance;

} // anonymous namespace

namespace marl {

Allocator* Allocator::Default = &DefaultAllocator::instance;
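
// A minimal usage sketch (assuming the marl::Allocation::Request fields used
// above, declared in marl/memory.h):
//   marl::Allocation::Request request;
//   request.size = 256;
//   request.alignment = 64;
//   auto allocation = marl::Allocator::Default->allocate(request);
//   marl::Allocator::Default->free(allocation);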

size_t pageSize() {
  return ::pageSize();
}

} // namespace marl