1 /**
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "os/mem.h"
17 #include "macros.h"
18 #include "utils/type_helpers.h"
19 #include "utils/asan_interface.h"
20 #include "utils/logger.h"
21
22 #include <limits>
23
24 #include <windows.h>
25 #include <cerrno>
26 #include <io.h>
27
28 #include <sysinfoapi.h>
29 #include <type_traits>
30
31 #define MAP_FAILED (reinterpret_cast<void *>(-1))
32
33 namespace ark::os::mem {
34
// Map a Win32 error code onto an errno-style value. A zero Win32 code carries
// no information, so the caller-supplied default errno is used instead.
static int mem_errno(const DWORD err, const int deferr)
{
    if (err == 0) {
        return deferr;
    }
    return err;
}
39
mem_protection_flags_for_page(const int prot)40 static DWORD mem_protection_flags_for_page(const int prot)
41 {
42 DWORD flags = 0;
43
44 if (static_cast<unsigned>(prot) == MMAP_PROT_NONE) {
45 return flags;
46 }
47
48 if ((static_cast<unsigned>(prot) & MMAP_PROT_EXEC) != 0) {
49 flags = ((static_cast<unsigned>(prot) & MMAP_PROT_WRITE) != 0) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
50 } else {
51 flags = ((static_cast<unsigned>(prot) & MMAP_PROT_WRITE) != 0) ? PAGE_READWRITE : PAGE_READONLY;
52 }
53
54 return flags;
55 }
56
mem_protection_flags_for_file(const int prot,const uint32_t mapFlags)57 static DWORD mem_protection_flags_for_file(const int prot, const uint32_t mapFlags)
58 {
59 DWORD flags = 0;
60 if (prot == MMAP_PROT_NONE) {
61 return flags;
62 }
63
64 /* Notice that only single FILE_MAP_COPY flag can ensure a copy-on-write mapping which
65 * MMAP_FLAG_PRIVATE needs. It can't be bitwise OR'ed with FILE_MAP_ALL_ACCESS, FILE_MAP_READ
66 * or FILE_MAP_WRITE. Or else it will be converted to PAGE_READONLY or PAGE_READWRITE, and make
67 * the changes synced back to the original file.
68 */
69 if ((mapFlags & MMAP_FLAG_PRIVATE) != 0) {
70 return FILE_MAP_COPY;
71 }
72
73 if ((static_cast<unsigned>(prot) & MMAP_PROT_READ) != 0) {
74 flags |= FILE_MAP_READ;
75 }
76 if ((static_cast<unsigned>(prot) & MMAP_PROT_WRITE) != 0) {
77 flags |= FILE_MAP_WRITE;
78 }
79 if ((static_cast<unsigned>(prot) & MMAP_PROT_EXEC) != 0) {
80 flags |= FILE_MAP_EXECUTE;
81 }
82
83 return flags;
84 }
85
mem_select_lower_bound(off_t off)86 static DWORD mem_select_lower_bound(off_t off)
87 {
88 using uoff_t = std::make_unsigned_t<off_t>;
89 return (sizeof(off_t) <= sizeof(DWORD)) ? static_cast<DWORD>(off)
90 : static_cast<DWORD>(static_cast<uoff_t>(off) & 0xFFFFFFFFL);
91 }
92
mem_select_upper_bound(off_t off)93 static DWORD mem_select_upper_bound(off_t off)
94 {
95 constexpr uint32_t OFFSET_DWORD = 32;
96 using uoff_t = std::make_unsigned_t<off_t>;
97 return (sizeof(off_t) <= sizeof(DWORD))
98 ? static_cast<DWORD>(0)
99 : static_cast<DWORD>((static_cast<uoff_t>(off) >> OFFSET_DWORD) & 0xFFFFFFFFL);
100 }
101
/// POSIX-style mmap emulation built on CreateFileMapping + MapViewOfFile.
/// @param addr   placement hint — ignored on Windows (MMAP_FLAG_FIXED is rejected below)
/// @param len    length of the view; must be non-zero
/// @param prot   MMAP_PROT_* bits; exec-only (MMAP_PROT_EXEC alone) is not supported
/// @param flags  MMAP_FLAG_* bits (ANONYMOUS / PRIVATE / SHARED; FIXED is rejected)
/// @param fildes CRT file descriptor for file-backed mappings; ignored for anonymous ones
/// @param off    offset into the file
/// @return view address on success; MAP_FAILED with errno set on failure
void *mmap([[maybe_unused]] void *addr, size_t len, uint32_t prot, int flags, int fildes, off_t off)
{
    errno = 0;

    // Skip unsupported combinations of flags:
    if (len == 0 || (static_cast<unsigned>(flags) & MMAP_FLAG_FIXED) != 0 || prot == MMAP_PROT_EXEC) {
        errno = EINVAL;
        return MAP_FAILED;
    }

    // File-backed mappings need the underlying OS handle of the CRT descriptor;
    // anonymous mappings pass INVALID_HANDLE_VALUE (pagefile-backed section).
    HANDLE h = ((static_cast<unsigned>(flags) & MMAP_FLAG_ANONYMOUS) == 0)
                   ? reinterpret_cast<HANDLE>(_get_osfhandle(fildes))
                   : INVALID_HANDLE_VALUE;
    if ((static_cast<unsigned>(flags) & MMAP_FLAG_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE) {
        errno = EBADF;
        return MAP_FAILED;
    }

    // The section must be large enough to cover the requested view, i.e.
    // off + len, split into the high/low DWORD halves the API expects.
    const auto protPage = mem_protection_flags_for_page(prot);
    const off_t maxSize = off + static_cast<off_t>(len);
    const auto maxSizeLow = mem_select_lower_bound(maxSize);
    const auto maxSizeHigh = mem_select_upper_bound(maxSize);
    HANDLE fm = CreateFileMapping(h, nullptr, protPage, maxSizeHigh, maxSizeLow, nullptr);
    if (fm == nullptr) {
        errno = mem_errno(GetLastError(), EPERM);
        return MAP_FAILED;
    }

    // Map the requested window. Closing the section handle right after is safe:
    // the view itself keeps the underlying section alive until munmap.
    const auto protFile = mem_protection_flags_for_file(prot, flags);
    const auto fileOffLow = mem_select_lower_bound(off);
    const auto fileOffHigh = mem_select_upper_bound(off);
    void *map = MapViewOfFile(fm, protFile, fileOffHigh, fileOffLow, len);
    CloseHandle(fm);
    if (map == nullptr) {
        errno = mem_errno(GetLastError(), EPERM);
        return MAP_FAILED;
    }

    return map;
}
142
munmap(void * addr,size_t len)143 int munmap(void *addr, [[maybe_unused]] size_t len)
144 {
145 if (UnmapViewOfFile(addr)) {
146 return 0;
147 }
148
149 errno = mem_errno(GetLastError(), EPERM);
150
151 return -1;
152 }
153
MmapDeleter(std::byte * ptr,size_t size)154 void MmapDeleter(std::byte *ptr, size_t size) noexcept
155 {
156 if (ptr != nullptr) {
157 munmap(ptr, size);
158 }
159 }
160
MapFile(file::File file,uint32_t prot,uint32_t flags,size_t size,size_t fileOffset,void * hint)161 BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t fileOffset, void *hint)
162 {
163 size_t mapOffset = RoundDown(fileOffset, GetPageSize());
164 size_t offset = fileOffset - mapOffset;
165 size_t mapSize = size + offset;
166 void *result = mmap(hint, mapSize, prot, flags, file.GetFd(), mapOffset);
167 if (result == MAP_FAILED) {
168 return BytePtr(nullptr, 0, MmapDeleter);
169 }
170
171 return BytePtr(static_cast<std::byte *>(result) + offset, size, MmapDeleter);
172 }
173
MapExecuted(size_t size)174 BytePtr MapExecuted(size_t size)
175 {
176 // By design caller should pass valid size, so don't do any additional checks except ones that
177 // mmap do itself
178 // NOLINTNEXTLINE(hicpp-signed-bitwise)
179 void *result = mmap(nullptr, size, MMAP_PROT_EXEC | MMAP_PROT_WRITE, MMAP_FLAG_SHARED | MMAP_FLAG_ANONYMOUS, -1, 0);
180 if (result == reinterpret_cast<void *>(-1)) {
181 result = nullptr;
182 }
183
184 return BytePtr(static_cast<std::byte *>(result), (result == nullptr) ? 0 : size, MmapDeleter);
185 }
186
MakeMemWithProtFlag(void * mem,size_t size,int prot)187 std::optional<Error> MakeMemWithProtFlag(void *mem, size_t size, int prot)
188 {
189 PDWORD old = nullptr;
190 int r = VirtualProtect(mem, size, prot, old);
191 if (r != 0) {
192 return Error(GetLastError());
193 }
194 return {};
195 }
196
MakeMemReadExec(void * mem,size_t size)197 std::optional<Error> MakeMemReadExec(void *mem, size_t size)
198 {
199 // NOLINTNEXTLINE(hicpp-signed-bitwise)
200 return MakeMemWithProtFlag(mem, size, MMAP_PROT_EXEC | MMAP_PROT_READ);
201 }
202
MakeMemReadWrite(void * mem,size_t size)203 std::optional<Error> MakeMemReadWrite(void *mem, size_t size)
204 {
205 // NOLINTNEXTLINE(hicpp-signed-bitwise)
206 return MakeMemWithProtFlag(mem, size, MMAP_PROT_WRITE | MMAP_PROT_READ);
207 }
208
MakeMemReadOnly(void * mem,size_t size)209 std::optional<Error> MakeMemReadOnly(void *mem, size_t size)
210 {
211 return MakeMemWithProtFlag(mem, size, MMAP_PROT_READ);
212 }
213
MakeMemProtected(void * mem,size_t size)214 std::optional<Error> MakeMemProtected(void *mem, size_t size)
215 {
216 return MakeMemWithProtFlag(mem, size, MMAP_PROT_NONE);
217 }
218
// Page size used by the allocator layer.
// NOTE(review): hard-coded to 4 KiB rather than queried via GetSystemInfo —
// assumes dwPageSize == 4096 on all supported Windows hosts; confirm.
uint32_t GetPageSize()
{
    constexpr uint32_t WINDOWS_PAGE_SIZE = 4096;
    return WINDOWS_PAGE_SIZE;
}
224
GetCacheLineSizeFromOs()225 static size_t GetCacheLineSizeFromOs()
226 {
227 // NOLINTNEXTLINE(google-runtime-int)
228 size_t lineSize = 0;
229 DWORD bufferSize = 0;
230 DWORD i = 0;
231 SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buffer = 0;
232
233 GetLogicalProcessorInformation(0, &bufferSize);
234 #ifdef ENABLE_THIS_CODE_IN_FUTURE
235 if (bufferSize == 0) {
236 // malloc behavior for zero bytes is implementation defined
237 // So, check it here
238 LOG_IF(lineSize == 0, FATAL, RUNTIME) << "Can't get cache line size from OS";
239 UNREACHABLE();
240 }
241 #endif // ENABLE_THIS_CODE_IN_FUTURE
242 buffer = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION *)malloc(bufferSize);
243 GetLogicalProcessorInformation(&buffer[0], &bufferSize);
244
245 for (i = 0; i != bufferSize / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); ++i) {
246 if (buffer[i].Relationship == RelationCache && buffer[i].Cache.Level == 1) {
247 lineSize = buffer[i].Cache.LineSize;
248 break;
249 }
250 }
251 LOG_IF(lineSize == 0, FATAL, RUNTIME) << "Can't get cache line size from OS";
252
253 free(buffer);
254 return lineSize;
255 }
256
GetCacheLineSize()257 size_t GetCacheLineSize()
258 {
259 // NOLINTNEXTLINE(google-runtime-int)
260 static size_t sz = GetCacheLineSizeFromOs();
261 return sz;
262 }
263
MapRWAnonymousRaw(size_t size,bool forcePoison)264 void *MapRWAnonymousRaw(size_t size, bool forcePoison)
265 {
266 ASSERT(size % GetPageSize() == 0);
267 // NOLINTNEXTLINE(hicpp-signed-bitwise)
268 void *result =
269 mmap(nullptr, size, MMAP_PROT_READ | MMAP_PROT_WRITE, MMAP_FLAG_PRIVATE | MMAP_FLAG_ANONYMOUS, -1, 0);
270 if (UNLIKELY(result == MAP_FAILED)) {
271 result = nullptr;
272 }
273 if ((result != nullptr) && forcePoison) {
274 ASAN_POISON_MEMORY_REGION(result, size);
275 }
276
277 return result;
278 }
279
// Intentional no-op on Windows: UnmapViewOfFile can only release an entire
// view, so a partial unmap of a mapped region is impossible here. Returning
// an empty optional reports success; callers such as
// MapRWAnonymousWithAlignmentRaw simply keep the surplus pages mapped.
std::optional<Error> PartiallyUnmapRaw([[maybe_unused]] void *mem, [[maybe_unused]] size_t size)
{
    // We can't partially unmap allocated memory
    // Because UnmapViewOfFile in win32 doesn't support to unmap
    // partial of the mapped memory
    return {};
}
287
MapRWAnonymousWithAlignmentRaw(size_t size,size_t aligmentInBytes,bool forcePoison)288 void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t aligmentInBytes, bool forcePoison)
289 {
290 ASSERT(aligmentInBytes != 0);
291 ASSERT(aligmentInBytes % GetPageSize() == 0);
292 if (size == 0) {
293 return nullptr;
294 }
295 void *result = MapRWAnonymousRaw(size + aligmentInBytes, forcePoison);
296 if (result == nullptr) {
297 return result;
298 }
299 auto allocatedMem = reinterpret_cast<uintptr_t>(result);
300 uintptr_t alignedMem =
301 (allocatedMem & ~(aligmentInBytes - 1U)) + ((allocatedMem % aligmentInBytes) != 0U ? aligmentInBytes : 0U);
302 ASSERT(alignedMem >= allocatedMem);
303 size_t unusedInStart = alignedMem - allocatedMem;
304 ASSERT(unusedInStart <= aligmentInBytes);
305 size_t unusedInEnd = aligmentInBytes - unusedInStart;
306 if (unusedInStart != 0) {
307 PartiallyUnmapRaw(result, unusedInStart);
308 }
309 if (unusedInEnd != 0) {
310 auto end_part = reinterpret_cast<void *>(alignedMem + size);
311 PartiallyUnmapRaw(end_part, unusedInEnd);
312 }
313 return reinterpret_cast<void *>(alignedMem);
314 }
315
AlignDownToPageSize(uintptr_t addr)316 uintptr_t AlignDownToPageSize(uintptr_t addr)
317 {
318 SYSTEM_INFO sysInfo;
319 GetSystemInfo(&sysInfo);
320 const size_t SYS_PAGE_SIZE = sysInfo.dwPageSize;
321 addr &= ~(SYS_PAGE_SIZE - 1);
322 return addr;
323 }
324
AlignedAlloc(size_t alignmentInBytes,size_t size)325 void *AlignedAlloc(size_t alignmentInBytes, size_t size)
326 {
327 size_t alignedSize = (size + alignmentInBytes - 1) & ~(alignmentInBytes - 1);
328 // aligned_alloc is not supported on MingW. instead we need to call _aligned_malloc.
329 auto ret = _aligned_malloc(alignedSize, alignmentInBytes);
330 // _aligned_malloc returns aligned pointer so just add assertion, no need to do runtime checks
331 ASSERT_PRINT(reinterpret_cast<uintptr_t>(ret) == (reinterpret_cast<uintptr_t>(ret) & ~(alignmentInBytes - 1)),
332 "Address is not aligned");
333 return ret;
334 }
335
AlignedFree(void * mem)336 void AlignedFree(void *mem)
337 {
338 _aligned_free(mem);
339 }
340
UnmapRaw(void * mem,size_t size)341 std::optional<Error> UnmapRaw(void *mem, size_t size)
342 {
343 ASAN_UNPOISON_MEMORY_REGION(mem, size);
344 int res = munmap(mem, size);
345 if (UNLIKELY(res == -1)) {
346 return Error(errno);
347 }
348
349 return {};
350 }
351
// Intentional no-op on Windows: there is no equivalent of Linux's
// PR_SET_VMA_ANON_NAME for naming anonymous mappings, so the tag is
// silently dropped and success is reported.
std::optional<Error> TagAnonymousMemory([[maybe_unused]] const void *mem, [[maybe_unused]] size_t size,
                                        [[maybe_unused]] const char *tag)
{
    return {};
}
357
// mallinfo-style native heap statistics are unavailable on Windows, so a
// fixed default (DEFAULT_NATIVE_BYTES_FROM_MALLINFO, declared elsewhere)
// is returned instead of a real measurement.
size_t GetNativeBytesFromMallinfo()
{
    return DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
}
362
363 } // namespace ark::os::mem
364