/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "os/mem.h"
#include "utils/type_helpers.h"
#include "utils/asan_interface.h"

#include <limits>

#include <windows.h>
#include <cerrno>
#include <io.h>

#include <sysinfoapi.h>
#include <type_traits>

#define MAP_FAILED (reinterpret_cast<void *>(-1))

namespace panda::os::mem {

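// Convert a Win32 error code into an errno-style value, falling back to deferr
// when GetLastError() reported no error.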
static int mem_errno(const DWORD err, const int deferr)
{
    return err == 0 ? deferr : err;
}

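// Translate MMAP_PROT_* flags into the PAGE_* protection constants expected by CreateFileMapping.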
static DWORD mem_protection_flags_for_page(const int prot)
{
    DWORD flags = 0;

    if (static_cast<unsigned>(prot) == MMAP_PROT_NONE) {
        return flags;
    }

    if ((static_cast<unsigned>(prot) & MMAP_PROT_EXEC) != 0) {
        flags = ((static_cast<unsigned>(prot) & MMAP_PROT_WRITE) != 0) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
    } else {
        flags = ((static_cast<unsigned>(prot) & MMAP_PROT_WRITE) != 0) ? PAGE_READWRITE : PAGE_READONLY;
    }

    return flags;
}

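// Translate MMAP_PROT_* and MMAP_FLAG_* values into the FILE_MAP_* access flags expected by
// MapViewOfFile.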
static DWORD mem_protection_flags_for_file(const int prot, const uint32_t map_flags)
{
    DWORD flags = 0;
    if (prot == MMAP_PROT_NONE) {
        return flags;
    }

    /* Note that only a bare FILE_MAP_COPY flag guarantees the copy-on-write mapping that
     * MMAP_FLAG_PRIVATE requires. It must not be bitwise OR'ed with FILE_MAP_ALL_ACCESS,
     * FILE_MAP_READ or FILE_MAP_WRITE; otherwise the access degrades to PAGE_READONLY or
     * PAGE_READWRITE and changes would be synced back to the original file.
     */
    if ((map_flags & MMAP_FLAG_PRIVATE) != 0) {
        return FILE_MAP_COPY;
    }

    if ((static_cast<unsigned>(prot) & MMAP_PROT_READ) != 0) {
        flags |= FILE_MAP_READ;
    }
    if ((static_cast<unsigned>(prot) & MMAP_PROT_WRITE) != 0) {
        flags |= FILE_MAP_WRITE;
    }
    if ((static_cast<unsigned>(prot) & MMAP_PROT_EXEC) != 0) {
        flags |= FILE_MAP_EXECUTE;
    }

    return flags;
}

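// Return the low 32 bits of a file offset, for APIs that take the offset as two DWORDs.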
static DWORD mem_select_lower_bound(off_t off)
{
    using uoff_t = std::make_unsigned_t<off_t>;
    return (sizeof(off_t) <= sizeof(DWORD)) ? static_cast<DWORD>(off)
                                            : static_cast<DWORD>(static_cast<uoff_t>(off) & 0xFFFFFFFFL);
}

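// Return the high 32 bits of a file offset (zero when off_t itself is only 32 bits wide).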
static DWORD mem_select_upper_bound(off_t off)
{
    constexpr uint32_t OFFSET_DWORD = 32;
    using uoff_t = std::make_unsigned_t<off_t>;
    return (sizeof(off_t) <= sizeof(DWORD))
               ? static_cast<DWORD>(0)
               : static_cast<DWORD>((static_cast<uoff_t>(off) >> OFFSET_DWORD) & 0xFFFFFFFFL);
}

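// POSIX-like mmap emulation built on CreateFileMapping/MapViewOfFile. The addr hint,
// MMAP_FLAG_FIXED and execute-only mappings are not supported; anonymous mappings pass
// INVALID_HANDLE_VALUE so the view is backed by the system paging file.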
void *mmap([[maybe_unused]] void *addr, size_t len, uint32_t prot, int flags, int fildes, off_t off)
{
    errno = 0;

    // Skip unsupported combinations of flags:
    if (len == 0 || (static_cast<unsigned>(flags) & MMAP_FLAG_FIXED) != 0 || prot == MMAP_PROT_EXEC) {
        errno = EINVAL;
        return MAP_FAILED;
    }

    HANDLE h = ((static_cast<unsigned>(flags) & MMAP_FLAG_ANONYMOUS) == 0)
                   ? reinterpret_cast<HANDLE>(_get_osfhandle(fildes))
                   : INVALID_HANDLE_VALUE;
    if ((static_cast<unsigned>(flags) & MMAP_FLAG_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE) {
        errno = EBADF;
        return MAP_FAILED;
    }

    const auto prot_page = mem_protection_flags_for_page(prot);
    const off_t max_size = off + static_cast<off_t>(len);
    const auto max_size_low = mem_select_lower_bound(max_size);
    const auto max_size_high = mem_select_upper_bound(max_size);
    HANDLE fm = CreateFileMapping(h, nullptr, prot_page, max_size_high, max_size_low, nullptr);
    if (fm == nullptr) {
        errno = mem_errno(GetLastError(), EPERM);
        return MAP_FAILED;
    }

    const auto prot_file = mem_protection_flags_for_file(prot, flags);
    const auto file_off_low = mem_select_lower_bound(off);
    const auto file_off_high = mem_select_upper_bound(off);
    void *map = MapViewOfFile(fm, prot_file, file_off_high, file_off_low, len);
    CloseHandle(fm);
    if (map == nullptr) {
        errno = mem_errno(GetLastError(), EPERM);
        return MAP_FAILED;
    }

    return map;
}

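// POSIX-like munmap; len is unused because UnmapViewOfFile always releases the whole view.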
int munmap(void *addr, [[maybe_unused]] size_t len)
{
    if (UnmapViewOfFile(addr)) {
        return 0;
    }

    errno = mem_errno(GetLastError(), EPERM);

    return -1;
}

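// Deleter used by BytePtr to release the mapping when the smart pointer goes out of scope.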
void MmapDeleter(std::byte *ptr, size_t size) noexcept
{
    if (ptr != nullptr) {
        munmap(ptr, size);
    }
}

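// Map a file region into memory. The requested offset is rounded down to a page boundary for the
// underlying mmap call, and the returned pointer is adjusted forward to the requested offset.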
BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t file_offset, void *hint)
{
    size_t map_offset = RoundDown(file_offset, GetPageSize());
    size_t offset = file_offset - map_offset;
    size_t map_size = size + offset;
    void *result = mmap(hint, map_size, prot, flags, file.GetFd(), map_offset);
    if (result == MAP_FAILED) {
        return BytePtr(nullptr, 0, MmapDeleter);
    }

    return BytePtr(static_cast<std::byte *>(result) + offset, size, MmapDeleter);
}

BytePtr MapExecuted(size_t size)
{
    // By design the caller should pass a valid size, so don't do any additional checks except the
    // ones that mmap does itself
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    void *result = mmap(nullptr, size, MMAP_PROT_EXEC | MMAP_PROT_WRITE, MMAP_FLAG_SHARED | MMAP_FLAG_ANONYMOUS, -1, 0);
    if (result == MAP_FAILED) {
        result = nullptr;
    }

    return BytePtr(static_cast<std::byte *>(result), (result == nullptr) ? 0 : size, MmapDeleter);
}

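// Change the protection of an existing mapping, translating MMAP_PROT_* flags into the PAGE_*
// constants that VirtualProtect expects.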
std::optional<Error> MakeMemWithProtFlag(void *mem, size_t size, int prot)
{
    // VirtualProtect requires PAGE_* protection constants and a valid pointer to receive the
    // previous protection; it returns zero on failure.
    DWORD new_prot = (prot == MMAP_PROT_NONE) ? PAGE_NOACCESS : mem_protection_flags_for_page(prot);
    DWORD old_prot = 0;
    if (VirtualProtect(mem, size, new_prot, &old_prot) == 0) {
        return Error(GetLastError());
    }
    return {};
}

std::optional<Error> MakeMemReadExec(void *mem, size_t size)
{
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    return MakeMemWithProtFlag(mem, size, MMAP_PROT_EXEC | MMAP_PROT_READ);
}

std::optional<Error> MakeMemReadWrite(void *mem, size_t size)
{
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    return MakeMemWithProtFlag(mem, size, MMAP_PROT_WRITE | MMAP_PROT_READ);
}

std::optional<Error> MakeMemReadOnly(void *mem, size_t size)
{
    return MakeMemWithProtFlag(mem, size, MMAP_PROT_READ);
}

std::optional<Error> MakeMemProtected(void *mem, size_t size)
{
    return MakeMemWithProtFlag(mem, size, MMAP_PROT_NONE);
}

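// Page size is hard-coded to 4 KiB here; AlignDownToPageSize below queries the actual value
// from the system.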
uint32_t GetPageSize()
{
    constexpr size_t PAGE_SIZE = 4096;
    return PAGE_SIZE;
}

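// Map a read-write anonymous region of whole pages, optionally poisoning it for AddressSanitizer.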
void *MapRWAnonymousRaw(size_t size, bool force_poison)
{
    ASSERT(size % GetPageSize() == 0);
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    void *result =
        mmap(nullptr, size, MMAP_PROT_READ | MMAP_PROT_WRITE, MMAP_FLAG_PRIVATE | MMAP_FLAG_ANONYMOUS, -1, 0);
    if (UNLIKELY(result == MAP_FAILED)) {
        result = nullptr;
    }
    if ((result != nullptr) && force_poison) {
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}

std::optional<Error> PartiallyUnmapRaw([[maybe_unused]] void *mem, [[maybe_unused]] size_t size)
{
    // We can't partially unmap allocated memory, because UnmapViewOfFile on Win32 does not
    // support unmapping only part of a mapped view.
    return {};
}

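// Map an anonymous read-write region aligned to aligment_in_bytes. The mapping is over-allocated
// by the alignment, and the surrounding padding is handed to PartiallyUnmapRaw (a no-op on
// Windows, so the padding simply stays mapped).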
void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t aligment_in_bytes, bool force_poison)
{
    ASSERT(aligment_in_bytes % GetPageSize() == 0);
    if (size == 0) {
        return nullptr;
    }
    void *result = MapRWAnonymousRaw(size + aligment_in_bytes, force_poison);
    if (result == nullptr) {
        return result;
    }
    auto allocated_mem = reinterpret_cast<uintptr_t>(result);
    uintptr_t aligned_mem = (allocated_mem & ~(aligment_in_bytes - 1U)) +
                            ((allocated_mem % aligment_in_bytes) != 0U ? aligment_in_bytes : 0U);
    ASSERT(aligned_mem >= allocated_mem);
    size_t unused_in_start = aligned_mem - allocated_mem;
    ASSERT(unused_in_start <= aligment_in_bytes);
    size_t unused_in_end = aligment_in_bytes - unused_in_start;
    if (unused_in_start != 0) {
        PartiallyUnmapRaw(result, unused_in_start);
    }
    if (unused_in_end != 0) {
        auto end_part = reinterpret_cast<void *>(aligned_mem + size);
        PartiallyUnmapRaw(end_part, unused_in_end);
    }
    return reinterpret_cast<void *>(aligned_mem);
}

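// Round an address down to the actual system page size reported by GetSystemInfo.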
uintptr_t AlignDownToPageSize(uintptr_t addr)
{
    SYSTEM_INFO sysInfo;
    GetSystemInfo(&sysInfo);
    const size_t SYS_PAGE_SIZE = sysInfo.dwPageSize;
    addr &= ~(SYS_PAGE_SIZE - 1);
    return addr;
}

void *AlignedAlloc(size_t alignment_in_bytes, size_t size)
{
    size_t aligned_size = (size + alignment_in_bytes - 1) & ~(alignment_in_bytes - 1);
    // aligned_alloc is not supported on MinGW, so we call _aligned_malloc instead.
    auto ret = _aligned_malloc(aligned_size, alignment_in_bytes);
    // _aligned_malloc returns an aligned pointer, so just assert on it; no runtime checks are needed.
    ASSERT(reinterpret_cast<uintptr_t>(ret) == (reinterpret_cast<uintptr_t>(ret) & ~(alignment_in_bytes - 1)));
    return ret;
}

void AlignedFree(void *mem)
{
    _aligned_free(mem);
}

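// Unpoison a region for AddressSanitizer and unmap it, reporting errno on failure.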
std::optional<Error> UnmapRaw(void *mem, size_t size)
{
    ASAN_UNPOISON_MEMORY_REGION(mem, size);
    int res = munmap(mem, size);
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }

    return {};
}

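// Tagging anonymous memory is not supported on Windows, so this is a no-op.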
std::optional<Error> TagAnonymousMemory([[maybe_unused]] const void *mem, [[maybe_unused]] size_t size,
                                        [[maybe_unused]] const char *tag)
{
    return {};
}

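// mallinfo is not available on Windows, so report the default placeholder value.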
size_t GetNativeBytesFromMallinfo()
{
    return DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
}

}  // namespace panda::os::mem