1 /**
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #ifndef PANDA_LIBPANDABASE_PBASE_OS_MEM_H_
17 #define PANDA_LIBPANDABASE_PBASE_OS_MEM_H_
18
19 #include "file.h"
20 #include "macros.h"
21 #include "utils/expected.h"
22 #include "utils/span.h"
23 #include "libpandabase/mem/mem.h"
24
25 #include <cstddef>
26 #ifdef PANDA_TARGET_UNIX
27 #include "platforms/unix/libpandabase/unix_mem.h"
28 #elif PANDA_TARGET_WINDOWS
29 #include "platforms/windows/libpandabase/windows_mem.h"
30 #else
31 #error "Unsupported target: please provide mmap API"
32 #endif
33
34 #include <functional>
35 #include <memory>
36 #include <optional>
37 #include <type_traits>
38
39 namespace ark::os::mem {
40
41 static constexpr uint64_t HIGH_BOUND_32BIT_ADDRESS = 4_GB;
42
43 PANDA_PUBLIC_API void MmapDeleter(std::byte *ptr, size_t size) noexcept;
44
45 /**
46 * @brief Make memory region @param mem with size @param size with protection flags @param prot
47 * @param mem Pointer to memory region (should be aligned to page size)
48 * @param size Size of memory region
49 * @param prot Memory protection flags, a combination of MMAP_PROT_XXX values
50 * @return Error object if any errors occur
51 */
52 std::optional<Error> MakeMemWithProtFlag(void *mem, size_t size, int prot);
53
54 /**
55 * @brief Make memory region @param mem with size @param size readable and executable
56 * @param mem Pointer to memory region (should be aligned to page size)
57 * @param size Size of memory region
58 * @return Error object if any errors occur
59 */
60 std::optional<Error> MakeMemReadExec(void *mem, size_t size);
61
62 /**
63 * @brief Make memory region @param mem with size @param size readable and writable
64 * @param mem Pointer to memory region (should be aligned to page size)
65 * @param size Size of memory region
66 * @return Error object if any errors occur
67 */
68 PANDA_PUBLIC_API std::optional<Error> MakeMemReadWrite(void *mem, size_t size);
69
70 /**
71 * @brief Make memory region @param mem with size @param size readable
72 * @param mem Pointer to memory region (should be aligned to page size)
73 * @param size Size of memory region
74 * @return Error object if any errors occur
75 */
76 std::optional<Error> MakeMemReadOnly(void *mem, size_t size);
77
78 /**
79 * @brief Make memory region @param mem with size @param size protected
80 * @param mem Pointer to memory region (should be aligned to page size)
81 * @param size Size of memory region
82 * @return Error object if any errors occur
83 */
84 PANDA_PUBLIC_API std::optional<Error> MakeMemProtected(void *mem, size_t size);
85
86 /**
87 * @brief Align addr @param addr to page size to pass it to MakeMem functions
88 * @param addr Address to align
89 * @return Aligned address
90 */
91 PANDA_PUBLIC_API uintptr_t AlignDownToPageSize(uintptr_t addr);
92
/**
 * @brief Allocate aligned memory with alignment @param alignment_in_bytes and
 * with size @param size. Use AlignedFree to free this memory.
 * @param alignment_in_bytes - alignment in bytes
 * @param size - min required size in bytes
 * @return pointer to the allocated memory
 */
100 PANDA_PUBLIC_API void *AlignedAlloc(size_t alignmentInBytes, size_t size);
101
102 /**
103 * @brief Free memory, allocated by AlignedAlloc.
104 * @param mem - Pointer to memory, allocated by AlignedAlloc
105 */
106 PANDA_PUBLIC_API void AlignedFree(void *mem);
107
108 template <class T>
109 class MapRange {
110 public:
MapRange(T * ptr,size_t size)111 MapRange(T *ptr, size_t size) : sp_(reinterpret_cast<std::byte *>(ptr), size) {}
112
GetSubRange(size_t offset,size_t size)113 MapRange GetSubRange(size_t offset, size_t size)
114 {
115 return MapRange(sp_.SubSpan(offset, size));
116 }
117
MakeReadExec()118 Expected<const std::byte *, Error> MakeReadExec()
119 {
120 auto res = MakeMemReadExec(sp_.Data(), sp_.Size());
121 if (res) {
122 return Unexpected(res.value());
123 }
124
125 return sp_.Data();
126 }
127
MakeReadOnly()128 Expected<const std::byte *, Error> MakeReadOnly()
129 {
130 auto res = MakeMemReadOnly(sp_.Data(), sp_.Size());
131 if (res) {
132 return Unexpected(res.value());
133 }
134
135 return sp_.Data();
136 }
137
MakeReadWrite()138 Expected<std::byte *, Error> MakeReadWrite()
139 {
140 auto res = MakeMemReadWrite(sp_.Data(), sp_.Size());
141 if (res) {
142 return Unexpected(res.value());
143 }
144
145 return sp_.Data();
146 }
147
Align()148 MapRange<T> Align() const
149 {
150 auto unaligned = reinterpret_cast<uintptr_t>(sp_.Data());
151 auto aligned = AlignDownToPageSize(unaligned);
152 Span<std::byte> sp(reinterpret_cast<std::byte *>(aligned), sp_.Size() + unaligned - aligned);
153 return MapRange<T>(sp);
154 }
155
GetSize()156 size_t GetSize() const
157 {
158 return sp_.Size();
159 }
160
GetData()161 std::byte *GetData()
162 {
163 return sp_.Data();
164 }
165
166 virtual ~MapRange() = default;
167
168 DEFAULT_COPY_SEMANTIC(MapRange);
169 NO_MOVE_SEMANTIC(MapRange);
170
171 private:
MapRange(const Span<std::byte> & sp)172 explicit MapRange(const Span<std::byte> &sp) : sp_(sp) {}
173
174 Span<std::byte> sp_;
175 };
176
177 enum class MapPtrType { CONST, NON_CONST };
178
179 template <class T, MapPtrType TYPE>
180 class MapPtr {
181 public:
182 using Deleter = void (*)(T *, size_t) noexcept;
183
MapPtr(T * ptr,size_t size,Deleter deleter)184 MapPtr(T *ptr, size_t size, Deleter deleter) : ptr_(ptr), size_(size), pageOffset_(0), deleter_(deleter) {}
MapPtr(T * ptr,size_t size,size_t pageOffset,Deleter deleter)185 MapPtr(T *ptr, size_t size, size_t pageOffset, Deleter deleter)
186 : ptr_(ptr), size_(size), pageOffset_(pageOffset), deleter_(deleter)
187 {
188 }
189
MapPtr(MapPtr && other)190 MapPtr(MapPtr &&other) noexcept
191 {
192 ptr_ = other.ptr_;
193 pageOffset_ = other.pageOffset_;
194 size_ = other.size_;
195 deleter_ = other.deleter_;
196 other.ptr_ = nullptr;
197 other.deleter_ = nullptr;
198 }
199
200 MapPtr &operator=(MapPtr &&other) noexcept
201 {
202 ptr_ = other.ptr_;
203 pageOffset_ = other.pageOffset_;
204 size_ = other.size_;
205 deleter_ = other.deleter_;
206 other.ptr_ = nullptr;
207 other.deleter_ = nullptr;
208 return *this;
209 }
210
Get()211 std::conditional_t<TYPE == MapPtrType::CONST, const T *, T *> Get() const
212 {
213 return ptr_;
214 }
215
GetSize()216 size_t GetSize() const
217 {
218 return size_;
219 }
220
GetMapRange()221 MapRange<T> GetMapRange() const
222 {
223 return MapRange(ptr_, size_);
224 }
225
GetMapRange()226 MapRange<T> GetMapRange()
227 {
228 return MapRange(ptr_, size_);
229 }
230
GetPtrOffset()231 static constexpr uint32_t GetPtrOffset()
232 {
233 return MEMBER_OFFSET(MapPtr, ptr_);
234 }
235
ToConst()236 MapPtr<T, MapPtrType::CONST> ToConst()
237 {
238 MapPtr<T, MapPtrType::CONST> res(ptr_, size_, pageOffset_, deleter_);
239 ptr_ = nullptr;
240 return res;
241 }
242
243 /*
244 * memory layout for mmap
245 *
246 * addr(is )
247 * ^
248 * page_offset_ | size_
249 * |--------|-----------|
250 * P0 P1 | P2 | P3 P4
251 * | | | | | | | 4 pages
252 * +-----------+--------S--+--------E--+-----------+
253 * ^
254 * |
255 * ptr_
256 * |--------------------| mmap memory
257 * size
258 *
259 * S: file start; E: file end
260 * Available space: [ptr_...(ptr_ + size_ - 1)]
261 * addr sould be page aligned for file map but it is not guaranteed for anonymous map
262 * For anonymous map, page_offset_ = 0
263 */
~MapPtr()264 ~MapPtr()
265 {
266 if (ptr_ == nullptr) {
267 return;
268 }
269 uintptr_t addr = reinterpret_cast<uintptr_t>(ptr_) - pageOffset_;
270 // LINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
271 size_t size = size_ + pageOffset_;
272 deleter_(reinterpret_cast<T *>(addr), size);
273 }
274
275 private:
276 T *ptr_;
277 size_t size_;
278 size_t pageOffset_;
279 Deleter deleter_;
280
281 NO_COPY_SEMANTIC(MapPtr);
282 };
283
284 using ByteMapRange = MapRange<std::byte>;
285 using BytePtr = MapPtr<std::byte, MapPtrType::NON_CONST>;
286 using ConstBytePtr = MapPtr<std::byte, MapPtrType::CONST>;
287 static_assert(ConstBytePtr::GetPtrOffset() == 0);
288
/**
 * Map the specified file into memory.
 * The interface is similar to POSIX mmap.
 *
 * @param file - file to map
 * @param prot - memory protection flags, a combination of MMAP_PROT_XXX values.
 * @param flags - memory map flags, a combination of MMAP_FLAG_XXX values.
 * @param size - size of the mapping in bytes.
 * @param file_offset - an offset in the file. If the offset is not a multiple of page size
 * the function handles this situation. The resulting BytePtr will point to the desired data.
 * @param hint - a desired address to map the file to.
 */
300 PANDA_PUBLIC_API BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t fileOffset = 0,
301 void *hint = nullptr);
302
303 /**
304 * @brief allocates executed memory of size @param size
305 * @param size
306 * @return
307 */
308 BytePtr MapExecuted(size_t size);
309
310 /**
311 * Anonymous mmap with READ | WRITE protection for pages
312 * Note: returned memory will be poisoned in ASAN targets,
313 * if you need other behavior - consider to change interface, or use manual unpoisoning.
314 * @param size - size in bytes, should be multiple of PAGE_SIZE
315 * @param force_poison - poison mmaped memory
316 * @return
317 */
318 PANDA_PUBLIC_API void *MapRWAnonymousRaw(size_t size, bool forcePoison = true);
319
320 /**
321 * Anonymous mmap with READ | WRITE protection for pages.
 * Returned address will be aligned to @param aligment_in_bytes.
323 * Note: returned memory will be poisoned in ASAN targets,
324 * if you need other behavior - consider to change interface, or use manual unpoisoning.
325 * @param size - size in bytes, should be multiple of PAGE_SIZE
326 * @param aligment_in_bytes - alignment in bytes, should be multiple of PAGE_SIZE
327 * @param force_poison - poison mmaped memory
328 * @return
329 */
330 PANDA_PUBLIC_API void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t aligmentInBytes, bool forcePoison = true);
331
332 // ASAN mapped its structures at this magic address (shadow offset):
333 // see https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
334 // Therefore, we can successfully allocate memory at fixed address started somewhere at lower addresses
335 // and it can overlap sanitizer address space and mmap with MAP_FIXED flag finished successfully.
336 // (one can look at the MAP_FIXED flag description of Linux mmap)
337 // However, all load/store from this memory is prohibited.
338 // We can get an error during mmap call only if we use MAP_FIXED_NOREPLACE argument,
339 // but it is supported only since Linux 4.17 (Ubuntu 18 has 4.15)
340 // NOTE(aemelenko): Do smth with this constant.
341 #ifdef PANDA_TARGET_ARM64
342 static constexpr uint64_t MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS = 1ULL << 36ULL;
343 #else
344 static constexpr uint64_t MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS = 0x7fff8000ULL;
345 #endif
346
347 /**
348 * Anonymous mmap in first 4GB address space with READ | WRITE protection for pages
349 *
350 * Try iterative mmap memory from min_mem to 4GB with iterative_step step
351 *
 * @param min_mem minimal address for mmap attempts in first 4GB, should be multiple of PAGE_SIZE
 * @param size size in bytes, should be multiple of PAGE_SIZE
 * @param iterative_step size of step for mmap iteration, should be multiple of PAGE_SIZE
 *
 * @return pointer to the mapped area, nullptr if it couldn't map
357 *
358 * @note returned memory will be poisoned in ASAN targets,
359 * if you need other behavior - consider to change interface, or use manual unpoisoning
360 */
361 void *MapRWAnonymousInFirst4GB(void *minMem, size_t size, size_t iterativeStep = 4_KB);
362
363 /**
364 * Anonymous mmap with fixed address and READ | WRITE protection for pages
365 * Note: returned memory will be poisoned in ASAN targets,
366 * if you need other behavior - consider to change interface, or use manual unpoisoning.
367 * @param mem used address
368 * @param size size in bytes, should be multiple of PAGE_SIZE
369 * @param force_poison poison mmaped memory
370 * @return pointer to the mapped area
371 */
372 void *MapRWAnonymousFixedRaw(void *mem, size_t size, bool forcePoison = true);
373
374 /**
375 * Unmap previously mapped memory.
376 * Note: memory will be unpoisoned before unmapping in ASAN targets.
377 * @param mem - pointer to the memory
378 * @param size - size of memory to unmap
379 * @return Error object if any error occur
380 */
381 PANDA_PUBLIC_API std::optional<Error> UnmapRaw(void *mem, size_t size);
382
383 /**
384 * Unmap part of previously mapped memory.
385 * Note: memory will be unpoisoned before unmapping in ASAN targets.
386 * @param mem - pointer to the memory
387 * @param size - size of memory piece which we want to unmap
388 * @return Error object if any error occur
389 */
390 std::optional<Error> PartiallyUnmapRaw(void *mem, size_t size);
391
392 /**
393 * @brief returns page size for the system
394 * @return
395 */
396 PANDA_PUBLIC_API uint32_t GetPageSize();
397
398 /**
399 * @brief returns cache line size for the system
400 * @return
401 */
402 PANDA_PUBLIC_API size_t GetCacheLineSize();
403
/**
 * Release the physical pages in [pages_start, pages_end) back to the OS.
 * The virtual mapping itself stays valid; released size is pages_end - pages_start.
 * @param pages_start - begin address of the page run, should be multiple of PAGE_SIZE
 * @param pages_end - end address (exclusive) of the page run, should be multiple of PAGE_SIZE
 * @return 0 on success; on Unix targets, madvise's return value (-1 with errno set on failure)
 */
inline int ReleasePages([[maybe_unused]] uintptr_t pagesStart, [[maybe_unused]] uintptr_t pagesEnd)
{
    ASSERT(pagesStart % os::mem::GetPageSize() == 0);
    ASSERT(pagesEnd % os::mem::GetPageSize() == 0);
    ASSERT(pagesEnd >= pagesStart);
#ifdef PANDA_TARGET_UNIX
    // MADV_DONTNEED lets the kernel reclaim the pages now; a later access faults
    // them back in (zero-filled for anonymous memory, re-read for file mappings).
    return madvise(ToVoidPtr(pagesStart), pagesEnd - pagesStart, MADV_DONTNEED);
#else
    // On Windows systems we can do nothing
    return 0;
#endif
}
422
423 /**
424 * Tag anonymous memory with a debug name.
425 * @param mem - pointer to the memory
426 * @param size - size of memory to tag
427 * @param tag - pointer to the debug name (must be a literal or heap object)
428 * @return Error object if any error occur
429 */
430 PANDA_PUBLIC_API std::optional<Error> TagAnonymousMemory(const void *mem, size_t size, const char *tag);
431
432 static constexpr size_t DEFAULT_NATIVE_BYTES_FROM_MALLINFO = 100000;
433
434 PANDA_PUBLIC_API size_t GetNativeBytesFromMallinfo();
435
436 } // namespace ark::os::mem
437
438 #endif // PANDA_LIBPANDABASE_PBASE_OS_MEM_H_
439