1 /**
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #ifndef PANDA_LIBPANDABASE_PBASE_OS_MEM_H_
17 #define PANDA_LIBPANDABASE_PBASE_OS_MEM_H_
18
19 #include "file.h"
20 #include "macros.h"
21 #include "utils/expected.h"
22 #include "utils/span.h"
23 #include "libpandabase/mem/mem.h"
24
25 #include <cstddef>
26 #ifdef PANDA_TARGET_UNIX
27 #include "platforms/unix/libpandabase/unix_mem.h"
28 #elif PANDA_TARGET_WINDOWS
29 #include "platforms/windows/libpandabase/windows_mem.h"
30 #else
31 #error "Unsupported target: please provide mmap API"
32 #endif
33
34 #include <functional>
35 #include <memory>
36 #include <optional>
37 #include <type_traits>
38
39 namespace panda::os::mem {
40
41 void MmapDeleter(std::byte *ptr, size_t size) noexcept;
42
/**
 * \brief Set protection flags \param prot on memory region \param mem of size \param size
 * @param mem Pointer to memory region (should be aligned to page size)
 * @param size Size of memory region
 * @param prot Memory protection flags, a combination of MMAP_PROT_XXX values
 * @return Error object if any errors occur
 */
50 std::optional<Error> MakeMemWithProtFlag(void *mem, size_t size, int prot);
51
52 /**
53 * \brief Make memory region \param mem with size \param size readable and executable
54 * @param mem Pointer to memory region (should be aligned to page size)
55 * @param size Size of memory region
56 * @return Error object if any errors occur
57 */
58 std::optional<Error> MakeMemReadExec(void *mem, size_t size);
59
60 /**
61 * \brief Make memory region \param mem with size \param size readable and writable
62 * @param mem Pointer to memory region (should be aligned to page size)
63 * @param size Size of memory region
64 * @return Error object if any errors occur
65 */
66 std::optional<Error> MakeMemReadWrite(void *mem, size_t size);
67
68 /**
69 * \brief Make memory region \param mem with size \param size readable
70 * @param mem Pointer to memory region (should be aligned to page size)
71 * @param size Size of memory region
72 * @return Error object if any errors occur
73 */
74 std::optional<Error> MakeMemReadOnly(void *mem, size_t size);
75
76 /**
77 * \brief Make memory region \param mem with size \param size protected
78 * @param mem Pointer to memory region (should be aligned to page size)
79 * @param size Size of memory region
80 * @return Error object if any errors occur
81 */
82 std::optional<Error> MakeMemProtected(void *mem, size_t size);
83
84 /**
85 * \brief Align addr \param addr to page size to pass it to MakeMem functions
86 * @param addr Address to align
87 * @return Aligned address
88 */
89 uintptr_t AlignDownToPageSize(uintptr_t addr);
90
91 /**
92 * \brief Allocated aligned memory with alignment \param alignment_in_bytes and
93 * with size \param size. Use AlignedFree to free this memory.
94 * @param alignment_in_bytes - alignment in bytes
95 * @param size - min required size in bytes
96 * @return
97 */
98 void *AlignedAlloc(size_t alignment_in_bytes, size_t size);
99
100 /**
101 * \brief Free memory, allocated by AlignedAlloc.
102 * @param mem - Pointer to memory, allocated by AlignedAlloc
103 */
104 void AlignedFree(void *mem);
105
106 template <class T>
107 class MapRange {
108 public:
MapRange(T * ptr,size_t size)109 MapRange(T *ptr, size_t size) : sp_(reinterpret_cast<std::byte *>(ptr), size) {}
110
GetSubRange(size_t offset,size_t size)111 MapRange GetSubRange(size_t offset, size_t size)
112 {
113 return MapRange(sp_.SubSpan(offset, size));
114 }
115
MakeReadExec()116 Expected<const std::byte *, Error> MakeReadExec()
117 {
118 auto res = MakeMemReadExec(sp_.Data(), sp_.Size());
119 if (res) {
120 return Unexpected(res.value());
121 }
122
123 return sp_.Data();
124 }
125
MakeReadOnly()126 Expected<const std::byte *, Error> MakeReadOnly()
127 {
128 auto res = MakeMemReadOnly(sp_.Data(), sp_.Size());
129 if (res) {
130 return Unexpected(res.value());
131 }
132
133 return sp_.Data();
134 }
135
MakeReadWrite()136 Expected<std::byte *, Error> MakeReadWrite()
137 {
138 auto res = MakeMemReadWrite(sp_.Data(), sp_.Size());
139 if (res) {
140 return Unexpected(res.value());
141 }
142
143 return sp_.Data();
144 }
145
Align()146 MapRange<T> Align() const
147 {
148 auto unaligned = reinterpret_cast<uintptr_t>(sp_.Data());
149 auto aligned = AlignDownToPageSize(unaligned);
150 Span<std::byte> sp(reinterpret_cast<std::byte *>(aligned), sp_.Size() + unaligned - aligned);
151 return MapRange<T>(sp);
152 }
153
GetSize()154 size_t GetSize() const
155 {
156 return sp_.Size();
157 }
158
GetData()159 std::byte *GetData()
160 {
161 return sp_.Data();
162 }
163
164 virtual ~MapRange() = default;
165
166 DEFAULT_COPY_SEMANTIC(MapRange);
167 NO_MOVE_SEMANTIC(MapRange);
168
169 private:
MapRange(const Span<std::byte> & sp)170 explicit MapRange(const Span<std::byte> &sp) : sp_(sp) {}
171
172 Span<std::byte> sp_;
173 };
174
175 enum class MapPtrType { CONST, NON_CONST };
176
177 template <class T, MapPtrType type>
178 class MapPtr {
179 public:
180 using Deleter = void (*)(T *, size_t) noexcept;
181
MapPtr(T * ptr,size_t size,Deleter deleter)182 MapPtr(T *ptr, size_t size, Deleter deleter) : ptr_(ptr), size_(size), page_offset_(0), deleter_(deleter) {}
MapPtr(T * ptr,size_t size,size_t page_offset,Deleter deleter)183 MapPtr(T *ptr, size_t size, size_t page_offset, Deleter deleter)
184 : ptr_(ptr), size_(size), page_offset_(page_offset), deleter_(deleter)
185 {
186 }
187
MapPtr(MapPtr && other)188 MapPtr(MapPtr &&other) noexcept
189 {
190 ptr_ = other.ptr_;
191 page_offset_ = other.page_offset_;
192 size_ = other.size_;
193 deleter_ = other.deleter_;
194 other.ptr_ = nullptr;
195 other.deleter_ = nullptr;
196 }
197
198 MapPtr &operator=(MapPtr &&other) noexcept
199 {
200 ptr_ = other.ptr_;
201 page_offset_ = other.page_offset_;
202 size_ = other.size_;
203 deleter_ = other.deleter_;
204 other.ptr_ = nullptr;
205 other.deleter_ = nullptr;
206 return *this;
207 }
208
Get()209 std::conditional_t<type == MapPtrType::CONST, const T *, T *> Get() const
210 {
211 return ptr_;
212 }
213
GetSize()214 size_t GetSize() const
215 {
216 return size_;
217 }
218
GetMapRange()219 MapRange<T> GetMapRange() const
220 {
221 return MapRange(ptr_, size_);
222 }
223
GetMapRange()224 MapRange<T> GetMapRange()
225 {
226 return MapRange(ptr_, size_);
227 }
228
GetPtrOffset()229 static constexpr uint32_t GetPtrOffset()
230 {
231 return MEMBER_OFFSET(MapPtr, ptr_);
232 }
233
ToConst()234 MapPtr<T, MapPtrType::CONST> ToConst()
235 {
236 MapPtr<T, MapPtrType::CONST> res(ptr_, size_, page_offset_, deleter_);
237 ptr_ = nullptr;
238 return res;
239 }
240
241 /*
242 * memory layout for mmap
243 *
244 * addr(is )
245 * ^
246 * page_offset_ | size_
247 * |--------|-----------|
248 * P0 P1 | P2 | P3 P4
249 * | | | | | | | 4 pages
250 * +-----------+--------S--+--------E--+-----------+
251 * ^
252 * |
253 * ptr_
254 * |--------------------| mmap memory
255 * size
256 *
257 * S: file start; E: file end
258 * Available space: [ptr_...(ptr_ + size_ - 1)]
259 * addr sould be page aligned for file map but it is not guaranteed for anonymous map
260 * For anonymous map, page_offset_ = 0
261 */
~MapPtr()262 ~MapPtr()
263 {
264 if (ptr_ == nullptr) {
265 return;
266 }
267 uintptr_t addr = reinterpret_cast<uintptr_t>(ptr_) - page_offset_;
268 // LINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
269 size_t size = size_ + page_offset_;
270 deleter_(reinterpret_cast<T *>(addr), size);
271 }
272
273 private:
274 T *ptr_;
275 size_t size_;
276 size_t page_offset_;
277 Deleter deleter_;
278
279 NO_COPY_SEMANTIC(MapPtr);
280 };
281
282 using ByteMapRange = MapRange<std::byte>;
283 using BytePtr = MapPtr<std::byte, MapPtrType::NON_CONST>;
284 using ConstBytePtr = MapPtr<std::byte, MapPtrType::CONST>;
285 static_assert(ConstBytePtr::GetPtrOffset() == 0);
286
/**
 * Map the specified file into memory.
 * The interface is similar to POSIX mmap.
 *
 * @param file - file to map
 * @param prot - memory protection flags, a combination of MMAP_PROT_XXX values.
 * @param flags - memory map flags, a combination of MMAP_FLAG_XXX values.
 * @param file_offset - an offset in the file. If the offset is not a multiple of the page size,
 * the function handles this situation. The resulting BytePtr will point to the desired data.
 * @param hint - a desired address to map the file to.
 */
298 BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t file_offset = 0,
299 void *hint = nullptr);
300
/**
 * \brief Allocates executable memory of size \param size
 * @param size
 * @return
 */
306 BytePtr MapExecuted(size_t size);
307
308 /**
309 * Anonymous mmap with READ | WRITE protection for pages
310 * Note: returned memory will be poisoned in ASAN targets,
311 * if you need other behavior - consider to change interface, or use manual unpoisoning.
312 * @param size - size in bytes, should be multiple of PAGE_SIZE
313 * @param force_poison - poison mmaped memory
314 * @return
315 */
316 void *MapRWAnonymousRaw(size_t size, bool force_poison = true);
317
318 /**
319 * Anonymous mmap with READ | WRITE protection for pages.
320 * Returned address will be aligned as \param aligment_in_bytes.
321 * Note: returned memory will be poisoned in ASAN targets,
322 * if you need other behavior - consider to change interface, or use manual unpoisoning.
323 * @param size - size in bytes, should be multiple of PAGE_SIZE
324 * @param aligment_in_bytes - alignment in bytes, should be multiple of PAGE_SIZE
325 * @param force_poison - poison mmaped memory
326 * @return
327 */
328 void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t aligment_in_bytes, bool force_poison = true);
329
330 // ASAN mapped its structures at this magic address (shadow offset):
331 // see https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
332 // Therefore, we can successfully allocate memory at fixed address started somewhere at lower addresses
333 // and it can overlap sanitizer address space and mmap with MAP_FIXED flag finished successfully.
334 // (one can look at the MAP_FIXED flag description of Linux mmap)
335 // However, all load/store from this memory is prohibited.
336 // We can get an error during mmap call only if we use MAP_FIXED_NOREPLACE argument,
337 // but it is supported only since Linux 4.17 (Ubuntu 18 has 4.15)
338 // TODO(aemelenko): Do smth with this constant.
339 #ifdef PANDA_TARGET_ARM64
340 static constexpr uint64_t MMAP_FIXED_MAGIC_ADDR_FOR_ASAN = 1ULL << 36ULL;
341 #else
342 static constexpr uint64_t MMAP_FIXED_MAGIC_ADDR_FOR_ASAN = 0x7fff8000ULL;
343 #endif
344
345 /**
346 * Anonymous mmap with fixed address and READ | WRITE protection for pages
347 * Note: returned memory will be poisoned in ASAN targets,
348 * if you need other behavior - consider to change interface, or use manual unpoisoning.
349 * @param mem used address
350 * @param size size in bytes, should be multiple of PAGE_SIZE
351 * @param force_poison poison mmaped memory
352 * @return pointer to the mapped area
353 */
354 void *MapRWAnonymousFixedRaw(void *mem, size_t size, bool force_poison = true);
355
356 /**
357 * Unmap previously mapped memory.
358 * Note: memory will be unpoisoned before unmapping in ASAN targets.
359 * @param mem - pointer to the memory
360 * @param size - size of memory to unmap
361 * @return Error object if any error occur
362 */
363 std::optional<Error> UnmapRaw(void *mem, size_t size);
364
365 /**
366 * Unmap part of previously mapped memory.
367 * Note: memory will be unpoisoned before unmapping in ASAN targets.
368 * @param mem - pointer to the memory
369 * @param size - size of memory piece which we want to unmap
370 * @return Error object if any error occur
371 */
372 std::optional<Error> PartiallyUnmapRaw(void *mem, size_t size);
373
374 /**
375 * \brief returns page size for the system
376 * @return
377 */
378 uint32_t GetPageSize();
379
380 /**
381 * Release pages [pages_start, pages_end] to os.
382 * @param pages_start - address of pages beginning, should be multiple of PAGE_SIZE
383 * @param pages_end - address of pages ending, should be multiple of PAGE_SIZE
384 * @return
385 */
inline int ReleasePages([[maybe_unused]] uintptr_t pages_start, [[maybe_unused]] uintptr_t pages_end)
{
    // Both bounds must be page-aligned: the advice below operates on whole pages.
    ASSERT(pages_start % os::mem::GetPageSize() == 0);
    ASSERT(pages_end % os::mem::GetPageSize() == 0);
    ASSERT(pages_end >= pages_start);
#ifdef PANDA_TARGET_UNIX
    // MADV_DONTNEED lets the kernel reclaim the physical pages immediately;
    // the virtual mapping stays valid and (for anonymous mappings) reads as
    // zero-filled pages on next access. Returns madvise's 0/-1 result.
    return madvise(ToVoidPtr(pages_start), pages_end - pages_start, MADV_DONTNEED);
#else
    // On Windows systems we can do nothing
    return 0;
#endif
}
398
399 /**
400 * Tag anonymous memory with a debug name.
401 * @param mem - pointer to the memory
402 * @param size - size of memory to tag
403 * @param tag - pointer to the debug name (must be a literal or heap object)
404 * @return Error object if any error occur
405 */
406 std::optional<Error> TagAnonymousMemory(const void *mem, size_t size, const char *tag);
407
408 static constexpr size_t DEFAULT_NATIVE_BYTES_FROM_MALLINFO = 100000;
409
410 size_t GetNativeBytesFromMallinfo();
411
412 } // namespace panda::os::mem
413
414 #endif // PANDA_LIBPANDABASE_PBASE_OS_MEM_H_
415