1 /**
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "os/mem.h"
17 #include "utils/type_helpers.h"
18 #include "utils/asan_interface.h"
19 #include "utils/tsan_interface.h"
20
21 #include <limits>
22 #include <sys/mman.h>
23 #include <unistd.h>
24
25 #include <type_traits>
26
27 #if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)
28 #include <malloc.h>
29 #endif
30
31 namespace panda::os::mem {
32
/// Deleter used by BytePtr: releases a region created with mmap.
/// A nullptr is ignored, so an empty BytePtr destructs safely.
void MmapDeleter(std::byte *ptr, size_t size) noexcept
{
    if (ptr == nullptr) {
        return;
    }
    munmap(ptr, size);
}
39
MapFile(file::File file,uint32_t prot,uint32_t flags,size_t size,size_t file_offset,void * hint)40 BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t file_offset, void *hint)
41 {
42 size_t map_offset = RoundDown(file_offset, GetPageSize());
43 size_t offset = file_offset - map_offset;
44 size_t map_size = size + offset;
45 void *result = mmap(hint, map_size, prot, flags, file.GetFd(), map_offset);
46 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast)
47 if (result == MAP_FAILED) {
48 return BytePtr(nullptr, 0, MmapDeleter);
49 }
50
51 // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
52 return BytePtr(static_cast<std::byte *>(result) + offset, size, offset, MmapDeleter);
53 }
54
MapExecuted(size_t size)55 BytePtr MapExecuted(size_t size)
56 {
57 // By design caller should pass valid size, so don't do any additional checks except ones that
58 // mmap do itself
59 // NOLINTNEXTLINE(hicpp-signed-bitwise)
60 void *result = mmap(nullptr, size, PROT_EXEC | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
61 if (result == reinterpret_cast<void *>(-1)) {
62 result = nullptr;
63 }
64
65 return BytePtr(static_cast<std::byte *>(result), (result == nullptr) ? 0 : size, MmapDeleter);
66 }
67
MakeMemWithProtFlag(void * mem,size_t size,int prot)68 std::optional<Error> MakeMemWithProtFlag(void *mem, size_t size, int prot)
69 {
70 int r = mprotect(mem, size, prot);
71 if (r != 0) {
72 return Error(errno);
73 }
74 return {};
75 }
76
MakeMemReadExec(void * mem,size_t size)77 std::optional<Error> MakeMemReadExec(void *mem, size_t size)
78 {
79 // NOLINTNEXTLINE(hicpp-signed-bitwise)
80 return MakeMemWithProtFlag(mem, size, PROT_EXEC | PROT_READ);
81 }
82
MakeMemReadWrite(void * mem,size_t size)83 std::optional<Error> MakeMemReadWrite(void *mem, size_t size)
84 {
85 // NOLINTNEXTLINE(hicpp-signed-bitwise)
86 return MakeMemWithProtFlag(mem, size, PROT_WRITE | PROT_READ);
87 }
88
MakeMemReadOnly(void * mem,size_t size)89 std::optional<Error> MakeMemReadOnly(void *mem, size_t size)
90 {
91 return MakeMemWithProtFlag(mem, size, PROT_READ);
92 }
93
MakeMemProtected(void * mem,size_t size)94 std::optional<Error> MakeMemProtected(void *mem, size_t size)
95 {
96 return MakeMemWithProtFlag(mem, size, PROT_NONE);
97 }
98
/// Rounds `addr` down to the start of the OS page containing it
/// (relies on the page size being a power of two).
uintptr_t AlignDownToPageSize(uintptr_t addr)
{
    const auto page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    return addr & ~(page_size - 1);
}
105
AlignedAlloc(size_t alignment_in_bytes,size_t size)106 void *AlignedAlloc(size_t alignment_in_bytes, size_t size)
107 {
108 size_t aligned_size = (size + alignment_in_bytes - 1) & ~(alignment_in_bytes - 1);
109 #if defined PANDA_TARGET_MOBILE || defined PANDA_TARGET_MACOS || defined PANDA_TARGET_IOS || \
110 defined PANDA_TARGET_ARKUI_X
111 void *ret = nullptr;
112 int r = posix_memalign(reinterpret_cast<void **>(&ret), alignment_in_bytes, aligned_size);
113 if (r != 0) {
114 std::cerr << "posix_memalign failed, code: " << r << std::endl;
115 ASSERT(0);
116 }
117 #else
118 auto ret = aligned_alloc(alignment_in_bytes, aligned_size);
119 #endif
120 ASSERT(reinterpret_cast<uintptr_t>(ret) == (reinterpret_cast<uintptr_t>(ret) & ~(alignment_in_bytes - 1)));
121 return ret;
122 }
123
/// Releases memory obtained from AlignedAlloc. Accepts nullptr (std::free is a
/// no-op for it).
void AlignedFree(void *mem)
{
    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
    std::free(mem);
}
129
GetPageSizeFromOs()130 static uint32_t GetPageSizeFromOs()
131 {
132 // NOLINTNEXTLINE(google-runtime-int)
133 long sz = sysconf(_SC_PAGESIZE);
134 LOG_IF(sz == -1, FATAL, RUNTIME) << "Can't get page size from OS";
135 return static_cast<uint32_t>(sz);
136 }
137
GetPageSize()138 uint32_t GetPageSize()
139 {
140 // NOLINTNEXTLINE(google-runtime-int)
141 static uint32_t sz = GetPageSizeFromOs();
142 return sz;
143 }
144
MapRWAnonymousRaw(size_t size,bool force_poison)145 void *MapRWAnonymousRaw(size_t size, bool force_poison)
146 {
147 ASSERT(size % GetPageSize() == 0);
148 // NOLINTNEXTLINE(hicpp-signed-bitwise)
149 void *result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
150 if (result == reinterpret_cast<void *>(-1)) {
151 result = nullptr;
152 }
153 if ((result != nullptr) && force_poison) {
154 ASAN_POISON_MEMORY_REGION(result, size);
155 }
156
157 return result;
158 }
159
PartiallyUnmapRaw(void * mem,size_t size)160 std::optional<Error> PartiallyUnmapRaw(void *mem, size_t size)
161 {
162 // We can partially unmap memory on Unix systems via common unmap
163 return UnmapRaw(mem, size);
164 }
165
/// Maps a read-write anonymous region of `size` bytes whose start address is
/// aligned to `aligment_in_bytes` (must be a multiple of the page size; the
/// masking below additionally assumes it is a power of two). The region is
/// over-allocated by one alignment, then the unused head and tail pages are
/// returned to the OS so exactly `size` bytes remain mapped.
/// Returns nullptr when size is 0 or the underlying mmap fails.
void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t aligment_in_bytes, bool force_poison)
{
    ASSERT(aligment_in_bytes % GetPageSize() == 0);
    if (size == 0) {
        return nullptr;
    }
    // Reserve one extra alignment's worth of bytes so an aligned start always fits inside
    void *result = MapRWAnonymousRaw(size + aligment_in_bytes, force_poison);
    if (result == nullptr) {
        return result;
    }
    auto allocated_mem = reinterpret_cast<uintptr_t>(result);
    // Round the start address up to the next alignment boundary (power-of-two mask)
    uintptr_t aligned_mem = (allocated_mem & ~(aligment_in_bytes - 1U)) +
                            ((allocated_mem % aligment_in_bytes) != 0U ? aligment_in_bytes : 0U);
    ASSERT(aligned_mem >= allocated_mem);
    size_t unused_in_start = aligned_mem - allocated_mem;
    ASSERT(unused_in_start <= aligment_in_bytes);
    size_t unused_in_end = aligment_in_bytes - unused_in_start;
    // Trim the surplus before and after the aligned window; failures of these
    // best-effort unmaps are deliberately ignored (the region is still usable)
    if (unused_in_start != 0) {
        PartiallyUnmapRaw(result, unused_in_start);
    }
    if (unused_in_end != 0) {
        auto end_part = reinterpret_cast<void *>(aligned_mem + size);
        PartiallyUnmapRaw(end_part, unused_in_end);
    }
    return reinterpret_cast<void *>(aligned_mem);
}
192
/// Maps `size` bytes of read-write anonymous memory at the fixed address `mem`
/// (MAP_FIXED: any existing mapping in that range is replaced). `size` must be
/// a multiple of the page size. Returns nullptr on mmap failure.
void *MapRWAnonymousFixedRaw(void *mem, size_t size, bool force_poison)
{
#if defined(PANDA_ASAN_ON)
    // If this assert fails, please decrease the size of the memory for you program
    // or don't run it with ASAN.
    // The requested range must not cover ASAN's reserved shadow boundary address.
    if (!((reinterpret_cast<uintptr_t>(mem) > MMAP_FIXED_MAGIC_ADDR_FOR_ASAN) ||
          ((reinterpret_cast<uintptr_t>(mem) + size) < MMAP_FIXED_MAGIC_ADDR_FOR_ASAN))) {
        // Abort explicitly as well, in case ASSERT compiles to a no-op in this build
        ASSERT((reinterpret_cast<uintptr_t>(mem) > MMAP_FIXED_MAGIC_ADDR_FOR_ASAN) ||
               ((reinterpret_cast<uintptr_t>(mem) + size) < MMAP_FIXED_MAGIC_ADDR_FOR_ASAN));
        std::abort();
    }
#endif
    ASSERT(size % GetPageSize() == 0);
    void *result =  // NOLINTNEXTLINE(hicpp-signed-bitwise)
        mmap(mem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (result == reinterpret_cast<void *>(-1)) {
        result = nullptr;
    }
    if ((result != nullptr) && force_poison) {
        // If you have such an error here:
        // ==4120==AddressSanitizer CHECK failed:
        // ../../../../src/libsanitizer/asan/asan_mapping.h:303 "((AddrIsInMem(p))) != (0)" (0x0, 0x0)
        // Look at the big comment at the start of the method.
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}
221
UnmapRaw(void * mem,size_t size)222 std::optional<Error> UnmapRaw(void *mem, size_t size)
223 {
224 ASAN_UNPOISON_MEMORY_REGION(mem, size);
225 int res = munmap(mem, size);
226 if (UNLIKELY(res == -1)) {
227 return Error(errno);
228 }
229
230 return {};
231 }
232
233 #ifdef PANDA_TARGET_OHOS
234 #include <sys/prctl.h>
235
236 #ifndef PR_SET_VMA
237 constexpr int PR_SET_VMA = 0x53564d41;
238 #endif
239
240 #ifndef PR_SET_VMA_ANON_NAME
241 constexpr unsigned long PR_SET_VMA_ANON_NAME = 0;
242 #endif
243 #endif // PANDA_TARGET_OHOS
244
/// Attaches a human-readable name to an anonymous memory region via
/// prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) so the region can be identified
/// in /proc/<pid>/maps. Compiled to a successful no-op on non-OHOS targets.
/// NOTE(review): some kernels retain the `tag` pointer instead of copying the
/// string — prefer string literals for `tag`; confirm against the target kernel.
std::optional<Error> TagAnonymousMemory([[maybe_unused]] const void *mem, [[maybe_unused]] size_t size,
                                        [[maybe_unused]] const char *tag)
{
#ifdef PANDA_TARGET_OHOS
    // The kernel requires a page-aligned address and a page-multiple size
    ASSERT(size % GetPageSize() == 0);
    ASSERT(reinterpret_cast<uintptr_t>(mem) % GetPageSize() == 0);

    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
    int res = prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(mem)), size,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(tag)));
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }
#endif  // PANDA_TARGET_OHOS
    return {};
}
264
/// Returns the number of bytes currently allocated on the native (malloc) heap,
/// as reported by mallinfo/mallinfo2. Under ASAN/TSAN, or on libcs that expose
/// no malloc statistics, a fixed default value is returned instead.
size_t GetNativeBytesFromMallinfo()
{
    size_t mallinfo_bytes;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON)
    // Sanitizers intercept malloc, so mallinfo would not describe the real heap
    mallinfo_bytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo with ASAN or TSAN. Return default value";
#else
#if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)

    // For GLIBC, uordblks is total size of space which is allocated by malloc
    // For mobile libc, uordblks is total size of space which is allocated by malloc or mmap called by malloc for
    // non-small allocations
    // NOTE(review): this version test misfires if a glibc 3.x ever appears
    // (minor version resets); consider __GLIBC_PREREQ(2, 33) instead
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    // mallinfo2 (glibc >= 2.33) uses size_t fields and does not overflow at 2 GiB
    struct mallinfo2 info = mallinfo2();
    mallinfo_bytes = info.uordblks;
#else
    struct mallinfo info = mallinfo();
    mallinfo_bytes = static_cast<unsigned int>(info.uordblks);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#if defined(__GLIBC__)

    // For GLIBC, hblkhd is total size of space which is allocated by mmap called by malloc for non-small allocations
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    mallinfo_bytes += info.hblkhd;
#else
    mallinfo_bytes += static_cast<unsigned int>(info.hblkhd);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#endif  // __GLIBC__
#else
    mallinfo_bytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo without GLIBC or mobile libc. Return default value";
#endif  // __GLIBC__ || PANDA_TARGET_MOBILE
#endif  // PANDA_ASAN_ON || PANDA_TSAN_ON
    // For ASAN or TSAN, return default value. For GLIBC, return uordblks + hblkhd. For mobile libc, return uordblks.
    // For other, return default value.
    return mallinfo_bytes;
}
304
305 } // namespace panda::os::mem
306