/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "os/mem.h"
#include "cpu_features.h"
#include "utils/type_helpers.h"
#include "utils/asan_interface.h"
#include "utils/tsan_interface.h"

#include <cerrno>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <sys/mman.h>
#include <unistd.h>

#include <type_traits>

#if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)
#include <malloc.h>
#endif

namespace ark::os::mem {

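// Deleter passed to BytePtr by the mapping helpers below: unmaps the region,
// tolerating a null pointer so an empty BytePtr can be destroyed safely.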
void MmapDeleter(std::byte *ptr, size_t size) noexcept
{
    if (ptr != nullptr) {
        munmap(ptr, size);
    }
}

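// Maps the given file region, transparently handling a fileOffset that is not page-aligned:
// the mapping starts at the enclosing page boundary and the returned pointer is advanced by
// the remainder. Worked example with 4 KiB pages: fileOffset = 5000 yields mapOffset = 4096,
// offset = 904, mapSize = size + 904, and the caller receives (base + 904).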
BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t fileOffset, void *hint)
{
    size_t mapOffset = RoundDown(fileOffset, GetPageSize());
    size_t offset = fileOffset - mapOffset;
    size_t mapSize = size + offset;
    void *result = mmap(hint, mapSize, static_cast<int>(prot), static_cast<int>(flags), file.GetFd(),
                        static_cast<off_t>(mapOffset));
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast)
    if (result == MAP_FAILED) {
        return BytePtr(nullptr, 0, MmapDeleter);
    }

    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    return BytePtr(static_cast<std::byte *>(result) + offset, size, offset, MmapDeleter);
}

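// Maps an anonymous region that is simultaneously writable and executable; callers that
// want W^X can later drop the write permission with MakeMemReadExec below.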
BytePtr MapExecuted(size_t size)
{
    // By design the caller must pass a valid size, so don't do any additional checks
    // beyond the ones mmap does itself
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    void *result = mmap(nullptr, size, PROT_EXEC | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (result == MAP_FAILED) {
        result = nullptr;
    }

    return BytePtr(static_cast<std::byte *>(result), (result == nullptr) ? 0 : size, MmapDeleter);
}

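// The MakeMem* helpers below are thin wrappers over mprotect(2): an empty optional means
// success, Error(errno) reports failure. A minimal usage sketch (hypothetical `code` and
// `codeSize`; the exact Error accessors are those declared in os/mem.h):
//
//     if (auto err = MakeMemReadExec(code, codeSize)) {
//         // handle *err: mprotect failed, e.g. due to a misaligned address
//     }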
std::optional<Error> MakeMemWithProtFlag(void *mem, size_t size, int prot)
{
    int r = mprotect(mem, size, prot);
    if (r != 0) {
        return Error(errno);
    }
    return {};
}

std::optional<Error> MakeMemReadExec(void *mem, size_t size)
{
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    return MakeMemWithProtFlag(mem, size, PROT_EXEC | PROT_READ);
}

std::optional<Error> MakeMemReadWrite(void *mem, size_t size)
{
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    return MakeMemWithProtFlag(mem, size, PROT_WRITE | PROT_READ);
}

std::optional<Error> MakeMemReadOnly(void *mem, size_t size)
{
    return MakeMemWithProtFlag(mem, size, PROT_READ);
}

std::optional<Error> MakeMemProtected(void *mem, size_t size)
{
    return MakeMemWithProtFlag(mem, size, PROT_NONE);
}

uintptr_t AlignDownToPageSize(uintptr_t addr)
{
    const auto sysPageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    addr &= ~(sysPageSize - 1);
    return addr;
}

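// Rounds size up to a multiple of the alignment (aligned_alloc requires this) and then
// allocates. The bit mask below assumes alignmentInBytes is a power of two. Worked example:
// alignmentInBytes = 64 and size = 100 give alignedSize = 128.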
void *AlignedAlloc(size_t alignmentInBytes, size_t size)
{
    size_t alignedSize = (size + alignmentInBytes - 1) & ~(alignmentInBytes - 1);
#if defined PANDA_TARGET_MOBILE || defined PANDA_TARGET_MACOS
    void *ret = nullptr;
    int r = posix_memalign(reinterpret_cast<void **>(&ret), alignmentInBytes, alignedSize);
    if (r != 0) {
        std::cerr << "posix_memalign failed, code: " << r << std::endl;
        ASSERT(0);
    }
#else
    auto ret = aligned_alloc(alignmentInBytes, alignedSize);
#endif
    ASSERT(reinterpret_cast<uintptr_t>(ret) == (reinterpret_cast<uintptr_t>(ret) & ~(alignmentInBytes - 1)));
    return ret;
}

void AlignedFree(void *mem)
{
    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
    std::free(mem);
}

static uint32_t GetPageSizeFromOs()
{
    // NOLINTNEXTLINE(google-runtime-int)
    long sz = sysconf(_SC_PAGESIZE);
    LOG_IF(sz == -1, FATAL, RUNTIME) << "Can't get page size from OS";
    return static_cast<uint32_t>(sz);
}

uint32_t GetPageSize()
{
    static uint32_t sz = GetPageSizeFromOs();
    return sz;
}

static size_t GetCacheLineSizeFromOs()
{
#if !defined(__MUSL__)
    // NOLINTNEXTLINE(google-runtime-int)
    long sz = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    LOG_IF(sz <= 0, FATAL, RUNTIME) << "Can't get cache line size from OS";
    return static_cast<size_t>(sz);
#else
    return ark::CACHE_LINE_SIZE;
#endif
}

size_t GetCacheLineSize()
{
    static size_t sz = GetCacheLineSizeFromOs();
    return sz;
}

void *MapRWAnonymousRaw(size_t size, bool forcePoison)
{
    ASSERT(size % GetPageSize() == 0);
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    void *result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (result == MAP_FAILED) {
        result = nullptr;
    }
    if ((result != nullptr) && forcePoison) {
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}

std::optional<Error> PartiallyUnmapRaw(void *mem, size_t size)
{
    // On Unix systems memory can be partially unmapped with a regular munmap call
    return UnmapRaw(mem, size);
}

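// Returns an anonymous R/W mapping whose start address is aligned to alignmentInBytes
// (a multiple of the page size, per the ASSERT below). The technique: over-map by one
// alignment unit, then unmap the unused head and tail. Example: a 32 KiB request with
// 16 KiB alignment maps 48 KiB and trims a head and tail totalling 16 KiB around the
// aligned window.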
void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t alignmentInBytes, bool forcePoison)
{
    ASSERT(alignmentInBytes % GetPageSize() == 0);
    if (size == 0) {
        return nullptr;
    }
    void *result = MapRWAnonymousRaw(size + alignmentInBytes, forcePoison);
    if (result == nullptr) {
        return result;
    }
    auto allocatedMem = reinterpret_cast<uintptr_t>(result);
    uintptr_t alignedMem =
        (allocatedMem & ~(alignmentInBytes - 1U)) + ((allocatedMem % alignmentInBytes) != 0U ? alignmentInBytes : 0U);
    ASSERT(alignedMem >= allocatedMem);
    size_t unusedInStart = alignedMem - allocatedMem;
    ASSERT(unusedInStart <= alignmentInBytes);
    size_t unusedInEnd = alignmentInBytes - unusedInStart;
    if (unusedInStart != 0) {
        PartiallyUnmapRaw(result, unusedInStart);
    }
    if (unusedInEnd != 0) {
        auto endPart = reinterpret_cast<void *>(alignedMem + size);
        PartiallyUnmapRaw(endPart, unusedInEnd);
    }
    return reinterpret_cast<void *>(alignedMem);
}

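// On 64-bit targets this probes candidate addresses from minMem upwards in iterativeStep
// increments until mmap returns exactly the requested address below the 4 GB boundary;
// mappings placed elsewhere by the kernel are unmapped and the search continues. On 32-bit
// targets every address already fits, so a single mmap suffices.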
void *MapRWAnonymousInFirst4GB(void *minMem, size_t size, [[maybe_unused]] size_t iterativeStep)
{
    ASSERT(ToUintPtr(minMem) % GetPageSize() == 0);
    ASSERT(size % GetPageSize() == 0);
    ASSERT(iterativeStep % GetPageSize() == 0);
#ifdef PANDA_TARGET_32
    void *resultAddr = mmap(minMem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (resultAddr == MAP_FAILED) {
        return nullptr;
    }
#else
    if (ToUintPtr(minMem) >= HIGH_BOUND_32BIT_ADDRESS) {
        return nullptr;
    }
    if (ToUintPtr(minMem) + size > HIGH_BOUND_32BIT_ADDRESS) {
        return nullptr;
    }
    uintptr_t requestedAddr = ToUintPtr(minMem);
    for (; requestedAddr + size <= HIGH_BOUND_32BIT_ADDRESS; requestedAddr += iterativeStep) {
        void *mmapAddr =
            mmap(ToVoidPtr(requestedAddr), size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mmapAddr == MAP_FAILED) {
            continue;
        }
        if (mmapAddr == ToVoidPtr(requestedAddr)) {
            break;
        }
        if (munmap(mmapAddr, size) != 0) {
            return nullptr;
        }
    }
    if (requestedAddr + size > HIGH_BOUND_32BIT_ADDRESS) {
        return nullptr;
    }
    void *resultAddr = ToVoidPtr(requestedAddr);
#endif  // PANDA_TARGET_32
    ASAN_POISON_MEMORY_REGION(resultAddr, size);
    return resultAddr;
}

void *MapRWAnonymousFixedRaw(void *mem, size_t size, bool forcePoison)
{
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || defined(USE_THREAD_SANITIZER)
    // If this check fails, please decrease the size of the memory for your program
    // or don't run it with ASAN or TSAN.
    LOG_IF((reinterpret_cast<uintptr_t>(mem) <= MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS) &&
               ((reinterpret_cast<uintptr_t>(mem) + size) >= MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS),
           FATAL, RUNTIME)
        << "Unable to mmap mem [" << reinterpret_cast<uintptr_t>(mem) << "] because of ASAN or TSAN";
#endif
    ASSERT(size % GetPageSize() == 0);
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    void *result = mmap(mem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (result == MAP_FAILED) {
        result = nullptr;
    }
    if ((result != nullptr) && forcePoison) {
        // If you see an error like:
        // ==4120==AddressSanitizer CHECK failed:
        // ../../../../src/libsanitizer/asan/asan_mapping.h:303 "((AddrIsInMem(p))) != (0)" (0x0, 0x0)
        // look at the big comment at the start of this method.
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}

std::optional<Error> UnmapRaw(void *mem, size_t size)
{
    ASAN_UNPOISON_MEMORY_REGION(mem, size);
    int res = munmap(mem, size);
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }

    return {};
}

#ifdef PANDA_TARGET_OHOS
#include <sys/prctl.h>

#ifndef PR_SET_VMA
constexpr int PR_SET_VMA = 0x53564d41;
#endif

#ifndef PR_SET_VMA_ANON_NAME
constexpr unsigned long PR_SET_VMA_ANON_NAME = 0;
#endif
#endif  // PANDA_TARGET_OHOS

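// On OHOS this names the anonymous mapping via prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...),
// the same mechanism the Linux kernel provides, so the tag shows up as "[anon:<tag>]" in
// /proc/<pid>/maps; on other targets it is a no-op. Depending on the kernel, the tag string
// may be referenced rather than copied, so `tag` should outlive the mapping.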
std::optional<Error> TagAnonymousMemory([[maybe_unused]] const void *mem, [[maybe_unused]] size_t size,
                                        [[maybe_unused]] const char *tag)
{
#ifdef PANDA_TARGET_OHOS
    ASSERT(size % GetPageSize() == 0);
    ASSERT(reinterpret_cast<uintptr_t>(mem) % GetPageSize() == 0);

    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
    int res = prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(mem)), size,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(tag)));
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }
#endif  // PANDA_TARGET_OHOS
    return {};
}

size_t GetNativeBytesFromMallinfo()
{
    size_t mallinfoBytes;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON)
    mallinfoBytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Getting native bytes from mallinfo under ASAN or TSAN. Returning the default value";
#else
#if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)

    // For GLIBC, uordblks is the total size of space allocated by malloc.
    // For mobile libc, uordblks is the total size of space allocated by malloc or by mmap
    // when malloc uses it for non-small allocations.
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    struct mallinfo2 info = mallinfo2();
    mallinfoBytes = info.uordblks;
#else
    struct mallinfo info = mallinfo();
    mallinfoBytes = static_cast<unsigned int>(info.uordblks);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#if defined(__GLIBC__)

    // For GLIBC, hblkhd is the total size of space allocated by mmap when malloc uses it
    // for non-small allocations.
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    mallinfoBytes += info.hblkhd;
#else
    mallinfoBytes += static_cast<unsigned int>(info.hblkhd);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#endif  // __GLIBC__
#else
    mallinfoBytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Getting native bytes from mallinfo without GLIBC or mobile libc. Returning the default value";
#endif  // __GLIBC__ || PANDA_TARGET_MOBILE
#endif  // PANDA_ASAN_ON || PANDA_TSAN_ON
    // Summary: under ASAN or TSAN, the default value; with GLIBC, uordblks + hblkhd;
    // with mobile libc, uordblks; otherwise, the default value.
    return mallinfoBytes;
}

}  // namespace ark::os::mem