/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "os/mem.h"
#include "cpu_features.h"
#include "utils/type_helpers.h"
#include "utils/asan_interface.h"
#include "utils/tsan_interface.h"

#include <cerrno>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <optional>
#include <sys/mman.h>
#include <unistd.h>

#include <type_traits>

#if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)
#include <malloc.h>
#endif

namespace ark::os::mem {
void MmapDeleter(std::byte *ptr, size_t size) noexcept
{
    if (ptr != nullptr) {
        munmap(ptr, size);
    }
}

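// MapFile maps `size` bytes of `file` starting at `fileOffset`. mmap requires the file
// offset to be page-aligned, so the requested offset is rounded down to a page boundary,
// the mapping is enlarged by the remainder, and the returned pointer is advanced past it.
// Illustration with hypothetical values: with 4 KiB pages and fileOffset == 5000,
// mapOffset == 4096, offset == 904, and the caller receives base + 904.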
// CC-OFFNXT(G.FUN.01) solid logic
BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t fileOffset, void *hint)
{
    size_t mapOffset = RoundDown(fileOffset, GetPageSize());
    size_t offset = fileOffset - mapOffset;
    size_t mapSize = size + offset;
    void *result = mmap(hint, mapSize, static_cast<int>(prot), static_cast<int>(flags), file.GetFd(),
                        static_cast<off_t>(mapOffset));
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast)
    if (result == MAP_FAILED) {
        return BytePtr(nullptr, 0, MmapDeleter);
    }

    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    return BytePtr(static_cast<std::byte *>(result) + offset, size, offset, MmapDeleter);
}

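// MapExecuted returns an anonymous mapping that is both writable and executable.
// Hardened kernels and some sandboxes may refuse PROT_WRITE | PROT_EXEC mappings,
// in which case the returned BytePtr holds nullptr.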
BytePtr MapExecuted(size_t size)
{
    // By design the caller should pass a valid size, so don't do any additional checks
    // except the ones that mmap does itself
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    void *result = mmap(nullptr, size, PROT_EXEC | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (result == MAP_FAILED) {
        result = nullptr;
    }

    return BytePtr(static_cast<std::byte *>(result), (result == nullptr) ? 0 : size, MmapDeleter);
}

std::optional<Error> MakeMemWithProtFlag(void *mem, size_t size, int prot)
{
    int r = mprotect(mem, size, prot);
    if (r != 0) {
        return Error(errno);
    }
    return {};
}

std::optional<Error> MakeMemReadExec(void *mem, size_t size)
{
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    return MakeMemWithProtFlag(mem, size, PROT_EXEC | PROT_READ);
}

std::optional<Error> MakeMemReadWrite(void *mem, size_t size)
{
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    return MakeMemWithProtFlag(mem, size, PROT_WRITE | PROT_READ);
}

std::optional<Error> MakeMemReadOnly(void *mem, size_t size)
{
    return MakeMemWithProtFlag(mem, size, PROT_READ);
}

std::optional<Error> MakeMemProtected(void *mem, size_t size)
{
    return MakeMemWithProtFlag(mem, size, PROT_NONE);
}

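// AlignDownToPageSize clears the low bits of `addr`, which assumes the system page size
// is a power of two (true on the supported platforms). Worked example with hypothetical
// values: with 4 KiB pages, 0x12345 & ~0xFFF == 0x12000.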
uintptr_t AlignDownToPageSize(uintptr_t addr)
{
    const auto sysPageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    addr &= ~(sysPageSize - 1);
    return addr;
}

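// AlignedAlloc rounds `size` up to a multiple of `alignmentInBytes` (assumed to be a
// power of two), because C11 aligned_alloc requires the size to be an integral multiple
// of the alignment; posix_memalign is used on targets where aligned_alloc is unavailable.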
void *AlignedAlloc(size_t alignmentInBytes, size_t size)
{
    size_t alignedSize = (size + alignmentInBytes - 1) & ~(alignmentInBytes - 1);
#if defined PANDA_TARGET_MOBILE || defined PANDA_TARGET_MACOS
    void *ret = nullptr;
    int r = posix_memalign(&ret, alignmentInBytes, alignedSize);
    if (r != 0) {
        std::cerr << "posix_memalign failed, code: " << r << std::endl;
        ASSERT(0);
    }
#else
    auto ret = aligned_alloc(alignmentInBytes, alignedSize);
#endif
    ASSERT(reinterpret_cast<uintptr_t>(ret) == (reinterpret_cast<uintptr_t>(ret) & ~(alignmentInBytes - 1)));
    return ret;
}

void AlignedFree(void *mem)
{
    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
    std::free(mem);
}

static uint32_t GetPageSizeFromOs()
{
    // NOLINTNEXTLINE(google-runtime-int)
    long sz = sysconf(_SC_PAGESIZE);
    LOG_IF(sz == -1, FATAL, RUNTIME) << "Can't get page size from OS";
    return static_cast<uint32_t>(sz);
}

uint32_t GetPageSize()
{
    // NOLINTNEXTLINE(google-runtime-int)
    static uint32_t sz = GetPageSizeFromOs();
    return sz;
}

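// _SC_LEVEL1_DCACHE_LINESIZE is a glibc extension that musl does not provide, so the
// musl build falls back to the compile-time ark::CACHE_LINE_SIZE constant instead.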
static size_t GetCacheLineSizeFromOs()
{
#if !defined(__MUSL__)
    // NOLINTNEXTLINE(google-runtime-int)
    long sz = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    LOG_IF(sz <= 0, FATAL, RUNTIME) << "Can't get cache line size from OS";
    return static_cast<size_t>(sz);
#else
    return ark::CACHE_LINE_SIZE;
#endif
}

size_t GetCacheLineSize()
{
    // NOLINTNEXTLINE(google-runtime-int)
    static size_t sz = GetCacheLineSizeFromOs();
    return sz;
}

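// MapRWAnonymousRaw returns a page-aligned read-write anonymous mapping, or nullptr on
// failure. When forcePoison is set, the whole region is poisoned for AddressSanitizer,
// so any access before the owner explicitly unpoisons it is reported as an error.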
void *MapRWAnonymousRaw(size_t size, bool forcePoison)
{
    ASSERT(size % GetPageSize() == 0);
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    void *result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (result == MAP_FAILED) {
        result = nullptr;
    }
    if ((result != nullptr) && forcePoison) {
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}

std::optional<Error> PartiallyUnmapRaw(void *mem, size_t size)
{
    // On Unix systems memory can be partially unmapped with an ordinary munmap call
    return UnmapRaw(mem, size);
}

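// MapRWAnonymousWithAlignmentRaw obtains an alignment stronger than the page size by
// over-mapping: it maps size + alignment bytes, picks the first aligned address inside
// the mapping, and unmaps the unused head and tail. alignmentInBytes must be a multiple
// of the page size and, for the bit arithmetic below, a power of two.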
void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t alignmentInBytes, bool forcePoison)
{
    ASSERT(alignmentInBytes % GetPageSize() == 0);
    if (size == 0) {
        return nullptr;
    }
    void *result = MapRWAnonymousRaw(size + alignmentInBytes, forcePoison);
    if (result == nullptr) {
        return result;
    }
    auto allocatedMem = reinterpret_cast<uintptr_t>(result);
    uintptr_t alignedMem =
        (allocatedMem & ~(alignmentInBytes - 1U)) + ((allocatedMem % alignmentInBytes) != 0U ? alignmentInBytes : 0U);
    ASSERT(alignedMem >= allocatedMem);
    size_t unusedInStart = alignedMem - allocatedMem;
    ASSERT(unusedInStart <= alignmentInBytes);
    size_t unusedInEnd = alignmentInBytes - unusedInStart;
    if (unusedInStart != 0) {
        PartiallyUnmapRaw(result, unusedInStart);
    }
    if (unusedInEnd != 0) {
        auto endPart = reinterpret_cast<void *>(alignedMem + size);
        PartiallyUnmapRaw(endPart, unusedInEnd);
    }
    return reinterpret_cast<void *>(alignedMem);
}

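// MapRWAnonymousInFirst4GB maps `size` bytes somewhere in [minMem, 4 GiB). On 32-bit
// targets every address already fits, so a single mmap with a hint suffices. On 64-bit
// targets the hint is only advisory, so the loop probes candidate addresses in
// iterativeStep increments and keeps a mapping only if the kernel placed it exactly at
// the requested address, unmapping any mapping that landed elsewhere.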
void *MapRWAnonymousInFirst4GB(void *minMem, size_t size, [[maybe_unused]] size_t iterativeStep)
{
    ASSERT(ToUintPtr(minMem) % GetPageSize() == 0);
    ASSERT(size % GetPageSize() == 0);
    ASSERT(iterativeStep % GetPageSize() == 0);
#ifdef PANDA_TARGET_32
    void *resultAddr = mmap(minMem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (resultAddr == MAP_FAILED) {
        return nullptr;
    }
#else
    if (ToUintPtr(minMem) >= HIGH_BOUND_32BIT_ADDRESS) {
        return nullptr;
    }
    if (ToUintPtr(minMem) + size > HIGH_BOUND_32BIT_ADDRESS) {
        return nullptr;
    }
    uintptr_t requestedAddr = ToUintPtr(minMem);
    for (; requestedAddr + size <= HIGH_BOUND_32BIT_ADDRESS; requestedAddr += iterativeStep) {
        void *mmapAddr =
            mmap(ToVoidPtr(requestedAddr), size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mmapAddr == MAP_FAILED) {
            continue;
        }
        if (mmapAddr == ToVoidPtr(requestedAddr)) {
            break;
        }
        if (munmap(mmapAddr, size) != 0) {
            return nullptr;
        }
    }
    if (requestedAddr + size > HIGH_BOUND_32BIT_ADDRESS) {
        return nullptr;
    }
    void *resultAddr = ToVoidPtr(requestedAddr);
#endif  // PANDA_TARGET_32
    ASAN_POISON_MEMORY_REGION(resultAddr, size);
    return resultAddr;
}

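// MapRWAnonymousFixedRaw maps at exactly `mem` with MAP_FIXED, which silently replaces
// any existing mapping in that range; the sanitizer guard below refuses ranges that
// would cover the magic address the sanitizers themselves depend on.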
void *MapRWAnonymousFixedRaw(void *mem, size_t size, bool forcePoison)
{
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || defined(USE_THREAD_SANITIZER)
    // If this assert fails, please decrease the size of the memory for your program
    // or don't run it with ASAN or TSAN.
    LOG_IF((reinterpret_cast<uintptr_t>(mem) <= MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS) &&
               ((reinterpret_cast<uintptr_t>(mem) + size) >= MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS),
           FATAL, RUNTIME)
        << "Unable to mmap mem [" << reinterpret_cast<uintptr_t>(mem) << "] because of ASAN or TSAN";
#endif
    ASSERT(size % GetPageSize() == 0);
    void *result =  // NOLINTNEXTLINE(hicpp-signed-bitwise)
        mmap(mem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (result == MAP_FAILED) {
        result = nullptr;
    }
    if ((result != nullptr) && forcePoison) {
        // If you have such an error here:
        // ==4120==AddressSanitizer CHECK failed:
        // ../../../../src/libsanitizer/asan/asan_mapping.h:303 "((AddrIsInMem(p))) != (0)" (0x0, 0x0)
        // Look at the big comment at the start of the method.
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}

std::optional<Error> UnmapRaw(void *mem, size_t size)
{
    ASAN_UNPOISON_MEMORY_REGION(mem, size);
    int res = munmap(mem, size);
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }

    return {};
}

#ifdef PANDA_TARGET_OHOS
#include <sys/prctl.h>

#ifndef PR_SET_VMA
constexpr int PR_SET_VMA = 0x53564d41;
#endif

#ifndef PR_SET_VMA_ANON_NAME
constexpr unsigned long PR_SET_VMA_ANON_NAME = 0;
#endif
#endif  // PANDA_TARGET_OHOS

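// TagAnonymousMemory attaches a human-readable name to an anonymous mapping via
// prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...), so the region shows up as
// "[anon:<tag>]" in /proc/<pid>/maps. Depending on the kernel, the name may be stored
// by reference rather than copied, so `tag` should outlive the mapping. On targets
// other than OHOS the call is a no-op.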
std::optional<Error> TagAnonymousMemory([[maybe_unused]] const void *mem, [[maybe_unused]] size_t size,
                                        [[maybe_unused]] const char *tag)
{
#ifdef PANDA_TARGET_OHOS
    ASSERT(size % GetPageSize() == 0);
    ASSERT(reinterpret_cast<uintptr_t>(mem) % GetPageSize() == 0);

    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
    int res = prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(mem)), size,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(tag)));
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }
#endif  // PANDA_TARGET_OHOS
    return {};
}

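// GetNativeBytesFromMallinfo reports the number of bytes currently allocated through
// the native allocator. glibc 2.33 replaced mallinfo() with mallinfo2(), whose fields
// are size_t rather than int and therefore do not wrap once allocations exceed 2 GiB,
// which is why both variants are handled below.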
size_t GetNativeBytesFromMallinfo()
{
    size_t mallinfoBytes;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON)
    mallinfoBytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo with ASAN or TSAN. Return default value";
#else
#if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)

    // For GLIBC, uordblks is the total size of the space allocated by malloc
    // For mobile libc, uordblks is the total size of the space allocated by malloc or by the
    // mmap calls malloc makes for non-small allocations
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    struct mallinfo2 info = mallinfo2();
    mallinfoBytes = info.uordblks;
#else
    struct mallinfo info = mallinfo();
    mallinfoBytes = static_cast<unsigned int>(info.uordblks);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#if defined(__GLIBC__)

    // For GLIBC, hblkhd is the total size of the space allocated by the mmap calls malloc makes
    // for non-small allocations
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    mallinfoBytes += info.hblkhd;
#else
    mallinfoBytes += static_cast<unsigned int>(info.hblkhd);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#endif  // __GLIBC__
#else
    mallinfoBytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo without GLIBC or mobile libc. Return default value";
#endif  // __GLIBC__ || PANDA_TARGET_MOBILE
#endif  // PANDA_ASAN_ON || PANDA_TSAN_ON
    // For ASAN or TSAN, return the default value. For GLIBC, return uordblks + hblkhd. For mobile libc,
    // return uordblks. Otherwise, return the default value.
    return mallinfoBytes;
}

}  // namespace ark::os::mem