• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright (c) 2021-2025 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "os/mem.h"
17 #include "cpu_features.h"
18 #include "utils/type_helpers.h"
19 #include "utils/asan_interface.h"
20 #include "utils/tsan_interface.h"
21 
22 #include <limits>
23 #include <sys/mman.h>
24 #include <unistd.h>
25 
26 #ifdef __APPLE__
27 #include <sys/sysctl.h>
28 #endif
29 
30 #include <type_traits>
31 
32 #if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)
33 #include <malloc.h>
34 #endif
35 
36 namespace ark::os::mem {
37 
/// Deleter used by BytePtr: releases an mmap-ed region of `size` bytes.
/// Safe to call with a null pointer (no-op in that case).
void MmapDeleter(std::byte *ptr, size_t size) noexcept
{
    if (ptr == nullptr) {
        return;
    }
    munmap(ptr, size);
}
44 
45 // CC-OFFNXT(G.FUN.01) solid logic
MapFile(file::File file,uint32_t prot,uint32_t flags,size_t size,size_t fileOffset,void * hint)46 BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t fileOffset, void *hint)
47 {
48     size_t mapOffset = RoundDown(fileOffset, GetPageSize());
49     size_t offset = fileOffset - mapOffset;
50     size_t mapSize = size + offset;
51     void *result =
52         mmap(hint, mapSize, static_cast<int>(prot), static_cast<int>(flags), file.GetFd(), static_cast<int>(mapOffset));
53     // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast)
54     if (result == MAP_FAILED) {
55         return BytePtr(nullptr, 0, MmapDeleter);
56     }
57 
58     // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
59     return BytePtr(static_cast<std::byte *>(result) + offset, size, offset, MmapDeleter);
60 }
61 
MapExecuted(size_t size)62 BytePtr MapExecuted(size_t size)
63 {
64     // By design caller should pass valid size, so don't do any additional checks except ones that
65     // mmap do itself
66     // NOLINTNEXTLINE(hicpp-signed-bitwise)
67     void *result = mmap(nullptr, size, PROT_EXEC | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
68     if (result == MAP_FAILED) {
69         result = nullptr;
70     }
71 
72     return BytePtr(static_cast<std::byte *>(result), (result == nullptr) ? 0 : size, MmapDeleter);
73 }
74 
MakeMemWithProtFlag(void * mem,size_t size,int prot)75 std::optional<Error> MakeMemWithProtFlag(void *mem, size_t size, int prot)
76 {
77     int r = mprotect(mem, size, prot);
78     if (r != 0) {
79         return Error(errno);
80     }
81     return {};
82 }
83 
MakeMemReadExec(void * mem,size_t size)84 std::optional<Error> MakeMemReadExec(void *mem, size_t size)
85 {
86     // NOLINTNEXTLINE(hicpp-signed-bitwise)
87     return MakeMemWithProtFlag(mem, size, PROT_EXEC | PROT_READ);
88 }
89 
MakeMemReadWrite(void * mem,size_t size)90 std::optional<Error> MakeMemReadWrite(void *mem, size_t size)
91 {
92     // NOLINTNEXTLINE(hicpp-signed-bitwise)
93     return MakeMemWithProtFlag(mem, size, PROT_WRITE | PROT_READ);
94 }
95 
MakeMemReadOnly(void * mem,size_t size)96 std::optional<Error> MakeMemReadOnly(void *mem, size_t size)
97 {
98     return MakeMemWithProtFlag(mem, size, PROT_READ);
99 }
100 
MakeMemProtected(void * mem,size_t size)101 std::optional<Error> MakeMemProtected(void *mem, size_t size)
102 {
103     return MakeMemWithProtFlag(mem, size, PROT_NONE);
104 }
105 
/// Rounds `addr` down to the start of its OS page.
/// The mask arithmetic assumes the system page size is a power of two.
uintptr_t AlignDownToPageSize(uintptr_t addr)
{
    const auto pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    return addr & ~(pageSize - 1);
}
112 
/// Allocates `size` bytes aligned to `alignmentInBytes`.
/// NOTE(review): the mask-based rounding below is only correct when alignmentInBytes
/// is a power of two — confirm callers guarantee this.
/// Aborts (via ASSERT / posix_memalign failure) rather than returning nullptr on error.
void *AlignedAlloc(size_t alignmentInBytes, size_t size)
{
    // Round the request up to a multiple of the alignment: aligned_alloc requires
    // size to be a multiple of alignment.
    size_t alignedSize = (size + alignmentInBytes - 1) & ~(alignmentInBytes - 1);
#if defined PANDA_TARGET_MOBILE || defined PANDA_TARGET_MACOS
    // Mobile/macOS toolchains lack aligned_alloc; use posix_memalign instead.
    void *ret = nullptr;
    int r = posix_memalign(reinterpret_cast<void **>(&ret), alignmentInBytes, alignedSize);
    if (r != 0) {
        std::cerr << "posix_memalign failed, code: " << r << std::endl;
        ASSERT(0);
    }
#else
    auto ret = aligned_alloc(alignmentInBytes, alignedSize);
#endif
    // Sanity check: the returned pointer really is aligned as requested.
    ASSERT(reinterpret_cast<uintptr_t>(ret) == (reinterpret_cast<uintptr_t>(ret) & ~(alignmentInBytes - 1)));
    return ret;
}
129 
/// Releases memory obtained from AlignedAlloc (both posix_memalign and
/// aligned_alloc results are released with free).
void AlignedFree(void *mem)
{
    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
    std::free(mem);
}
135 
GetPageSizeFromOs()136 static uint32_t GetPageSizeFromOs()
137 {
138     // NOLINTNEXTLINE(google-runtime-int)
139     long sz = sysconf(_SC_PAGESIZE);
140     LOG_IF(sz == -1, FATAL, RUNTIME) << "Can't get page size from OS";
141     return static_cast<uint32_t>(sz);
142 }
143 
GetPageSize()144 uint32_t GetPageSize()
145 {
146     // NOLINTNEXTLINE(google-runtime-int)
147     static uint32_t sz = GetPageSizeFromOs();
148     return sz;
149 }
150 
GetCacheLineSizeFromOs()151 static size_t GetCacheLineSizeFromOs()
152 {
153 #if !defined(__MUSL__)
154 #ifdef __linux__
155     // NOLINTNEXTLINE(google-runtime-int)
156     long sz = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
157 #else
158     long sz = 0;
159     size_t length = sizeof(sz);
160     sysctlbyname("hw.l1cachesize", &sz, &length, NULL, 0);
161 #endif
162     LOG_IF(sz <= 0, FATAL, RUNTIME) << "Can't get cache line size from OS";
163     return static_cast<uint32_t>(sz);
164 #else
165     return ark::CACHE_LINE_SIZE;
166 #endif
167 }
168 
GetCacheLineSize()169 size_t GetCacheLineSize()
170 {
171     // NOLINTNEXTLINE(google-runtime-int)
172     static size_t sz = GetCacheLineSizeFromOs();
173     return sz;
174 }
175 
MapRWAnonymousRaw(size_t size,bool forcePoison)176 void *MapRWAnonymousRaw(size_t size, bool forcePoison)
177 {
178     ASSERT(size % GetPageSize() == 0);
179     // NOLINTNEXTLINE(hicpp-signed-bitwise)
180     void *result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
181     if (result == MAP_FAILED) {
182         result = nullptr;
183     }
184     if ((result != nullptr) && forcePoison) {
185         ASAN_POISON_MEMORY_REGION(result, size);
186     }
187 
188     return result;
189 }
190 
/// Unmaps a sub-range of a previously mapped region.
/// On Unix, munmap already supports unmapping part of a mapping, so this simply
/// forwards to UnmapRaw.
std::optional<Error> PartiallyUnmapRaw(void *mem, size_t size)
{
    // We can partially unmap memory on Unix systems via common unmap
    return UnmapRaw(mem, size);
}
196 
/// Maps a read-write anonymous region of `size` bytes whose start address is aligned
/// to `aligmentInBytes`. The alignment must be a multiple of the page size; the mask
/// arithmetic below also assumes it is a power of two — TODO confirm callers guarantee this.
/// Over-allocates by one alignment unit, then trims the unused head and tail pages.
/// Returns nullptr on failure or when size == 0.
void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t aligmentInBytes, bool forcePoison)
{
    ASSERT(aligmentInBytes % GetPageSize() == 0);
    if (size == 0) {
        return nullptr;
    }
    // Reserve size + alignment so an aligned start always fits inside the mapping.
    void *result = MapRWAnonymousRaw(size + aligmentInBytes, forcePoison);
    if (result == nullptr) {
        return result;
    }
    auto allocatedMem = reinterpret_cast<uintptr_t>(result);
    // Round the start up to the next alignment boundary (no-op if already aligned).
    uintptr_t alignedMem =
        (allocatedMem & ~(aligmentInBytes - 1U)) + ((allocatedMem % aligmentInBytes) != 0U ? aligmentInBytes : 0U);
    ASSERT(alignedMem >= allocatedMem);
    size_t unusedInStart = alignedMem - allocatedMem;
    ASSERT(unusedInStart <= aligmentInBytes);
    size_t unusedInEnd = aligmentInBytes - unusedInStart;
    // Return the trimmed head and tail to the OS; unmap failures are ignored (best effort).
    if (unusedInStart != 0) {
        PartiallyUnmapRaw(result, unusedInStart);
    }
    if (unusedInEnd != 0) {
        auto endPart = reinterpret_cast<void *>(alignedMem + size);
        PartiallyUnmapRaw(endPart, unusedInEnd);
    }
    return reinterpret_cast<void *>(alignedMem);
}
223 
/// Maps a read-write anonymous region of `size` bytes located entirely below the
/// 4 GiB boundary, starting the search at minMem. On 32-bit targets every address
/// already qualifies, so a single mmap with a hint is enough. On 64-bit targets,
/// candidate addresses are probed in steps of iterativeStep until the kernel places
/// the mapping exactly at the requested address. Returns nullptr when no slot fits.
/// All three arguments must be page-aligned.
void *MapRWAnonymousInFirst4GB(void *minMem, size_t size, [[maybe_unused]] size_t iterativeStep)
{
    ASSERT(ToUintPtr(minMem) % GetPageSize() == 0);
    ASSERT(size % GetPageSize() == 0);
    ASSERT(iterativeStep % GetPageSize() == 0);
#ifdef PANDA_TARGET_32
    // Any address a 32-bit process can get is below 4 GiB already.
    void *resultAddr = mmap(minMem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (resultAddr == MAP_FAILED) {
        return nullptr;
    }
#else
    // Reject requests that cannot fit below the 4 GiB boundary at all.
    if (ToUintPtr(minMem) >= HIGH_BOUND_32BIT_ADDRESS) {
        return nullptr;
    }
    if (ToUintPtr(minMem) + size > HIGH_BOUND_32BIT_ADDRESS) {
        return nullptr;
    }
    uintptr_t requestedAddr = ToUintPtr(minMem);
    for (; requestedAddr + size <= HIGH_BOUND_32BIT_ADDRESS; requestedAddr += iterativeStep) {
        void *mmapAddr =
            mmap(ToVoidPtr(requestedAddr), size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mmapAddr == MAP_FAILED) {
            continue;
        }
        // The hint is non-binding: accept only if the kernel honored it exactly.
        if (mmapAddr == ToVoidPtr(requestedAddr)) {
            break;
        }
        // Wrong placement: release it and try the next candidate address.
        if (munmap(mmapAddr, size) != 0) {
            return nullptr;
        }
    }
    // Loop exhausted the range below 4 GiB without an accepted placement.
    if (requestedAddr + size > HIGH_BOUND_32BIT_ADDRESS) {
        return nullptr;
    }
    void *resultAddr = ToVoidPtr(requestedAddr);
#endif  // PANDA_TARGET_64
    ASAN_POISON_MEMORY_REGION(resultAddr, size);
    return resultAddr;
}
263 
/// Maps a read-write anonymous region at the exact address `mem` (MAP_FIXED, so any
/// existing mapping in that range is replaced). size must be page-aligned.
/// Returns nullptr on failure; when forcePoison is set, the region is ASAN-poisoned.
void *MapRWAnonymousFixedRaw(void *mem, size_t size, bool forcePoison)
{
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || defined(USE_THREAD_SANITIZER)
    // Refuse ranges that cover the sanitizer's reserved magic address: MAP_FIXED
    // there would clobber sanitizer shadow memory.
    // If this assert fails, please decrease the size of the memory for you program
    // or don't run it with ASAN or TSAN.
    LOG_IF((reinterpret_cast<uintptr_t>(mem) <= MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS) &&
               ((reinterpret_cast<uintptr_t>(mem) + size) >= MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS),
           FATAL, RUNTIME)
        << "Unable to mmap mem [" << reinterpret_cast<uintptr_t>(mem) << "] because of ASAN or TSAN";
#endif
    ASSERT(size % GetPageSize() == 0);
    void *result =  // NOLINTNEXTLINE(hicpp-signed-bitwise)
        mmap(mem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (result == MAP_FAILED) {
        result = nullptr;
    }
    if ((result != nullptr) && forcePoison) {
        // If you have such an error here:
        // ==4120==AddressSanitizer CHECK failed:
        // ../../../../src/libsanitizer/asan/asan_mapping.h:303 "((AddrIsInMem(p))) != (0)" (0x0, 0x0)
        // Look at the big comment at the start of the method.
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}
290 
UnmapRaw(void * mem,size_t size)291 std::optional<Error> UnmapRaw(void *mem, size_t size)
292 {
293     ASAN_UNPOISON_MEMORY_REGION(mem, size);
294     int res = munmap(mem, size);
295     if (UNLIKELY(res == -1)) {
296         return Error(errno);
297     }
298 
299     return {};
300 }
301 
#ifdef PANDA_TARGET_OHOS
#include <sys/prctl.h>

// Fallback definitions for toolchains whose headers do not yet expose the
// anonymous-VMA naming prctl API used by TagAnonymousMemory below.
#ifndef PR_SET_VMA
constexpr int PR_SET_VMA = 0x53564d41;
#endif

#ifndef PR_SET_VMA_ANON_NAME
constexpr unsigned long PR_SET_VMA_ANON_NAME = 0;
#endif
#endif  // PANDA_TARGET_OHOS
313 
/// Attaches a human-readable name to an anonymous mapping via prctl(PR_SET_VMA, ...).
/// No-op on platforms other than OHOS. mem must be page-aligned and size a page multiple.
/// NOTE(review): some kernels store the tag pointer rather than copying the string —
/// ensure `tag` outlives the mapping; verify against the target kernel.
std::optional<Error> TagAnonymousMemory([[maybe_unused]] const void *mem, [[maybe_unused]] size_t size,
                                        [[maybe_unused]] const char *tag)
{
#ifdef PANDA_TARGET_OHOS
    ASSERT(size % GetPageSize() == 0);
    ASSERT(reinterpret_cast<uintptr_t>(mem) % GetPageSize() == 0);

    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
    int res = prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(mem)), size,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(tag)));
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }
#endif  // PANDA_TARGET_OHOS
    return {};
}
333 
/// Returns the number of bytes natively allocated through malloc, as reported by
/// mallinfo/mallinfo2. With ASAN/TSAN (malloc intercepted) or a libc that has no
/// mallinfo, returns DEFAULT_NATIVE_BYTES_FROM_MALLINFO instead.
size_t GetNativeBytesFromMallinfo()
{
    size_t mallinfoBytes;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON)
    mallinfoBytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo with ASAN or TSAN. Return default value";
#else
#if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)

    // For GLIBC, uordblks is total size of space which is allocated by malloc
    // For mobile libc, uordblks is total size of space which is allocated by malloc or mmap called by malloc for
    // non-small allocations
    // mallinfo2 (size_t fields, overflow-safe) exists since glibc 2.33. The version test
    // must also accept any future major version > 2; the previous form
    // `__GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33` would wrongly take the legacy branch there.
#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 33)
    struct mallinfo2 info = mallinfo2();
    mallinfoBytes = info.uordblks;
#else
    struct mallinfo info = mallinfo();
    mallinfoBytes = static_cast<unsigned int>(info.uordblks);
#endif  // glibc >= 2.33

#if defined(__GLIBC__)

    // For GLIBC, hblkhd is total size of space which is allocated by mmap called by malloc for non-small allocations
#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 33)
    mallinfoBytes += info.hblkhd;
#else
    mallinfoBytes += static_cast<unsigned int>(info.hblkhd);
#endif  // glibc >= 2.33

#endif  // __GLIBC__
#else
    mallinfoBytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo without GLIBC or mobile libc. Return default value";
#endif  // __GLIBC__ || PANDA_TARGET_MOBILE
#endif  // PANDA_ASAN_ON || PANDA_TSAN_ON
    // For ASAN or TSAN, return default value. For GLIBC, return uordblks + hblkhd. For mobile libc, return uordblks.
    // For other, return default value.
    return mallinfoBytes;
}
373 
374 }  // namespace ark::os::mem
375