• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "os/mem.h"
17 #include "utils/asan_interface.h"
18 
19 #if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)
20 #include <malloc.h>
21 #endif
22 
23 namespace panda::os::mem {
24 
// Deleter used by BytePtr: releases a region previously created with mmap.
// A null pointer is a no-op, so empty or failed BytePtr instances destroy safely.
void MmapDeleter(std::byte *ptr, size_t size) noexcept
{
    if (ptr == nullptr) {
        return;
    }
    munmap(ptr, size);
}
31 
MapFile(file::File file,uint32_t prot,uint32_t flags,size_t size,size_t file_offset,void * hint)32 BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t file_offset, void *hint)
33 {
34     size_t map_offset = RoundDown(file_offset, GetPageSize());
35     size_t offset = file_offset - map_offset;
36     size_t map_size = size + offset;
37     void *result = mmap(hint, map_size, prot, flags, file.GetFd(), map_offset);
38     // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast)
39     if (result == MAP_FAILED) {
40         return BytePtr(nullptr, 0, MmapDeleter);
41     }
42 
43     // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
44     return BytePtr(static_cast<std::byte *>(result) + offset, size, offset, MmapDeleter);
45 }
46 
MapExecuted(size_t size)47 BytePtr MapExecuted(size_t size)
48 {
49     // By design caller should pass valid size, so don't do any additional checks except ones that
50     // mmap do itself
51     // NOLINTNEXTLINE(hicpp-signed-bitwise)
52     void *result = mmap(nullptr, size, PROT_EXEC | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
53     if (result == reinterpret_cast<void *>(-1)) {
54         result = nullptr;
55     }
56 
57     return BytePtr(static_cast<std::byte *>(result), (result == nullptr) ? 0 : size, MmapDeleter);
58 }
59 
MakeMemWithProtFlag(void * mem,size_t size,int prot)60 std::optional<Error> MakeMemWithProtFlag(void *mem, size_t size, int prot)
61 {
62     int r = mprotect(mem, size, prot);
63     if (r != 0) {
64         return Error(errno);
65     }
66     return {};
67 }
68 
MakeMemReadExec(void * mem,size_t size)69 std::optional<Error> MakeMemReadExec(void *mem, size_t size)
70 {
71     // NOLINTNEXTLINE(hicpp-signed-bitwise)
72     return MakeMemWithProtFlag(mem, size, PROT_EXEC | PROT_READ);
73 }
74 
MakeMemReadWrite(void * mem,size_t size)75 std::optional<Error> MakeMemReadWrite(void *mem, size_t size)
76 {
77     // NOLINTNEXTLINE(hicpp-signed-bitwise)
78     return MakeMemWithProtFlag(mem, size, PROT_WRITE | PROT_READ);
79 }
80 
// Makes [mem, mem + size) read-only; any write afterwards faults.
std::optional<Error> MakeMemReadOnly(void *mem, size_t size)
{
    return MakeMemWithProtFlag(mem, size, PROT_READ);
}
85 
// Revokes all access to [mem, mem + size); any access afterwards faults.
std::optional<Error> MakeMemProtected(void *mem, size_t size)
{
    return MakeMemWithProtFlag(mem, size, PROT_NONE);
}
90 
// Rounds addr down to the start of its OS page.
// The mask arithmetic relies on the page size being a power of two.
uintptr_t AlignDownToPageSize(uintptr_t addr)
{
    const auto page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    return addr & ~(page_size - 1);
}
97 
// Allocates at least `size` bytes whose start address is aligned to
// alignment_in_bytes. Pair with AlignedFree.
// NOTE(review): the rounding mask assumes alignment_in_bytes is a power of
// two — confirm with callers; a non-power-of-two alignment would corrupt
// aligned_size silently.
void *AlignedAlloc(size_t alignment_in_bytes, size_t size)
{
    // aligned_alloc requires size to be a multiple of the alignment, so round up.
    size_t aligned_size = (size + alignment_in_bytes - 1) & ~(alignment_in_bytes - 1);
#if defined PANDA_TARGET_MOBILE || defined PANDA_TARGET_MACOS || defined PANDA_TARGET_IOS || \
    defined PANDA_TARGET_ARKUI_X
    // Fall back to posix_memalign on platforms where aligned_alloc is unavailable.
    void *ret = nullptr;
    int r = posix_memalign(reinterpret_cast<void **>(&ret), alignment_in_bytes, aligned_size);
    if (r != 0) {
        std::cerr << "posix_memalign failed, code: " << r << std::endl;
        ASSERT(0);
    }
#else
    auto ret = aligned_alloc(alignment_in_bytes, aligned_size);
#endif
    // Debug-only post-condition: the returned pointer really is aligned.
    ASSERT(reinterpret_cast<uintptr_t>(ret) == (reinterpret_cast<uintptr_t>(ret) & ~(alignment_in_bytes - 1)));
    return ret;
}
115 
// Releases memory obtained from AlignedAlloc. Passing nullptr is a no-op
// (free(nullptr) is already a no-op; the guard just makes that explicit).
void AlignedFree(void *mem)
{
    if (mem == nullptr) {
        return;
    }
    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
    std::free(mem);
}
121 
GetPageSizeFromOs()122 static uint32_t GetPageSizeFromOs()
123 {
124     // NOLINTNEXTLINE(google-runtime-int)
125     long sz = sysconf(_SC_PAGESIZE);
126     LOG_IF(sz == -1, FATAL, RUNTIME) << "Can't get page size from OS";
127     return static_cast<uint32_t>(sz);
128 }
129 
GetPageSize()130 uint32_t GetPageSize()
131 {
132     // NOLINTNEXTLINE(google-runtime-int)
133     static uint32_t sz = GetPageSizeFromOs();
134     return sz;
135 }
136 
MapRWAnonymousRaw(size_t size,bool force_poison)137 void *MapRWAnonymousRaw(size_t size, bool force_poison)
138 {
139     ASSERT(size % GetPageSize() == 0);
140     // NOLINTNEXTLINE(hicpp-signed-bitwise)
141     void *result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
142     if (result == reinterpret_cast<void *>(-1)) {
143         result = nullptr;
144     }
145     if ((result != nullptr) && force_poison) {
146         ASAN_POISON_MEMORY_REGION(result, size);
147     }
148 
149     return result;
150 }
151 
// Unmaps a sub-range of an existing mapping.
// On Unix munmap already supports partial unmapping, so this simply forwards
// to UnmapRaw; the separate entry point exists for platforms where it cannot.
std::optional<Error> PartiallyUnmapRaw(void *mem, size_t size)
{
    // We can partially unmap memory on Unix systems via common unmap
    return UnmapRaw(mem, size);
}
157 
// Maps `size` bytes of anonymous RW memory whose start address is aligned to
// aligment_in_bytes (must be a multiple of the page size; the mask arithmetic
// below also assumes it is a power of two).
// Strategy: over-allocate by the alignment so an aligned start is guaranteed
// to exist inside the mapping, then unmap the unused head and tail so only
// the aligned [result, result + size) range stays mapped.
// Returns nullptr if size == 0 or the underlying mmap fails.
void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t aligment_in_bytes, bool force_poison)
{
    ASSERT(aligment_in_bytes % GetPageSize() == 0);
    if (size == 0) {
        return nullptr;
    }
    void *result = MapRWAnonymousRaw(size + aligment_in_bytes, force_poison);
    if (result == nullptr) {
        return result;
    }
    auto allocated_mem = reinterpret_cast<uintptr_t>(result);
    // Round the start up to the next aligned address (no-op if already aligned).
    uintptr_t aligned_mem = (allocated_mem & ~(aligment_in_bytes - 1U)) +
                            ((allocated_mem % aligment_in_bytes) != 0U ? aligment_in_bytes : 0U);
    ASSERT(aligned_mem >= allocated_mem);
    size_t unused_in_start = aligned_mem - allocated_mem;
    ASSERT(unused_in_start <= aligment_in_bytes);
    size_t unused_in_end = aligment_in_bytes - unused_in_start;
    // Return the unused head and tail pages to the OS; errors here are ignored
    // (best effort — the aligned region itself is already valid).
    if (unused_in_start != 0) {
        PartiallyUnmapRaw(result, unused_in_start);
    }
    if (unused_in_end != 0) {
        auto end_part = reinterpret_cast<void *>(aligned_mem + size);
        PartiallyUnmapRaw(end_part, unused_in_end);
    }
    return reinterpret_cast<void *>(aligned_mem);
}
184 
// Maps `size` bytes of private anonymous RW memory at exactly the address
// `mem` (MAP_FIXED, which silently replaces any existing mapping there).
// Returns nullptr on failure; optionally ASAN-poisons the fresh region.
void *MapRWAnonymousFixedRaw(void *mem, size_t size, bool force_poison)
{
#if defined(PANDA_ASAN_ON)
    // If this assert fails, please decrease the size of the memory for you program
    // or don't run it with ASAN.
    // The requested range must not cover MMAP_FIXED_MAGIC_ADDR_FOR_ASAN,
    // otherwise the poisoning below touches memory ASAN cannot shadow.
    if (!((reinterpret_cast<uintptr_t>(mem) > MMAP_FIXED_MAGIC_ADDR_FOR_ASAN) ||
          ((reinterpret_cast<uintptr_t>(mem) + size) < MMAP_FIXED_MAGIC_ADDR_FOR_ASAN))) {
        ASSERT((reinterpret_cast<uintptr_t>(mem) > MMAP_FIXED_MAGIC_ADDR_FOR_ASAN) ||
               ((reinterpret_cast<uintptr_t>(mem) + size) < MMAP_FIXED_MAGIC_ADDR_FOR_ASAN));
        // Abort even in release builds where ASSERT compiles to a no-op.
        std::abort();
    }
#endif
    ASSERT(size % GetPageSize() == 0);
    void *result =  // NOLINTNEXTLINE(hicpp-signed-bitwise)
        mmap(mem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (result == reinterpret_cast<void *>(-1)) {
        result = nullptr;
    }
    if ((result != nullptr) && force_poison) {
        // If you have such an error here:
        // ==4120==AddressSanitizer CHECK failed:
        // ../../../../src/libsanitizer/asan/asan_mapping.h:303 "((AddrIsInMem(p))) != (0)" (0x0, 0x0)
        // Look at the big comment at the start of the method.
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}
213 
UnmapRaw(void * mem,size_t size)214 std::optional<Error> UnmapRaw(void *mem, size_t size)
215 {
216     ASAN_UNPOISON_MEMORY_REGION(mem, size);
217     int res = munmap(mem, size);
218     if (UNLIKELY(res == -1)) {
219         return Error(errno);
220     }
221 
222     return {};
223 }
224 
225 #ifdef PANDA_TARGET_OHOS
226 #include <sys/prctl.h>
227 
228 #ifndef PR_SET_VMA
229 constexpr int PR_SET_VMA = 0x53564d41;
230 #endif
231 
232 #ifndef PR_SET_VMA_ANON_NAME
233 constexpr unsigned long PR_SET_VMA_ANON_NAME = 0;
234 #endif
235 #endif  // PANDA_TARGET_OHOS
236 
// Attaches a human-readable name to an anonymous mapping so it shows up in
// /proc/<pid>/maps. Only effective on OHOS; a successful no-op elsewhere.
// NOTE(review): prctl(PR_SET_VMA_ANON_NAME) stores the tag pointer, not a
// copy — the string must outlive the mapping; confirm with callers.
std::optional<Error> TagAnonymousMemory([[maybe_unused]] const void *mem, [[maybe_unused]] size_t size,
                                        [[maybe_unused]] const char *tag)
{
#ifdef PANDA_TARGET_OHOS
    // The kernel requires a page-aligned, page-sized range.
    ASSERT(size % GetPageSize() == 0);
    ASSERT(reinterpret_cast<uintptr_t>(mem) % GetPageSize() == 0);

    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
    int res = prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(mem)), size,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(tag)));
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }
#endif  // PANDA_TARGET_OHOS
    return {};
}
256 
// Reports the number of bytes currently allocated through the native malloc
// allocator, as seen by mallinfo/mallinfo2.
// Under ASAN/TSAN the interceptor's numbers are not meaningful, and without
// GLIBC or the mobile libc mallinfo is unavailable, so a fixed default
// (DEFAULT_NATIVE_BYTES_FROM_MALLINFO) is returned in those configurations.
size_t GetNativeBytesFromMallinfo()
{
    size_t mallinfo_bytes;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON)
    mallinfo_bytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo with ASAN or TSAN. Return default value";
#else
#if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)

    // For GLIBC, uordblks is total size of space which is allocated by malloc
    // For mobile libc, uordblks is total size of space which is allocated by malloc or mmap called by malloc for
    // non-small allocations
    // glibc >= 2.33 provides mallinfo2 with size_t fields (the old mallinfo's
    // int fields overflow past 2 GiB).
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    struct mallinfo2 info = mallinfo2();
    mallinfo_bytes = info.uordblks;
#else
    struct mallinfo info = mallinfo();
    mallinfo_bytes = static_cast<unsigned int>(info.uordblks);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#if defined(__GLIBC__)

    // For GLIBC, hblkhd is total size of space which is allocated by mmap called by malloc for non-small allocations
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    mallinfo_bytes += info.hblkhd;
#else
    mallinfo_bytes += static_cast<unsigned int>(info.hblkhd);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#endif  // __GLIBC__
#else
    mallinfo_bytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo without GLIBC or mobile libc. Return default value";
#endif  // __GLIBC__ || PANDA_TARGET_MOBILE
#endif  // PANDA_ASAN_ON || PANDA_TSAN_ON
    // For ASAN or TSAN, return default value. For GLIBC, return uordblks + hblkhd. For mobile libc, return uordblks.
    // For other, return default value.
    return mallinfo_bytes;
}
296 
297 }  // namespace panda::os::mem
298