/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "os/mem.h"
#include "utils/type_helpers.h"
#include "utils/asan_interface.h"
#include "utils/tsan_interface.h"

#include <limits>
#include <sys/mman.h>
#include <unistd.h>

#include <type_traits>

#if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)
#include <malloc.h>
#endif

namespace panda::os::mem {

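// Deleter bound into BytePtr below: it releases a mapping created by mmap once the smart
// pointer goes out of scope. A null pointer is tolerated so empty BytePtr instances can be
// destroyed safely.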
void MmapDeleter(std::byte *ptr, size_t size) noexcept
{
    if (ptr != nullptr) {
        munmap(ptr, size);
    }
}

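// Maps a region of `file` into memory. mmap requires the file offset to be a multiple of the
// page size, so the requested offset is rounded down to a page boundary and the returned
// pointer is advanced by the remainder. For example, with 4 KiB pages a file_offset of 5000
// rounds down to map_offset 4096, so mmap maps size + 904 bytes and the caller gets a pointer
// 904 bytes into the mapping; BytePtr keeps that extra offset so MmapDeleter can unmap the
// whole region.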
BytePtr MapFile(file::File file, uint32_t prot, uint32_t flags, size_t size, size_t file_offset, void *hint)
{
    size_t map_offset = RoundDown(file_offset, GetPageSize());
    size_t offset = file_offset - map_offset;
    size_t map_size = size + offset;
    void *result = mmap(hint, map_size, prot, flags, file.GetFd(), map_offset);
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast)
    if (result == MAP_FAILED) {
        return BytePtr(nullptr, 0, MmapDeleter);
    }

    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    return BytePtr(static_cast<std::byte *>(result) + offset, size, offset, MmapDeleter);
}

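// Creates an anonymous writable and executable mapping, presumably as a buffer for generated
// code (callers are expected to seal it later, e.g. via MakeMemReadExec). Returns a BytePtr
// that owns nothing if mmap fails.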
BytePtr MapExecuted(size_t size)
{
    // By design the caller should pass a valid size, so don't do any additional checks except
    // the ones mmap does itself
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    void *result = mmap(nullptr, size, PROT_EXEC | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (result == reinterpret_cast<void *>(-1)) {
        result = nullptr;
    }

    return BytePtr(static_cast<std::byte *>(result), (result == nullptr) ? 0 : size, MmapDeleter);
}

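// The three helpers below are thin wrappers over mprotect that switch a page-aligned region to
// execute+read, read+write, or read-only protection and report failure as Error(errno).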
std::optional<Error> MakeMemReadExec(void *mem, size_t size)
{
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    int r = mprotect(mem, size, PROT_EXEC | PROT_READ);
    if (r != 0) {
        return Error(errno);
    }
    return {};
}

std::optional<Error> MakeMemReadWrite(void *mem, size_t size)
{
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    int r = mprotect(mem, size, PROT_WRITE | PROT_READ);
    if (r != 0) {
        return Error(errno);
    }

    return {};
}

std::optional<Error> MakeMemReadOnly(void *mem, size_t size)
{
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    int r = mprotect(mem, size, PROT_READ);
    if (r != 0) {
        return Error(errno);
    }

    return {};
}

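// Rounds an address down to the start of its page by masking off the low bits; this relies on
// the page size being a power of two. With 4 KiB pages, 0x12345 becomes 0x12000.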
uintptr_t AlignDownToPageSize(uintptr_t addr)
{
    const auto SYS_PAGE_SIZE = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    addr &= ~(SYS_PAGE_SIZE - 1);
    return addr;
}

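// Allocates `size` bytes aligned to `alignment_in_bytes`. The size is first rounded up to a
// multiple of the alignment because aligned_alloc traditionally requires that; posix_memalign
// is used instead on mobile and macOS targets.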
void *AlignedAlloc(size_t alignment_in_bytes, size_t size)
{
    size_t aligned_size = (size + alignment_in_bytes - 1) & ~(alignment_in_bytes - 1);
#if defined PANDA_TARGET_MOBILE || defined PANDA_TARGET_MACOS
    void *ret = nullptr;
    int r = posix_memalign(reinterpret_cast<void **>(&ret), alignment_in_bytes, aligned_size);
    if (r != 0) {
        std::cerr << "posix_memalign failed, code: " << r << std::endl;
        ASSERT(0);
    }
#else
    auto ret = aligned_alloc(alignment_in_bytes, aligned_size);
#endif
    ASSERT(reinterpret_cast<uintptr_t>(ret) == (reinterpret_cast<uintptr_t>(ret) & ~(alignment_in_bytes - 1)));
    return ret;
}

static uint32_t GetPageSizeFromOs()
{
    // NOLINTNEXTLINE(google-runtime-int)
    long sz = sysconf(_SC_PAGESIZE);
    LOG_IF(sz == -1, FATAL, RUNTIME) << "Can't get page size from OS";
    return static_cast<uint32_t>(sz);
}

uint32_t GetPageSize()
{
    // NOLINTNEXTLINE(google-runtime-int)
    static uint32_t sz = GetPageSizeFromOs();
    return sz;
}

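// Maps `size` bytes (a multiple of the page size) of private anonymous read-write memory.
// When force_poison is set the whole region is poisoned for ASan; under ASan, reads and writes
// to it will be reported until it is unpoisoned.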
void *MapRWAnonymousRaw(size_t size, bool force_poison)
{
    ASSERT(size % GetPageSize() == 0);
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    void *result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (result == reinterpret_cast<void *>(-1)) {
        result = nullptr;
    }
    if ((result != nullptr) && force_poison) {
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}

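// Returns an anonymous read-write mapping whose start address is aligned to alignment_in_bytes
// (a multiple of the page size). The implementation over-allocates by one alignment unit and
// then unmaps the unused head and tail. For example, with a 16 KiB alignment and 4 KiB pages,
// a mapping that starts at 0x...1000 is aligned up to 0x...4000, the 12 KiB head and the 4 KiB
// tail are released, and the aligned pointer is returned.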
void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t alignment_in_bytes, bool force_poison)
{
    ASSERT(alignment_in_bytes % GetPageSize() == 0);
    if (size == 0) {
        return nullptr;
    }
    void *result = MapRWAnonymousRaw(size + alignment_in_bytes, force_poison);
    if (result == nullptr) {
        return result;
    }
    auto allocated_mem = reinterpret_cast<uintptr_t>(result);
    uintptr_t aligned_mem = (allocated_mem & ~(alignment_in_bytes - 1U)) +
                            ((allocated_mem % alignment_in_bytes) != 0U ? alignment_in_bytes : 0U);
    ASSERT(aligned_mem >= allocated_mem);
    size_t unused_in_start = aligned_mem - allocated_mem;
    ASSERT(unused_in_start <= alignment_in_bytes);
    size_t unused_in_end = alignment_in_bytes - unused_in_start;
    if (unused_in_start != 0) {
        UnmapRaw(result, unused_in_start);
    }
    if (unused_in_end != 0) {
        auto end_part = reinterpret_cast<void *>(aligned_mem + size);
        UnmapRaw(end_part, unused_in_end);
    }
    return reinterpret_cast<void *>(aligned_mem);
}

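// Like MapRWAnonymousRaw, but maps at the exact address `mem` using MAP_FIXED, which silently
// replaces any existing mapping in that range. Under ASan an extra check aborts when the
// requested range covers MMAP_FIXED_MAGIC_ADDR_FOR_ASAN, presumably because such addresses
// fall into ASan's reserved address space (see the comments inside).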
void *MapRWAnonymousFixedRaw(void *mem, size_t size, bool force_poison)
{
#if defined(PANDA_ASAN_ON)
    // If this assert fails, please decrease the size of the memory for your program
    // or don't run it with ASAN.
    if (!((reinterpret_cast<uintptr_t>(mem) > MMAP_FIXED_MAGIC_ADDR_FOR_ASAN) ||
          ((reinterpret_cast<uintptr_t>(mem) + size) < MMAP_FIXED_MAGIC_ADDR_FOR_ASAN))) {
        ASSERT((reinterpret_cast<uintptr_t>(mem) > MMAP_FIXED_MAGIC_ADDR_FOR_ASAN) ||
               ((reinterpret_cast<uintptr_t>(mem) + size) < MMAP_FIXED_MAGIC_ADDR_FOR_ASAN));
        std::abort();
    }
#endif
    ASSERT(size % GetPageSize() == 0);
    void *result =  // NOLINTNEXTLINE(hicpp-signed-bitwise)
        mmap(mem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (result == reinterpret_cast<void *>(-1)) {
        result = nullptr;
    }
    if ((result != nullptr) && force_poison) {
        // If you have such an error here:
        // ==4120==AddressSanitizer CHECK failed:
        // ../../../../src/libsanitizer/asan/asan_mapping.h:303 "((AddrIsInMem(p))) != (0)" (0x0, 0x0)
        // Look at the big comment at the start of the method.
        ASAN_POISON_MEMORY_REGION(result, size);
    }

    return result;
}

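// Unmaps a region previously obtained from one of the mapping helpers above. The region is
// unpoisoned first so ASan does not keep stale poison state for an address range that the
// kernel may hand out again later.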
std::optional<Error> UnmapRaw(void *mem, size_t size)
{
    ASAN_UNPOISON_MEMORY_REGION(mem, size);
    int res = munmap(mem, size);
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }

    return {};
}

#ifdef PANDA_TARGET_MOBILE
#include <sys/prctl.h>

#ifndef PR_SET_VMA
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define PR_SET_VMA 0x53564d41
#endif

#ifndef PR_SET_VMA_ANON_NAME
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define PR_SET_VMA_ANON_NAME 0
#endif
#endif  // PANDA_TARGET_MOBILE

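// Names an anonymous, page-aligned mapping via prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...).
// On kernels that support it (notably Android), the tag then shows up as "[anon:<tag>]" in
// /proc/<pid>/maps, which makes memory accounting easier. On other targets this is a no-op.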
std::optional<Error> TagAnonymousMemory([[maybe_unused]] const void *mem, [[maybe_unused]] size_t size,
                                        [[maybe_unused]] const char *tag)
{
#ifdef PANDA_TARGET_MOBILE
    ASSERT(size % GetPageSize() == 0);
    ASSERT(reinterpret_cast<uintptr_t>(mem) % GetPageSize() == 0);

    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
    int res = prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(mem)), size,
                    // NOLINTNEXTLINE(google-runtime-int)
                    static_cast<unsigned long>(ToUintPtr(tag)));
    if (UNLIKELY(res == -1)) {
        return Error(errno);
    }
#endif  // PANDA_TARGET_MOBILE
    return {};
}

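// Estimates the number of bytes currently allocated through the native allocator. Under ASan
// or TSan (which intercept malloc) and on libcs without mallinfo, a fixed default is returned.
// On glibc 2.33+ mallinfo2 is preferred because the older mallinfo reports sizes in int fields
// that can overflow for large heaps.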
size_t GetNativeBytesFromMallinfo()
{
    size_t mallinfo_bytes;
#if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON)
    mallinfo_bytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo with ASAN or TSAN. Return default value";
#else
#if defined(__GLIBC__) || defined(PANDA_TARGET_MOBILE)

    // For GLIBC, uordblks is the total size of space allocated by malloc
    // For MOBILE_LIBC, uordblks is the total size of space allocated by malloc or by mmap calls made by
    // malloc for non-small allocations
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    struct mallinfo2 info = mallinfo2();
    mallinfo_bytes = info.uordblks;
#else
    struct mallinfo info = mallinfo();
    mallinfo_bytes = static_cast<unsigned int>(info.uordblks);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#if defined(__GLIBC__)

    // For GLIBC, hblkhd is the total size of space allocated by mmap calls made by malloc for non-small
    // allocations
#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33
    mallinfo_bytes += info.hblkhd;
#else
    mallinfo_bytes += static_cast<unsigned int>(info.hblkhd);
#endif  // __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 33

#endif  // __GLIBC__
#else
    mallinfo_bytes = DEFAULT_NATIVE_BYTES_FROM_MALLINFO;
    LOG(INFO, RUNTIME) << "Get native bytes from mallinfo without GLIBC or MOBILE_LIBC. Return default value";
#endif  // __GLIBC__ || PANDA_TARGET_MOBILE
#endif  // PANDA_ASAN_ON || PANDA_TSAN_ON
    // For ASAN or TSAN, return the default value. For GLIBC, return uordblks + hblkhd. For MOBILE_LIBC,
    // return uordblks. For others, return the default value.
    return mallinfo_bytes;
}

}  // namespace panda::os::mem