/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "common_components/heap/allocator/memory_map.h"

#include <algorithm>
#include "base/globals.h"
#ifdef _WIN64
#include <errhandlingapi.h>
#include <handleapi.h>
#include <memoryapi.h>
#endif

#include "common_components/platform/os.h"
#include "common_components/common_runtime/hooks.h"
#include "common_components/base/sys_call.h"
#include "common_components/log/log.h"

#include "securec.h"
namespace common {
using namespace std;

// Not thread safe; do not call from multiple threads.
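// Illustrative usage sketch (not part of this file; the Option field values shown are assumptions
// inferred from how MapMemory reads them below — reqBase/flags feed mmap(), prot feeds mprotect(),
// protAll selects whole-range protection, and tag names the mapping):
//
//   MemoryMap::Option opt = {};
//   opt.reqBase = nullptr;                    // let the kernel choose the base address
//   opt.flags = MAP_PRIVATE | MAP_ANONYMOUS;  // anonymous private mapping
//   opt.prot = PROT_READ | PROT_WRITE;        // protection for the initially accessible region
//   opt.protAll = false;                      // make only initSize bytes accessible up front
//   opt.tag = "example-region";
//   MemoryMap* map = MemoryMap::MapMemory(64UL << 20, ALLOC_UTIL_PAGE_SIZE, opt);  // 64MB reserve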
MemoryMap* MemoryMap::MapMemory(size_t reqSize, size_t initSize, const Option& opt)
{
    void* mappedAddr = nullptr;
    // Round the request up to whole pages.
    reqSize = AllocUtilRndUp<size_t>(reqSize, ALLOC_UTIL_PAGE_SIZE);

#ifdef _WIN64
    // Windows does not support mapping a no-access view and upgrading it later: VirtualProtect() can
    // only change page protections on memory allocated by GlobalAlloc(), HeapAlloc(), or LocalAlloc().
    // For mapped views, the new protection must be compatible with the access specified when the view
    // was mapped, so we cannot create a no-access view and then relax its protection as on Linux.
    // Instead, we simply reserve the range as PAGE_READWRITE from the beginning.
    mappedAddr = VirtualAlloc(NULL, reqSize, MEM_RESERVE, PAGE_READWRITE);
#else
    DLOG(ALLOC, "MemoryMap::MapMemory size %zu", reqSize);
    mappedAddr = mmap(opt.reqBase, reqSize, PROT_NONE, opt.flags, -1, 0);
#endif

    bool failure = false;
#if defined(_WIN64) || defined(__APPLE__)
    if (mappedAddr != NULL) {
#else
    if (mappedAddr != MAP_FAILED) {
        (void)madvise(mappedAddr, reqSize, MADV_NOHUGEPAGE);
        COMMON_PRCTL(mappedAddr, reqSize, opt.tag);
#endif
        // If protAll is set, the whole range is protected at creation and never changed afterwards
        // (saves later mprotect calls); otherwise only the initial region is made accessible.
        size_t protSize = opt.protAll ? reqSize : initSize;
        if (!ProtectMemInternal(mappedAddr, protSize, opt.prot)) {
            failure = true;
            LOG_COMMON(ERROR) << "MemoryMap::MapMemory mprotect failed";
            ALLOCUTIL_MEM_UNMAP(mappedAddr, reqSize);
        }
    } else {
        failure = true;
    }
    LOGF_CHECK(!failure) << "MemoryMap::MapMemory failed reqSize: " << reqSize << " initSize: " << initSize;

    DLOG(ALLOC, "MemoryMap::MapMemory size %zu successful at %p", reqSize, mappedAddr);
    MemoryMap* memMap = new (std::nothrow) MemoryMap(mappedAddr, initSize, reqSize);
    LOGF_CHECK(memMap != nullptr) << "new MemoryMap failed";

    os::PrctlSetVMA(mappedAddr, reqSize, (std::string("ArkTS Heap CMCGC ") + opt.tag).c_str());
    return memMap;
}

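// Maps a reqSize-byte region that does not cross a 4GB boundary, so every address in the region
// shares the same upper 32 bits (recorded via SetBaseAddress below). On 64-bit targets this is done
// by over-mapping twice the requested size and then trimming the slack on both sides of the chosen
// sub-range. Worked example (illustrative numbers): for reqSize = 1GB, 2GB is mapped; if the mapping
// starts at 0x1'B000'0000, the next 4GB boundary is 0x2'0000'0000 and leftSize = 1.25GB > reqSize,
// so the region becomes [0x1'C000'0000, 0x2'0000'0000), entirely below the boundary, with
// baseAddr = 0x1'0000'0000.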
MemoryMap* MemoryMap::MapMemoryAlignInner4G(uint64_t reqSize, uint64_t initSize, const Option& opt)
{
    static constexpr uint64_t MAX_SUPPORT_CAPACITY = 4ULL * GB;

    void* mappedAddr = nullptr;
    reqSize = AllocUtilRndUp<size_t>(reqSize, ALLOC_UTIL_PAGE_SIZE);
#ifdef PANDA_TARGET_64
    // Over-map twice the requested size so a 4GB-boundary-free sub-range of reqSize bytes is
    // guaranteed to exist inside the mapping.
    size_t needReqSize = reqSize * 2;
#else
    size_t needReqSize = reqSize;
#endif

#ifdef _WIN64
    // See the comment in MapMemory above: Windows cannot create a no-access view and later relax
    // its protection, so the range is reserved as PAGE_READWRITE from the beginning.
    mappedAddr = VirtualAlloc(NULL, needReqSize, MEM_RESERVE, PAGE_READWRITE);
#else
    DLOG(ALLOC, "MemoryMap::MapMemoryAlignInner4G size %zu", needReqSize);
    mappedAddr = mmap(opt.reqBase, needReqSize, PROT_NONE, opt.flags, -1, 0);
#endif

    uintptr_t baseAddr = 0x0;
#ifdef PANDA_TARGET_64
    uintptr_t startAddr = reinterpret_cast<uintptr_t>(mappedAddr);
    uintptr_t alignAddr = AllocUtilRndUp(startAddr, MAX_SUPPORT_CAPACITY);

    // Pick a reqSize-byte sub-range that does not cross the 4GB boundary at alignAddr:
    // - the whole mapping already sits below the boundary: keep startAddr;
    // - there is room for the region entirely below the boundary: end it exactly at alignAddr;
    // - otherwise: start the region at the boundary itself.
    size_t leftSize = alignAddr - startAddr;
    uintptr_t remainderAddr = alignAddr;
    if (leftSize > needReqSize) {
        remainderAddr = startAddr;
    } else if (leftSize > reqSize) {
        remainderAddr = alignAddr - reqSize;
    }
    mappedAddr = reinterpret_cast<void *>(remainderAddr);
    // Every address in the retained region shares these upper 32 bits.
    baseAddr = remainderAddr & (0xFFFFFFFFULL << 32);

    // Trim the unused slack on both sides of the retained region.
    auto leftUnmapAddr = reinterpret_cast<void *>(startAddr);
    size_t leftUnmapSize = remainderAddr - reinterpret_cast<uintptr_t>(leftUnmapAddr);

    auto rightUnmapAddr = reinterpret_cast<void *>(remainderAddr + reqSize);
    size_t rightUnmapSize = (startAddr + needReqSize) - reinterpret_cast<uintptr_t>(rightUnmapAddr);
#ifdef _WIN64
    VirtualFree(leftUnmapAddr, leftUnmapSize, MEM_DECOMMIT);
    VirtualFree(rightUnmapAddr, rightUnmapSize, MEM_DECOMMIT);
#else
    munmap(leftUnmapAddr, leftUnmapSize);
    munmap(rightUnmapAddr, rightUnmapSize);
#endif
#endif
    SetBaseAddress(baseAddr);

    bool failure = false;
#if defined(_WIN64) || defined(__APPLE__)
    if (mappedAddr != NULL) {
#else
    if (mappedAddr != MAP_FAILED) {
        (void)madvise(mappedAddr, reqSize, MADV_NOHUGEPAGE);
        COMMON_PRCTL(mappedAddr, reqSize, opt.tag);
#endif
        // If protAll is set, the whole range is protected at creation and never changed afterwards
        // (saves later mprotect calls); otherwise only the initial region is made accessible.
        size_t protSize = opt.protAll ? reqSize : initSize;
        if (!ProtectMemInternal(mappedAddr, protSize, opt.prot)) {
            failure = true;
            LOG_COMMON(ERROR) << "MemoryMap::MapMemoryAlignInner4G mprotect failed";
            ALLOCUTIL_MEM_UNMAP(mappedAddr, reqSize);
        }
    } else {
        failure = true;
    }
    LOGF_CHECK(!failure) << "MemoryMap::MapMemoryAlignInner4G failed reqSize: " << reqSize << " initSize: " << initSize;

    DLOG(ALLOC, "MemoryMap::MapMemoryAlignInner4G size %zu successful at %p", reqSize, mappedAddr);
    MemoryMap* memMap = new (std::nothrow) MemoryMap(mappedAddr, initSize, reqSize);
    LOGF_CHECK(memMap != nullptr) << "new MemoryMap failed";

    // Only reqSize bytes remain mapped here (the 64-bit path trimmed the slack above, and elsewhere
    // needReqSize == reqSize), so name exactly that range.
    os::PrctlSetVMA(mappedAddr, reqSize, (std::string("ArkTS Heap CMCGC ") + opt.tag).c_str());
    return memMap;
}

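// On Windows, MapMemory only reserves address space (MEM_RESERVE), so pages must be committed
// before first use. Minimal usage sketch (hypothetical caller; `page` stands for some address
// inside a range previously reserved by MapMemory):
//
//   MemoryMap::CommitMemory(page, ALLOC_UTIL_PAGE_SIZE);  // back one page with committed memory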
#ifdef _WIN64
void MemoryMap::CommitMemory(void* addr, size_t size)
{
    LOGE_IF(UNLIKELY_CC(!VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE))) <<
        "VirtualAlloc commit failed in GetPage, errno: " << GetLastError();
}
#endif

MemoryMap::MemoryMap(void* baseAddr, size_t initSize, size_t mappedSize)
    : memBaseAddr_(baseAddr), memCurrSize_(initSize), memMappedSize_(mappedSize)
{
    memCurrEndAddr_ = reinterpret_cast<void*>(reinterpret_cast<HeapAddress>(memBaseAddr_) + memCurrSize_);
    memMappedEndAddr_ = reinterpret_cast<void*>(reinterpret_cast<HeapAddress>(memBaseAddr_) + memMappedSize_);
}

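// Applies page protection to [addr, addr + size). On Windows this is a no-op that returns true,
// because the range is already reserved as PAGE_READWRITE by MapMemory; on POSIX it forwards to
// mprotect().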
bool MemoryMap::ProtectMemInternal(void* addr, size_t size, int prot)
{
    DLOG(ALLOC, "MemoryMap::ProtectMem %p, size %zu, prot %d", addr, size, prot);
#ifdef _WIN64
    return true;
#else
    int ret = mprotect(addr, size, prot);
    return (ret == 0);
#endif
}

MemoryMap::~MemoryMap()
{
    ALLOCUTIL_MEM_UNMAP(memBaseAddr_, memMappedSize_);
    memBaseAddr_ = nullptr;
    memCurrEndAddr_ = nullptr;
    memMappedEndAddr_ = nullptr;
}
} // namespace common