/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/mem_map_allocator.h"
#include "ecmascript/platform/os.h"

namespace panda::ecmascript {
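// Process-wide singleton, created lazily on first use and never destroyed.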
MemMapAllocator *MemMapAllocator::GetInstance()
{
    static MemMapAllocator *vmAllocator_ = new MemMapAllocator();
    return vmAllocator_;
}

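// Pre-reserves the regular-object region at a randomized high address (64-bit, non-Win/Mac/iOS targets only),
// retrying at stepped offsets; on success the reserved mapping is split into the region cache.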
void MemMapAllocator::InitializeRegularRegionMap([[maybe_unused]] size_t alignment)
{
#if defined(PANDA_TARGET_64) && !WIN_OR_MAC_OR_IOS_PLATFORM
    size_t initialRegularObjectCapacity = std::min(capacity_ / 2, INITIAL_REGULAR_OBJECT_CAPACITY);
    size_t i = 0;
    while (i < MEM_MAP_RETRY_NUM) {
        void *addr = reinterpret_cast<void *>(ToUintPtr(RandomGenerateBigAddr(REGULAR_OBJECT_MEM_MAP_BEGIN_ADDR)) +
            i * STEP_INCREASE_MEM_MAP_ADDR);
        MemMap memMap = PageMap(initialRegularObjectCapacity, PAGE_PROT_NONE, alignment, addr);
        if (ToUintPtr(memMap.GetMem()) >= ToUintPtr(addr)) {
            PageTag(memMap.GetMem(), memMap.GetSize(), PageTagType::HEAP);
            PageRelease(memMap.GetMem(), memMap.GetSize());
            memMapPool_.InsertMemMap(memMap);
            memMapPool_.SplitMemMapToCache(memMap);
            break;
        } else {
            PageUnmap(memMap);
            LOG_ECMA(ERROR) << "Regular object mem map big addr fail: " << errno;
        }
        i++;
    }
#endif
}

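// Pre-reserves the huge-object region backing the free list. On 64-bit targets a randomized
// high address is tried first and the final retry accepts whatever address the kernel returns;
// other targets map the region directly.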
void MemMapAllocator::InitializeHugeRegionMap(size_t alignment)
{
    size_t initialHugeObjectCapacity = std::min(capacity_ / 2, INITIAL_HUGE_OBJECT_CAPACITY);
#if defined(PANDA_TARGET_64) && !WIN_OR_MAC_OR_IOS_PLATFORM
    size_t i = 0;
    while (i <= MEM_MAP_RETRY_NUM) {
        void *addr = reinterpret_cast<void *>(ToUintPtr(RandomGenerateBigAddr(HUGE_OBJECT_MEM_MAP_BEGIN_ADDR)) +
            i * STEP_INCREASE_MEM_MAP_ADDR);
        MemMap memMap = PageMap(initialHugeObjectCapacity, PAGE_PROT_NONE, alignment, addr);
        if (ToUintPtr(memMap.GetMem()) >= ToUintPtr(addr) || i == MEM_MAP_RETRY_NUM) {
            PageTag(memMap.GetMem(), memMap.GetSize(), PageTagType::HEAP);
            PageRelease(memMap.GetMem(), memMap.GetSize());
            memMapFreeList_.Initialize(memMap, capacity_);
            break;
        } else {
            PageUnmap(memMap);
            LOG_ECMA(ERROR) << "Huge object mem map big addr fail: " << errno;
        }
        i++;
    }
#else
    MemMap hugeMemMap = PageMap(initialHugeObjectCapacity, PAGE_PROT_NONE, alignment);
    PageTag(hugeMemMap.GetMem(), hugeMemMap.GetSize(), PageTagType::HEAP);
    PageRelease(hugeMemMap.GetMem(), hugeMemMap.GetSize());
    memMapFreeList_.Initialize(hugeMemMap, capacity_);
#endif
}

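// Applies the final protection to pages handed out by Allocate: plain READWRITE for ordinary
// heap spaces, and an executable mapping for machine-code spaces unless JitFort handles Jit code.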
static bool PageProtectMem(bool machineCodeSpace, void *mem, size_t size, [[maybe_unused]] bool isEnableJitFort)
{
    int prot = machineCodeSpace ? PAGE_PROT_EXEC_READWRITE : PAGE_PROT_READWRITE;

    if (!machineCodeSpace) {
        return PageProtect(mem, size, prot);
    }

    // MachineCode and HugeMachineCode space pages:
#if defined(PANDA_TARGET_ARM64) && defined(PANDA_TARGET_OHOS)
    if (isEnableJitFort) {
        // If JitFort is enabled, Jit code lives in the JitFort space, so READWRITE is enough here.
        return PageProtect(mem, size, PAGE_PROT_READWRITE);
    } else {
        // Otherwise Jit code lives in the MachineCode space and needs EXEC_READWRITE and MAP_EXECUTABLE (0x1000).
        void *addr = PageMapExecFortSpace(mem, size, PAGE_PROT_EXEC_READWRITE);
        if (addr != mem) {
            return false;
        }
        return true;
    }
#else
    // Not running on the phone kernel; Jit code lives in the MachineCode space.
    return PageProtect(mem, size, PAGE_PROT_EXEC_READWRITE);
#endif
}

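// Hands out a mapping of `size` bytes. Regular (region-sized) requests are served from the
// committed cache, then from the reserved cache, then from a fresh page mapping; non-regular
// (huge object) requests are carved from the huge-object free list. Returns an empty MemMap
// on capacity overflow or protection failure.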
MemMap MemMapAllocator::Allocate(const uint32_t threadId, size_t size, size_t alignment,
                                 const std::string &spaceName, bool regular, bool isMachineCode, bool isEnableJitFort,
                                 bool shouldPageTag)
{
    MemMap mem;
    PageTagType type = isMachineCode ? PageTagType::MACHINE_CODE : PageTagType::HEAP;

    if (regular) {
        mem = memMapPool_.GetRegularMemFromCommitted(size);
        if (mem.GetMem() != nullptr) {
            bool res = PageProtectMem(isMachineCode, mem.GetMem(), mem.GetSize(), isEnableJitFort);
            if (!res) {
                return MemMap();
            }
            if (shouldPageTag) {
                PageTag(mem.GetMem(), size, type, spaceName, threadId);
            }
            return mem;
        }
        if (UNLIKELY(memMapTotalSize_ + size > capacity_)) {
            LOG_GC(ERROR) << "memory map overflow";
            return MemMap();
        }
        mem = memMapPool_.GetMemFromCache(size);
        if (mem.GetMem() != nullptr) {
            memMapTotalSize_ += size;
            bool res = PageProtectMem(isMachineCode, mem.GetMem(), mem.GetSize(), isEnableJitFort);
            if (!res) {
                return MemMap();
            }
            if (shouldPageTag) {
                PageTag(mem.GetMem(), size, type, spaceName, threadId);
            }
            return mem;
        }
        mem = PageMap(REGULAR_REGION_MMAP_SIZE, PAGE_PROT_NONE, alignment);
        memMapPool_.InsertMemMap(mem);
        mem = memMapPool_.SplitMemFromCache(mem);
    } else {
        if (UNLIKELY(memMapTotalSize_ + size > capacity_)) { // LCOV_EXCL_BR_LINE
            LOG_GC(ERROR) << "memory map overflow";
            return MemMap();
        }
        mem = memMapFreeList_.GetMemFromList(size);
    }
    if (mem.GetMem() != nullptr) {
        bool res = PageProtectMem(isMachineCode, mem.GetMem(), mem.GetSize(), isEnableJitFort);
        if (!res) {
            return MemMap();
        }
        if (shouldPageTag) {
            PageTag(mem.GetMem(), mem.GetSize(), type, spaceName, threadId);
        }
        memMapTotalSize_ += mem.GetSize();
    }
    return mem;
}

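// Returns a region to the allocator. Regular regions are parked in the committed cache for
// quick reuse while the cache has room (unless skipCache is set); otherwise the region is
// freed, and the committed cache is trimmed when ShouldFreeMore() reports excess regions.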
void MemMapAllocator::CacheOrFree(void *mem, size_t size, bool isRegular, size_t cachedSize, bool shouldPageTag,
                                  bool skipCache)
{
    // Clear ThreadId tag and tag the mem with ARKTS HEAP.
    if (shouldPageTag) {
        PageTag(mem, size, PageTagType::HEAP);
    }
    if (!skipCache && isRegular && !memMapPool_.IsRegularCommittedFull(cachedSize)) {
        // Cache regions to accelerate allocation.
        memMapPool_.AddMemToCommittedCache(mem, size);
        return;
    }
    Free(mem, size, isRegular);
    if (!skipCache && isRegular && memMapPool_.ShouldFreeMore(cachedSize) > 0) {
        int freeNum = memMapPool_.ShouldFreeMore(cachedSize);
        for (int i = 0; i < freeNum; i++) {
            void *freeMem = memMapPool_.GetRegularMemFromCommitted(size).GetMem();
            if (freeMem != nullptr) {
                Free(freeMem, size, isRegular);
            } else {
                return;
            }
        }
    }
}

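// Drops access protection, releases the pages, and returns the address range to the
// regular-region cache or the huge-object free list.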
void MemMapAllocator::Free(void *mem, size_t size, bool isRegular)
{
    memMapTotalSize_ -= size;
    if (!PageProtect(mem, size, PAGE_PROT_NONE)) { // LCOV_EXCL_BR_LINE
        return;
    }
    PageRelease(mem, size);
    if (isRegular) {
        memMapPool_.AddMemToCache(mem, size);
    } else {
        memMapFreeList_.AddMemToList(MemMap(mem, size));
    }
}

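// Sizes the pool capacity from physical memory, capped at MAX_MEM_POOL_CAPACITY.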
void MemMapAllocator::AdapterSuitablePoolCapacity()
{
    size_t physicalSize = PhysicalSize();
    capacity_ = std::min<size_t>(physicalSize * DEFAULT_CAPACITY_RATE, MAX_MEM_POOL_CAPACITY);
    LOG_GC(INFO) << "Ark Auto adapter memory pool capacity:" << capacity_;
}

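// Lifts the capacity limit so the capacity checks in Allocate cannot fail during GC.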
void MemMapAllocator::TransferToInfiniteModeForGC()
{
    capacity_ = std::numeric_limits<size_t>::max();
    LOG_GC(INFO) << "MemMapAllocator transfer to infinite mode:" << capacity_;
}
}  // namespace panda::ecmascript