/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_WORK_MANAGER_INL_H
#define ECMASCRIPT_MEM_WORK_MANAGER_INL_H

#include "ecmascript/mem/work_manager.h"

#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/tlab_allocator-inl.h"

namespace panda::ecmascript {
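// Binds this holder to its owning heap, work manager, and global work stack,
// and creates the continuous queues that are reused across GC cycles.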
void WorkNodeHolder::Setup(Heap *heap, WorkManager *workManager, GlobalWorkStack *workStack)
{
    heap_ = heap;
    workManager_ = workManager;
    workStack_ = workStack;
    continuousQueue_ = new ProcessQueue();
    continuousJSWeakMapQueue_ = new JSWeakMapProcessQueue();
}

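// Releases the continuous queues created in Setup.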
void WorkNodeHolder::Destroy()
{
    continuousQueue_->Destroy();
    delete continuousQueue_;
    continuousQueue_ = nullptr;

    continuousJSWeakMapQueue_->Destroy();
    delete continuousJSWeakMapQueue_;
    continuousJSWeakMapQueue_ = nullptr;
}

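// Per-cycle setup: allocates the in/out work nodes, attaches the weak-reference
// and JSWeakMap queues to their continuous backing queues, resets the size
// counters, and creates a TLAB allocator for every GC type except OLD_GC.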
void WorkNodeHolder::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
    inNode_ = workManager_->AllocateWorkNode();
    cachedInNode_ = workManager_->AllocateWorkNode();
    outNode_ = workManager_->AllocateWorkNode();
    weakQueue_ = new ProcessQueue();
    weakQueue_->BeginMarking(continuousQueue_);
    jsWeakMapQueue_ = new JSWeakMapProcessQueue();
    jsWeakMapQueue_->BeginMarking(continuousJSWeakMapQueue_);
    aliveSize_ = 0;
    promotedSize_ = 0;
    parallelGCTaskPhase_ = taskPhase;
    if (gcType != TriggerGCType::OLD_GC) {
        allocator_ = new TlabAllocator(heap_);
    }
}

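// Per-cycle teardown: drains the weak queues back into their continuous
// queues and finalizes the TLAB allocator, if one was created.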
void WorkNodeHolder::Finish()
{
    if (weakQueue_ != nullptr) {
        weakQueue_->FinishMarking(continuousQueue_);
        delete weakQueue_;
        weakQueue_ = nullptr;
    }
    if (jsWeakMapQueue_ != nullptr) {
        jsWeakMapQueue_->FinishMarking(continuousJSWeakMapQueue_);
        delete jsWeakMapQueue_;
        jsWeakMapQueue_ = nullptr;
    }
    if (allocator_ != nullptr) {
        allocator_->Finalize();
        delete allocator_;
        allocator_ = nullptr;
    }
    parallelGCTaskPhase_ = ParallelGCTaskPhase::UNDEFINED_TASK;
}

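// Pushes a marked object onto the thread-local in-node. If the node is full,
// it is flushed to the global work stack and the push is retried on the
// fresh in-node.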
bool WorkNodeHolder::Push(TaggedObject *object)
{
    if (!inNode_->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal();
        return inNode_->PushObject(ToUintPtr(object));
    }
    return true;
}

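// Publishes the current in-node to the global work stack and replaces it with
// a node from the cache chain (allocating a new one when the chain runs out).
// Optionally posts a parallel GC task so idle workers can help, except while
// an incremental mark is in progress.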
void WorkNodeHolder::PushWorkNodeToGlobal(bool postTask)
{
    if (!inNode_->IsEmpty()) {
        workStack_->Push(inNode_);
        inNode_ = cachedInNode_;
        ASSERT(inNode_ != nullptr);
        cachedInNode_ = cachedInNode_->Next();
        if (cachedInNode_ == nullptr) {
            cachedInNode_ = workManager_->AllocateWorkNode();
        }
        if (postTask && heap_->IsParallelGCEnabled() && heap_->CheckCanDistributeTask() &&
            !(heap_->IsMarking() && heap_->GetIncrementalMarker()->IsTriggeredIncrementalMark())) {
            heap_->PostParallelGCTask(parallelGCTaskPhase_);
        }
    }
}

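// Pops an object to process. When the out-node is exhausted, the in-node is
// swapped in first; otherwise the spent out-node is chained onto the cache
// and a replacement is stolen from the global work stack.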
bool WorkNodeHolder::Pop(TaggedObject **object)
{
    if (!outNode_->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode_->IsEmpty()) {
            WorkNode *tmp = outNode_;
            outNode_ = inNode_;
            inNode_ = tmp;
        } else {
            cachedInNode_->SetNext(outNode_);
            outNode_->SetNext(nullptr);
            if (!PopWorkNodeFromGlobal()) {
                return false;
            }
        }
        return outNode_->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

bool WorkNodeHolder::PopWorkNodeFromGlobal()
{
    return workStack_->Pop(&outNode_);
}

void WorkNodeHolder::PushWeakReference(JSTaggedType *weak)
{
    weakQueue_->PushBack(weak);
}

void WorkNodeHolder::PushJSWeakMap(TaggedObject *jsWeakMap)
{
    jsWeakMapQueue_->PushBack(jsWeakMap);
}

void WorkNodeHolder::IncreaseAliveSize(size_t size)
{
    aliveSize_ += size;
}

void WorkNodeHolder::IncreasePromotedSize(size_t size)
{
    promotedSize_ += size;
}

ProcessQueue *WorkNodeHolder::GetWeakReferenceQueue() const
{
    return weakQueue_;
}

JSWeakMapProcessQueue *WorkNodeHolder::GetJSWeakMapQueue() const
{
    return jsWeakMapQueue_;
}

TlabAllocator *WorkNodeHolder::GetTlabAllocator() const
{
    return allocator_;
}

JSThread *WorkNodeHolder::GetJSThread() const
{
    return heap_->GetJSThread();
}

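// Reserves the first work-node chunk up front; AllocateWorkNode bump-allocates
// nodes out of it and grabs a fresh chunk whenever the current one fills up.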
WorkManagerBase::WorkManagerBase(NativeAreaAllocator *allocator)
    : spaceChunk_(allocator), workSpace_(0), spaceStart_(0), spaceEnd_(0)
{
    auto allocatedSpace = GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE);
    ASSERT(allocatedSpace != nullptr);
    workSpace_ = ToUintPtr(allocatedSpace);
}

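// Carves a WorkNode, its Stack header, and the stack's storage area out of the
// current chunk in one contiguous bump allocation (guarded by mtx_). Exhausted
// chunks are parked in agedSpaces_ for later release.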
WorkNode *WorkManagerBase::AllocateWorkNode()
{
    LockHolder lock(mtx_);
    size_t allocatedSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
    ASSERT(allocatedSize < WORKNODE_SPACE_SIZE);

    uintptr_t begin = spaceStart_;
    if (begin + allocatedSize >= spaceEnd_) {
        agedSpaces_.emplace_back(workSpace_);
        workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
        spaceStart_ = workSpace_;
        spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
        begin = spaceStart_;
    }
    spaceStart_ = begin + allocatedSize;
    Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
    stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + allocatedSize);
    WorkNode *work = reinterpret_cast<WorkNode *>(begin);
    return new (work) WorkNode(stack);
}

WorkManagerBase::~WorkManagerBase()
{
    if (workSpace_ != 0) {
        GetSpaceChunk()->Free(reinterpret_cast<void *>(workSpace_));
    }
}

WorkManager::WorkManager(Heap *heap, uint32_t threadNum)
    : WorkManagerBase(heap->GetNativeAreaAllocator()), heap_(heap), threadNum_(threadNum),
      parallelGCTaskPhase_(UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        works_.at(i).Setup(heap_, this, &workStack_);
    }
}

WorkManager::~WorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        works_.at(i).Destroy();
    }
}

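// Finishes the cycle on every per-thread holder, clears the global work stack,
// and returns the total alive size accumulated across threads.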
size_t WorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        holder.Finish();
        aliveSize += holder.aliveSize_;
    }
    workStack_.Clear();
    FinishBase();
    initialized_.store(false, std::memory_order_release);
    return aliveSize;
}

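// Variant that additionally accumulates the total promoted size and finalizes
// any per-thread TLAB allocators that are still alive.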
void WorkManager::Finish(size_t &aliveSize, size_t &promotedSize)
{
    aliveSize = Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        promotedSize += holder.promotedSize_;
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
    }
    initialized_.store(false, std::memory_order_release);
}

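// Prepares every per-thread holder for a new GC cycle and marks the manager
// as initialized; initializing twice is treated as fatal.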
void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
    parallelGCTaskPhase_ = taskPhase;
    InitializeBase();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        holder.Initialize(gcType, taskPhase);
    }
    if (initialized_.load(std::memory_order_acquire)) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
    initialized_.store(true, std::memory_order_release);
}

SharedGCWorkManager::SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum)
    : WorkManagerBase(heap->GetNativeAreaAllocator()), sHeap_(heap), threadNum_(threadNum),
      continuousQueue_ { nullptr }, sharedTaskPhase_(SHARED_UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i) = new ProcessQueue();
    }
}

SharedGCWorkManager::~SharedGCWorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i)->Destroy();
        delete continuousQueue_.at(i);
        continuousQueue_.at(i) = nullptr;
    }
}

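// Shared-heap counterpart of WorkManager::Initialize: allocates work nodes and
// weak queues per thread, and creates shared TLAB allocators only for
// SHARED_FULL_GC.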
void SharedGCWorkManager::Initialize(TriggerGCType gcType, SharedParallelMarkPhase taskPhase)
{
    sharedTaskPhase_ = taskPhase;
    InitializeBase();
    for (uint32_t i = 0; i < threadNum_; i++) {
        SharedGCWorkNodeHolder &holder = works_.at(i);
        holder.inNode_ = AllocateWorkNode();
        holder.cachedInNode_ = AllocateWorkNode();
        holder.outNode_ = AllocateWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(continuousQueue_.at(i));
        if (gcType == TriggerGCType::SHARED_FULL_GC) {
            holder.allocator_ = new SharedTlabAllocator(sHeap_);
        }
    }
    if (initialized_.load(std::memory_order_acquire)) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
    initialized_.store(true, std::memory_order_release);
}

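// Drains each thread's weak queue back into its continuous queue, sums the
// alive size, finalizes any TLAB allocators, and clears the global work stack.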
size_t SharedGCWorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        SharedGCWorkNodeHolder &holder = works_.at(i);
        if (holder.weakQueue_ != nullptr) {
            holder.weakQueue_->FinishMarking(continuousQueue_.at(i));
            delete holder.weakQueue_;
            holder.weakQueue_ = nullptr;
        }
        aliveSize += holder.aliveSize_;
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
    }
    workStack_.Clear();
    FinishBase();
    initialized_.store(false, std::memory_order_release);
    return aliveSize;
}

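// Same flush-and-retry protocol as WorkNodeHolder::Push, keyed by thread id.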
bool SharedGCWorkManager::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal(threadId);
        return inNode->PushObject(ToUintPtr(object));
    }
    return true;
}

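// Push path for callers that keep a private marking buffer rather than a
// per-thread holder: the buffer is created lazily and, once full, handed to
// the global stack and replaced with a fresh node.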
bool SharedGCWorkManager::PushToLocalMarkingBuffer(WorkNode *&markingBuffer, TaggedObject *object)
{
    if (UNLIKELY(markingBuffer == nullptr)) {
        markingBuffer = AllocateWorkNode();
    }
    ASSERT(markingBuffer != nullptr);
    if (UNLIKELY(!markingBuffer->PushObject(ToUintPtr(object)))) {
        PushLocalBufferToGlobal(markingBuffer);
        ASSERT(markingBuffer == nullptr);
        markingBuffer = AllocateWorkNode();
        return markingBuffer->PushObject(ToUintPtr(object));
    }
    return true;
}

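// Publishes the thread's in-node to the global stack, swaps in the cached
// node, and optionally posts a GC marking task for idle workers.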
void SharedGCWorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->IsEmpty()) {
        workStack_.Push(inNode);
        inNode = works_.at(threadId).cachedInNode_;
        ASSERT(inNode != nullptr);
        works_.at(threadId).cachedInNode_ = AllocateWorkNode();
        if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
            sHeap_->PostGCMarkingTask(sharedTaskPhase_);
        }
    }
}

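// Hands a full local marking buffer to the global work stack and clears the
// caller's pointer; optionally posts a GC marking task.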
void SharedGCWorkManager::PushLocalBufferToGlobal(WorkNode *&node, bool postTask)
{
    ASSERT(node != nullptr);
    ASSERT(!node->IsEmpty());
    workStack_.Push(node);
    if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
        sHeap_->PostGCMarkingTask(sharedTaskPhase_);
    }
    node = nullptr;
}

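// Mirrors WorkNodeHolder::Pop: drain the out-node, then swap with the in-node,
// then fall back to stealing a node from the global work stack.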
bool SharedGCWorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&outNode = works_.at(threadId).outNode_;
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode->IsEmpty()) {
            WorkNode *tmp = outNode;
            outNode = inNode;
            inNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

bool SharedGCWorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return workStack_.Pop(&works_.at(threadId).outNode_);
}
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_WORK_MANAGER_INL_H