/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_WORK_MANAGER_INL_H
#define ECMASCRIPT_MEM_WORK_MANAGER_INL_H

#include "ecmascript/mem/work_manager.h"

#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/tlab_allocator-inl.h"

namespace panda::ecmascript {
void WorkNodeHolder::Setup(Heap *heap, WorkManager *workManager, GlobalWorkStack *workStack)
{
    heap_ = heap;
    workManager_ = workManager;
    workStack_ = workStack;
    continuousQueue_ = new ProcessQueue();
}

void WorkNodeHolder::Destroy()
{
    continuousQueue_->Destroy();
    delete continuousQueue_;
    continuousQueue_ = nullptr;
}

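// Per-GC-cycle setup for one worker: allocate the local in/out work nodes, start the
// weak-reference queue on top of the persistent continuousQueue_, and reset the
// per-cycle counters. The TlabAllocator is only created for GC types other than OLD_GC.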
void WorkNodeHolder::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
    inNode_ = workManager_->AllocateWorkNode();
    cachedInNode_ = workManager_->AllocateWorkNode();
    outNode_ = workManager_->AllocateWorkNode();
    weakQueue_ = new ProcessQueue();
    weakQueue_->BeginMarking(continuousQueue_);
    aliveSize_ = 0;
    promotedSize_ = 0;
    parallelGCTaskPhase_ = taskPhase;
    if (gcType != TriggerGCType::OLD_GC) {
        allocator_ = new TlabAllocator(heap_);
    }
}

void WorkNodeHolder::Finish()
{
    if (weakQueue_ != nullptr) {
        weakQueue_->FinishMarking(continuousQueue_);
        delete weakQueue_;
        weakQueue_ = nullptr;
    }
    if (allocator_ != nullptr) {
        allocator_->Finalize();
        delete allocator_;
        allocator_ = nullptr;
    }
    parallelGCTaskPhase_ = ParallelGCTaskPhase::UNDEFINED_TASK;
}

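// Push a marked object onto the thread-local in-node. If the node is full, publish it
// to the global work stack and retry on the fresh in-node.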
bool WorkNodeHolder::Push(TaggedObject *object)
{
    if (!inNode_->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal();
        return inNode_->PushObject(ToUintPtr(object));
    }
    return true;
}

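// Publish the current in-node to the global work stack and switch to the cached node.
// Optionally post a parallel GC task so idle GC workers can help drain the global
// stack, except while a triggered incremental mark is in progress.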
void WorkNodeHolder::PushWorkNodeToGlobal(bool postTask)
{
    if (!inNode_->IsEmpty()) {
        workStack_->Push(inNode_);
        inNode_ = cachedInNode_;
        ASSERT(inNode_ != nullptr);
        cachedInNode_ = cachedInNode_->Next();
        if (cachedInNode_ == nullptr) {
            cachedInNode_ = workManager_->AllocateWorkNode();
        }
        if (postTask && heap_->IsParallelGCEnabled() && heap_->CheckCanDistributeTask() &&
            !(heap_->IsMarking() && heap_->GetIncrementalMarker()->IsTriggeredIncrementalMark())) {
            heap_->PostParallelGCTask(parallelGCTaskPhase_);
        }
    }
}

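// Pop the next object for this worker: drain the local out-node first, then swap with a
// non-empty in-node, and finally fall back to stealing a work node from the global stack.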
bool WorkNodeHolder::Pop(TaggedObject **object)
{
    if (!outNode_->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode_->IsEmpty()) {
            WorkNode *tmp = outNode_;
            outNode_ = inNode_;
            inNode_ = tmp;
        } else {
            cachedInNode_->SetNext(outNode_);
            outNode_->SetNext(nullptr);
            if (!PopWorkNodeFromGlobal()) {
                return false;
            }
        }
        return outNode_->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

bool WorkNodeHolder::PopWorkNodeFromGlobal()
{
    return workStack_->Pop(&outNode_);
}

void WorkNodeHolder::PushWeakReference(JSTaggedType *weak)
{
    weakQueue_->PushBack(weak);
}

void WorkNodeHolder::IncreaseAliveSize(size_t size)
{
    aliveSize_ += size;
}

void WorkNodeHolder::IncreasePromotedSize(size_t size)
{
    promotedSize_ += size;
}

ProcessQueue *WorkNodeHolder::GetWeakReferenceQueue() const
{
    return weakQueue_;
}

TlabAllocator *WorkNodeHolder::GetTlabAllocator() const
{
    return allocator_;
}

WorkManagerBase::WorkManagerBase(NativeAreaAllocator *allocator)
    : spaceChunk_(allocator), workSpace_(0), spaceStart_(0), spaceEnd_(0)
{
    auto allocatedSpace = GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE);
    ASSERT(allocatedSpace != nullptr);
    workSpace_ = ToUintPtr(allocatedSpace);
}

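// Bump-pointer allocate a WorkNode together with its backing Stack from the current
// WORKNODE_SPACE_SIZE chunk. When the chunk is exhausted it is parked in agedSpaces_
// and a fresh chunk is requested from the space chunk allocator.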
WorkNode *WorkManagerBase::AllocateWorkNode()
{
    LockHolder lock(mtx_);
    size_t allocatedSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
    ASSERT(allocatedSize < WORKNODE_SPACE_SIZE);

    uintptr_t begin = spaceStart_;
    if (begin + allocatedSize >= spaceEnd_) {
        agedSpaces_.emplace_back(workSpace_);
        workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
        spaceStart_ = workSpace_;
        spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
        begin = spaceStart_;
    }
    spaceStart_ = begin + allocatedSize;
    Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
    stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + allocatedSize);
    WorkNode *work = reinterpret_cast<WorkNode *>(begin);
    return new (work) WorkNode(stack);
}

WorkManagerBase::~WorkManagerBase()
{
    if (workSpace_ != 0) {
        GetSpaceChunk()->Free(reinterpret_cast<void *>(workSpace_));
    }
}

WorkManager::WorkManager(Heap *heap, uint32_t threadNum)
    : WorkManagerBase(heap->GetNativeAreaAllocator()), heap_(heap), threadNum_(threadNum),
      parallelGCTaskPhase_(UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        works_.at(i).Setup(heap_, this, &workStack_);
    }
}

WorkManager::~WorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        works_.at(i).Destroy();
    }
}

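// Tear down the per-thread holders after a GC cycle and return the total size of
// objects found alive across all workers.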
size_t WorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        holder.Finish();
        aliveSize += holder.aliveSize_;
    }
    workStack_.Clear();
    FinishBase();
    initialized_.store(false, std::memory_order_release);
    return aliveSize;
}

void WorkManager::Finish(size_t &aliveSize, size_t &promotedSize)
{
    aliveSize = Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        promotedSize += holder.promotedSize_;
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
    }
    initialized_.store(false, std::memory_order_release);
}

void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
    parallelGCTaskPhase_ = taskPhase;
    InitializeBase();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        holder.Initialize(gcType, taskPhase);
    }
    if (initialized_.load(std::memory_order_acquire)) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
    initialized_.store(true, std::memory_order_release);
}

SharedGCWorkManager::SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum)
    : WorkManagerBase(heap->GetNativeAreaAllocator()), sHeap_(heap), threadNum_(threadNum),
      continuousQueue_ { nullptr }, sharedTaskPhase_(SHARED_UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i) = new ProcessQueue();
    }
}

SharedGCWorkManager::~SharedGCWorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i)->Destroy();
        delete continuousQueue_.at(i);
        continuousQueue_.at(i) = nullptr;
    }
}

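// Shared-heap counterpart of WorkManager::Initialize: set up the per-thread work nodes
// and weak-reference queues; a SharedTlabAllocator is only created for SHARED_FULL_GC.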
void SharedGCWorkManager::Initialize(TriggerGCType gcType, SharedParallelMarkPhase taskPhase)
{
    sharedTaskPhase_ = taskPhase;
    InitializeBase();
    for (uint32_t i = 0; i < threadNum_; i++) {
        SharedGCWorkNodeHolder &holder = works_.at(i);
        holder.inNode_ = AllocateWorkNode();
        holder.cachedInNode_ = AllocateWorkNode();
        holder.outNode_ = AllocateWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(continuousQueue_.at(i));
        if (gcType == TriggerGCType::SHARED_FULL_GC) {
            holder.allocator_ = new SharedTlabAllocator(sHeap_);
        }
    }
    if (initialized_.load(std::memory_order_acquire)) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
    initialized_.store(true, std::memory_order_release);
}

size_t SharedGCWorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        SharedGCWorkNodeHolder &holder = works_.at(i);
        if (holder.weakQueue_ != nullptr) {
            holder.weakQueue_->FinishMarking(continuousQueue_.at(i));
            delete holder.weakQueue_;
            holder.weakQueue_ = nullptr;
        }
        aliveSize += holder.aliveSize_;
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
    }
    workStack_.Clear();
    FinishBase();
    initialized_.store(false, std::memory_order_release);
    return aliveSize;
}

bool SharedGCWorkManager::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal(threadId);
        return inNode->PushObject(ToUintPtr(object));
    }
    return true;
}

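// Push into a caller-owned local marking buffer, allocating it lazily on first use.
// When the buffer fills up it is flushed to the global work stack and replaced with a
// fresh node before the object is pushed again.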
bool SharedGCWorkManager::PushToLocalMarkingBuffer(WorkNode *&markingBuffer, TaggedObject *object)
{
    if (UNLIKELY(markingBuffer == nullptr)) {
        markingBuffer = AllocateWorkNode();
    }
    ASSERT(markingBuffer != nullptr);
    if (UNLIKELY(!markingBuffer->PushObject(ToUintPtr(object)))) {
        PushLocalBufferToGlobal(markingBuffer);
        ASSERT(markingBuffer == nullptr);
        markingBuffer = AllocateWorkNode();
        return markingBuffer->PushObject(ToUintPtr(object));
    }
    return true;
}

void SharedGCWorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->IsEmpty()) {
        workStack_.Push(inNode);
        inNode = works_.at(threadId).cachedInNode_;
        ASSERT(inNode != nullptr);
        works_.at(threadId).cachedInNode_ = AllocateWorkNode();
        if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
            sHeap_->PostGCMarkingTask(sharedTaskPhase_);
        }
    }
}

void SharedGCWorkManager::PushLocalBufferToGlobal(WorkNode *&node, bool postTask)
{
    ASSERT(node != nullptr);
    ASSERT(!node->IsEmpty());
    workStack_.Push(node);
    if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
        sHeap_->PostGCMarkingTask(sharedTaskPhase_);
    }
    node = nullptr;
}

bool SharedGCWorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&outNode = works_.at(threadId).outNode_;
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode->IsEmpty()) {
            WorkNode *tmp = outNode;
            outNode = inNode;
            inNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

bool SharedGCWorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return workStack_.Pop(&works_.at(threadId).outNode_);
}
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_WORK_MANAGER_INL_H