/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/work_manager.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/area.h"
#include "ecmascript/mem/full_gc.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/heap_region_allocator.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/partial_gc.h"
#include "ecmascript/mem/region.h"
#include "ecmascript/mem/tlab_allocator-inl.h"

namespace panda::ecmascript {
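// Sets up one continuous process queue per worker thread and reserves the first
// SPACE_SIZE buffer from the native area allocator as the shared work space.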
WorkManager::WorkManager(Heap *heap, uint32_t threadNum)
    : heap_(heap), threadNum_(threadNum), continuousQueue_ { nullptr }, workSpace_(0), spaceStart_(0), spaceEnd_(0),
      parallelGCTaskPhase_(UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_[i] = new ProcessQueue(heap);
    }
    workSpace_ =
        ToUintPtr(heap_->GetNativeAreaAllocator()->AllocateBuffer(SPACE_SIZE));
}

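// Drains any remaining per-thread state via Finish(), destroys the per-thread
// process queues, and returns the current work space buffer to the allocator.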
WorkManager::~WorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_[i]->Destroy();
        delete continuousQueue_[i];
        continuousQueue_[i] = nullptr;
    }

    heap_->GetNativeAreaAllocator()->FreeBuffer(
        reinterpret_cast<void *>(workSpace_));
}

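// Pushes a marked object onto this thread's in-node; if the node is full, the node is
// published to the global work stack and the push is retried on a fresh node.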
bool WorkManager::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&inNode = works_[threadId].inNode_;
    if (!inNode->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal(threadId);
        return inNode->PushObject(ToUintPtr(object));
    }
    return true;
}

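// Same as Push above, but additionally records the object's size as live bytes
// in its region.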
bool WorkManager::Push(uint32_t threadId, TaggedObject *object, Region *region)
{
    if (Push(threadId, object)) {
        auto klass = object->GetClass();
        auto size = klass->SizeFromJSHClass(object);
        region->IncreaseAliveObjectSafe(size);
        return true;
    }
    return false;
}

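// Publishes this thread's in-node to the global work stack (if non-empty) and replaces
// it with a freshly allocated node; optionally posts a parallel GC task so that other
// workers can pick up the published work.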
void WorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&inNode = works_[threadId].inNode_;
    if (!inNode->IsEmpty()) {
        workStack_.Push(inNode);
        inNode = AllocateWorkNode();
        if (postTask && heap_->IsParallelGCEnabled() && heap_->CheckCanDistributeTask()) {
            heap_->PostParallelGCTask(parallelGCTaskPhase_);
        }
    }
}

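// Pops an object for this thread: first from its out-node, then by swapping in-node and
// out-node, and finally by taking a node from the global work stack. Returns false when
// no work remains.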
bool WorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&outNode = works_[threadId].outNode_;
    WorkNode *&inNode = works_[threadId].inNode_;
    if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode->IsEmpty()) {
            WorkNode *tmp = outNode;
            outNode = inNode;
            inNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

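// Takes a work node from the global stack and installs it as this thread's out-node.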
bool WorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return workStack_.Pop(&works_[threadId].outNode_);
}

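// Tears down per-thread state after a GC phase: finishes marking on each weak queue,
// finalizes the per-thread allocators, frees retired work-space buffers, and returns
// the total size of objects recorded as alive across all threads.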
size_t WorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_[i];
        if (holder.weakQueue_ != nullptr) {
            holder.weakQueue_->FinishMarking(continuousQueue_[i]);
            delete holder.weakQueue_;
            holder.weakQueue_ = nullptr;
        }
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
        holder.pendingUpdateSlots_.clear();
        aliveSize += holder.aliveSize_;
    }

    while (!agedSpaces_.empty()) {
        heap_->GetNativeAreaAllocator()->FreeBuffer(reinterpret_cast<void *>(
            agedSpaces_.back()));
        agedSpaces_.pop_back();
    }
    return aliveSize;
}

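// Same as Finish above, but also accumulates the per-thread promoted sizes into promotedSize.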
void WorkManager::Finish(size_t &aliveSize, size_t &promotedSize)
{
    aliveSize = Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_[i];
        promotedSize += holder.promotedSize_;
    }
}

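// Prepares per-thread state for a new GC phase: resets the bump pointer over the work
// space, allocates fresh in/out nodes and weak queues, and creates a TlabAllocator for
// each thread unless the trigger is an old-space GC.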
void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
    parallelGCTaskPhase_ = taskPhase;
    spaceStart_ = workSpace_;
    spaceEnd_ = workSpace_ + SPACE_SIZE;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_[i];
        holder.inNode_ = AllocateWorkNode();
        holder.outNode_ = AllocateWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(heap_, continuousQueue_[i]);
        holder.aliveSize_ = 0;
        holder.promotedSize_ = 0;
        if (gcType != TriggerGCType::OLD_GC) {
            holder.allocator_ = new TlabAllocator(heap_);
        }
    }
}

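// Carves a WorkNode (node header, stack header and stack area) out of the shared work
// space with a lock-free bump pointer; when the current buffer is exhausted, a new
// SPACE_SIZE buffer is allocated under the mutex and the old one is kept in agedSpaces_
// until Finish() releases it.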
WorkNode *WorkManager::AllocateWorkNode()
{
    size_t totalSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
    ASSERT(totalSize < SPACE_SIZE);

    // Bump-pointer allocation over the shared work space, synchronized with CAS;
    // the slow path below swaps in a new buffer under the lock.
    volatile auto atomicField = reinterpret_cast<volatile std::atomic<uintptr_t> *>(&spaceStart_);
    bool result = false;
    uintptr_t begin = 0;
    do {
        begin = atomicField->load(std::memory_order_acquire);
        if (begin + totalSize >= spaceEnd_) {
            os::memory::LockHolder lock(mtx_);
            begin = atomicField->load(std::memory_order_acquire);
            if (begin + totalSize >= spaceEnd_) {
                agedSpaces_.emplace_back(workSpace_);
                workSpace_ = ToUintPtr(
                    heap_->GetNativeAreaAllocator()->AllocateBuffer(SPACE_SIZE));
                spaceStart_ = workSpace_;
                spaceEnd_ = workSpace_ + SPACE_SIZE;
                begin = spaceStart_;
            }
        }
        result = std::atomic_compare_exchange_strong_explicit(atomicField, &begin, begin + totalSize,
                                                              std::memory_order_release, std::memory_order_relaxed);
    } while (!result);
    Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
    stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + totalSize);
    WorkNode *work = reinterpret_cast<WorkNode *>(begin);
    return new (work) WorkNode(stack);
}
}  // namespace panda::ecmascript