/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/work_manager.h"

#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/tlab_allocator-inl.h"

namespace panda::ecmascript {
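// WorkManagerBase owns the backing storage for GC work nodes: fixed-size
// chunks of WORKNODE_SPACE_SIZE bytes that are bump-allocated into WorkNode
// records. Exhausted chunks are retired to agedSpaces_; FinishBase() (declared
// in the header) is expected to release them at the end of a GC pass.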
WorkManagerBase::WorkManagerBase(NativeAreaAllocator *allocator)
    : spaceChunk_(allocator), workSpace_(0), spaceStart_(0), spaceEnd_(0)
{
    workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
}

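// Carves one [WorkNode | Stack | stack area] record out of the current chunk,
// starting a fresh chunk when the remaining space cannot hold the record.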
WorkNode *WorkManagerBase::AllocateWorkNode()
{
    LockHolder lock(mtx_);
    size_t allocatedSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
    ASSERT(allocatedSize < WORKNODE_SPACE_SIZE);

    uintptr_t begin = spaceStart_;
    if (begin + allocatedSize >= spaceEnd_) {
        // Current chunk is exhausted: retire it and bump-allocate from a new one.
        agedSpaces_.emplace_back(workSpace_);
        workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
        spaceStart_ = workSpace_;
        spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
        begin = spaceStart_;
    }
    spaceStart_ = begin + allocatedSize;
    Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
    stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + allocatedSize);
    WorkNode *work = reinterpret_cast<WorkNode *>(begin);
    return new (work) WorkNode(stack);
}

WorkManagerBase::~WorkManagerBase()
{
    GetSpaceChunk()->Free(reinterpret_cast<void *>(workSpace_));
}

WorkManager::WorkManager(Heap *heap, uint32_t threadNum)
    : WorkManagerBase(heap->GetNativeAreaAllocator()), heap_(heap), threadNum_(threadNum),
      continuousQueue_ { nullptr }, parallelGCTaskPhase_(UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i) = new ProcessQueue();
    }
}

WorkManager::~WorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i)->Destroy();
        delete continuousQueue_.at(i);
        continuousQueue_.at(i) = nullptr;
    }
}

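// Typical producer/consumer pattern for a marking thread (a sketch; the
// surrounding loop and MarkObject are illustrative, not part of this file):
//
//     workManager->Push(threadId, objectRef);        // publish a gray object
//     TaggedObject *object = nullptr;
//     while (workManager->Pop(threadId, &object)) {  // drain local, then global work
//         MarkObject(threadId, object);
//     }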
bool WorkManager::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->PushObject(ToUintPtr(object))) {
        // The local in-node is full: publish it to the global stack and retry
        // with the freshly allocated in-node.
        PushWorkNodeToGlobal(threadId);
        return inNode->PushObject(ToUintPtr(object));
    }
    return true;
}

void WorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->IsEmpty()) {
        workStack_.Push(inNode);
        inNode = AllocateWorkNode();
        // Post a parallel task to pick up the published node, unless an
        // incremental mark is in progress.
        if (postTask && heap_->IsParallelGCEnabled() && heap_->CheckCanDistributeTask() &&
            !(heap_->IsMarking() && heap_->GetIncrementalMarker()->IsTriggeredIncrementalMark())) {
            heap_->PostParallelGCTask(parallelGCTaskPhase_);
        }
    }
}

bool WorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&outNode = works_.at(threadId).outNode_;
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode->IsEmpty()) {
            // The out-node is drained; swap in the local in-node before
            // falling back to the global stack.
            WorkNode *tmp = outNode;
            outNode = inNode;
            inNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

bool WorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return workStack_.Pop(&works_.at(threadId).outNode_);
}

size_t WorkManager::Finish()
{
    // Tear down per-thread marking state and accumulate the alive size
    // recorded by each worker.
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        if (holder.weakQueue_ != nullptr) {
            holder.weakQueue_->FinishMarking(continuousQueue_.at(i));
            delete holder.weakQueue_;
            holder.weakQueue_ = nullptr;
        }
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
        holder.pendingUpdateSlots_.clear();
        aliveSize += holder.aliveSize_;
    }
    FinishBase();
    initialized_.store(false, std::memory_order_release);
    return aliveSize;
}

void WorkManager::Finish(size_t &aliveSize, size_t &promotedSize)
{
    aliveSize = Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        promotedSize += holder.promotedSize_;
        // Finish() above has already finalized and cleared each allocator_,
        // so this check is a defensive no-op in the common path.
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
    }
    initialized_.store(false, std::memory_order_release);
}

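// Expected lifecycle (a sketch based on the calls in this file; the task-phase
// value is illustrative): Initialize() runs once before a GC pass hands out
// work, Finish() once after it drains, and initialized_ guards against
// overlapping passes.
//
//     workManager->Initialize(TriggerGCType::OLD_GC, parallelTaskPhase);
//     ... parallel marking via Push()/Pop() ...
//     size_t aliveSize = workManager->Finish();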
void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
    parallelGCTaskPhase_ = taskPhase;
    InitializeBase();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        holder.inNode_ = AllocateWorkNode();
        holder.outNode_ = AllocateWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(continuousQueue_.at(i));
        holder.aliveSize_ = 0;
        holder.promotedSize_ = 0;
        // All triggers except OLD_GC allocate through a per-thread TLAB.
        if (gcType != TriggerGCType::OLD_GC) {
            holder.allocator_ = new TlabAllocator(heap_);
        }
    }
    if (initialized_.load(std::memory_order_acquire)) {
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
    initialized_.store(true, std::memory_order_release);
}

SharedGCWorkManager::SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum)
    : WorkManagerBase(heap->GetNativeAreaAllocator()), sHeap_(heap), threadNum_(threadNum),
      continuousQueue_ { nullptr }, sharedTaskPhase_(SHARED_UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i) = new ProcessQueue();
    }
}

SharedGCWorkManager::~SharedGCWorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i)->Destroy();
        delete continuousQueue_.at(i);
        continuousQueue_.at(i) = nullptr;
    }
}

void SharedGCWorkManager::Initialize(TriggerGCType gcType, SharedParallelMarkPhase taskPhase)
{
    sharedTaskPhase_ = taskPhase;
    InitializeBase();
    for (uint32_t i = 0; i < threadNum_; i++) {
        SharedGCWorkNodeHolder &holder = works_.at(i);
        holder.inNode_ = AllocateWorkNode();
        holder.outNode_ = AllocateWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(continuousQueue_.at(i));
        // Only SHARED_FULL_GC allocates through a per-thread shared-heap TLAB.
        if (gcType == TriggerGCType::SHARED_FULL_GC) {
            holder.allocator_ = new SharedTlabAllocator(sHeap_);
        }
    }
    if (initialized_.load(std::memory_order_acquire)) {
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
    initialized_.store(true, std::memory_order_release);
}

size_t SharedGCWorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        SharedGCWorkNodeHolder &holder = works_.at(i);
        if (holder.weakQueue_ != nullptr) {
            holder.weakQueue_->FinishMarking(continuousQueue_.at(i));
            delete holder.weakQueue_;
            holder.weakQueue_ = nullptr;
        }
        aliveSize += holder.aliveSize_;
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
    }
    FinishBase();
    initialized_.store(false, std::memory_order_release);
    return aliveSize;
}

bool SharedGCWorkManager::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal(threadId);
        return inNode->PushObject(ToUintPtr(object));
    }
    return true;
}

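// Mutator-local marking buffers let a caller record shared objects without
// going through a per-thread holder slot. The caller keeps a WorkNode*
// (initially nullptr) and passes it to every call; a sketch of the intended
// use (localBuffer is an illustrative caller-side variable):
//
//     WorkNode *localBuffer = nullptr;
//     workManager->PushToLocalMarkingBuffer(localBuffer, object);
//     ...
//     workManager->PushLocalBufferToGlobal(localBuffer);  // publish when done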
bool SharedGCWorkManager::PushToLocalMarkingBuffer(WorkNode *&markingBuffer, TaggedObject *object)
{
    if (UNLIKELY(markingBuffer == nullptr)) {
        markingBuffer = AllocateWorkNode();
    }
    ASSERT(markingBuffer != nullptr);
    if (UNLIKELY(!markingBuffer->PushObject(ToUintPtr(object)))) {
        // The buffer is full: publish it (which resets the caller's pointer
        // to nullptr) and continue in a fresh node.
        PushLocalBufferToGlobal(markingBuffer);
        ASSERT(markingBuffer == nullptr);
        markingBuffer = AllocateWorkNode();
        return markingBuffer->PushObject(ToUintPtr(object));
    }
    return true;
}

void SharedGCWorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->IsEmpty()) {
        workStack_.Push(inNode);
        inNode = AllocateWorkNode();
        if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
            sHeap_->PostGCMarkingTask(sharedTaskPhase_);
        }
    }
}

void SharedGCWorkManager::PushLocalBufferToGlobal(WorkNode *&node, bool postTask)
{
    ASSERT(node != nullptr);
    ASSERT(!node->IsEmpty());
    workStack_.Push(node);
    if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
        sHeap_->PostGCMarkingTask(sharedTaskPhase_);
    }
    node = nullptr;
}

bool SharedGCWorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&outNode = works_.at(threadId).outNode_;
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode->IsEmpty()) {
            WorkNode *tmp = outNode;
            outNode = inNode;
            inNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

bool SharedGCWorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return workStack_.Pop(&works_.at(threadId).outNode_);
}
}  // namespace panda::ecmascript