/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/work_manager.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/area.h"
#include "ecmascript/mem/full_gc.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/heap_region_allocator.h"
#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/parallel_marker-inl.h"
#include "ecmascript/mem/partial_gc.h"
#include "ecmascript/mem/region.h"
#include "ecmascript/mem/tlab_allocator-inl.h"

namespace panda::ecmascript {
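// Creates one continuous mark queue per GC worker thread and reserves the
// first fixed-size chunk (WORKNODE_SPACE_SIZE) from which work nodes are
// carved by AllocateWorkNode().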
WorkManager::WorkManager(Heap *heap, uint32_t threadNum)
    : heap_(heap), threadNum_(threadNum), spaceChunk_(heap_->GetNativeAreaAllocator()), continuousQueue_ { nullptr },
      workSpace_(0), spaceStart_(0), spaceEnd_(0), parallelGCTaskPhase_(UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i) = new ProcessQueue(heap);
    }
    workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
}

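// Flushes outstanding per-thread state via Finish(), then tears down the
// per-thread queues and returns the current work-node chunk to the space
// chunk allocator (retired chunks were already freed in Finish()).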
WorkManager::~WorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i)->Destroy();
        delete continuousQueue_.at(i);
        continuousQueue_.at(i) = nullptr;
    }

    GetSpaceChunk()->Free(reinterpret_cast<void *>(workSpace_));
}

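// Pushes a marked object onto the calling thread's in-node. When the node is
// full, it is published to the global work stack and the push is retried on
// the fresh node that replaces it.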
bool WorkManager::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal(threadId);
        return inNode->PushObject(ToUintPtr(object));
    }
    return true;
}

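// Same as Push(threadId, object), but on success also credits the object's
// size (computed from its hidden class) to the region's alive-object counter.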
bool WorkManager::Push(uint32_t threadId, TaggedObject *object, Region *region)
{
    if (Push(threadId, object)) {
        auto klass = object->GetClass();
        auto size = klass->SizeFromJSHClass(object);
        region->IncreaseAliveObjectSafe(size);
        return true;
    }
    return false;
}

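// Publishes the thread's in-node to the global work stack if it holds any
// objects, and replaces it with a freshly allocated node. Optionally posts a
// parallel GC task so idle workers can help drain the global stack; this is
// skipped while an incremental mark is in progress.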
void WorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->IsEmpty()) {
        workStack_.Push(inNode);
        inNode = AllocateWorkNode();
        if (postTask && heap_->IsParallelGCEnabled() && heap_->CheckCanDistributeTask() &&
            !(heap_->GetJSThread()->IsMarking() && heap_->GetIncrementalMarker()->IsTriggeredIncrementalMark())) {
            heap_->PostParallelGCTask(parallelGCTaskPhase_);
        }
    }
}

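// Pops the next object for this thread. The out-node is drained first; when
// it is empty, the in-node and out-node are swapped, or failing that a node
// is taken from the global work stack. Returns false when no work remains.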
bool WorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&outNode = works_.at(threadId).outNode_;
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode->IsEmpty()) {
            WorkNode *tmp = outNode;
            outNode = inNode;
            inNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

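// Takes one work node off the global stack and installs it as this thread's
// out-node. Returns false if the global stack is empty.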
bool WorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return workStack_.Pop(&works_.at(threadId).outNode_);
}

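// End-of-cycle cleanup: finishes each thread's weak-reference queue,
// finalizes and deletes its TLAB allocator, clears pending slot updates,
// frees all retired work-node chunks, and returns the total alive size
// accumulated across threads.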
size_t WorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        if (holder.weakQueue_ != nullptr) {
            holder.weakQueue_->FinishMarking(continuousQueue_.at(i));
            delete holder.weakQueue_;
            holder.weakQueue_ = nullptr;
        }
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
        holder.pendingUpdateSlots_.clear();
        aliveSize += holder.aliveSize_;
    }

    while (!agedSpaces_.empty()) {
        GetSpaceChunk()->Free(reinterpret_cast<void *>(agedSpaces_.back()));
        agedSpaces_.pop_back();
    }
    initialized_.store(false, std::memory_order_release);
    return aliveSize;
}

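// Variant for GCs that also track promotion. Note that promotedSize is
// accumulated with '+=' rather than assigned, so callers are expected to
// pass in a zero-initialized value.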
void WorkManager::Finish(size_t &aliveSize, size_t &promotedSize)
{
    aliveSize = Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        promotedSize += holder.promotedSize_;
    }
    initialized_.store(false, std::memory_order_release);
}

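// Prepares the per-thread holders for a new GC cycle: fresh in/out work
// nodes, a weak-reference queue bound to the thread's continuous queue, and
// zeroed size counters. TLAB allocators are created only for GC types other
// than OLD_GC, presumably because that collection does not evacuate objects.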
void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
    parallelGCTaskPhase_ = taskPhase;
    spaceStart_ = workSpace_;
    spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        holder.inNode_ = AllocateWorkNode();
        holder.outNode_ = AllocateWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(heap_, continuousQueue_.at(i));
        holder.aliveSize_ = 0;
        holder.promotedSize_ = 0;
        if (gcType != TriggerGCType::OLD_GC) {
            holder.allocator_ = new TlabAllocator(heap_);
        }
    }
    if (initialized_.load(std::memory_order_acquire)) {
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
    initialized_.store(true, std::memory_order_release);
}

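// Bump-pointer allocates a WorkNode, its Stack header, and the stack area in
// one contiguous slice of the current work-node chunk. When the chunk is
// exhausted, it is retired to agedSpaces_ (freed later in Finish()) and a
// new chunk is allocated. Guarded by mtx_ since worker threads allocate
// concurrently.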
WorkNode *WorkManager::AllocateWorkNode()
{
    LockHolder lock(mtx_);
    size_t allocatedSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
    ASSERT(allocatedSize < WORKNODE_SPACE_SIZE);

    uintptr_t begin = spaceStart_;
    if (begin + allocatedSize >= spaceEnd_) {
        agedSpaces_.emplace_back(workSpace_);
        workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
        spaceStart_ = workSpace_;
        spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
        begin = spaceStart_;
    }
    spaceStart_ = begin + allocatedSize;
    Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
    stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + allocatedSize);
    WorkNode *work = reinterpret_cast<WorkNode *>(begin);
    return new (work) WorkNode(stack);
}
}  // namespace panda::ecmascript