/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/work_manager.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/area.h"
#include "ecmascript/mem/full_gc.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/heap_region_allocator.h"
#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/parallel_marker-inl.h"
#include "ecmascript/mem/partial_gc.h"
#include "ecmascript/mem/region.h"
#include "ecmascript/mem/tlab_allocator-inl.h"

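// WorkManager coordinates the work-stealing mark queues used by the parallel
// GC: each worker pushes newly marked objects onto a thread-local work node,
// publishes full nodes to a shared global stack, and steals nodes back when
// its own queues run dry.
//
// A rough sketch of a marker loop built on this interface (illustrative only;
// the real drivers live in parallel_marker-inl.h and the GC implementations,
// and MarkObject here is a hypothetical visitor):
//
//     workManager->Initialize(gcType, taskPhase);
//     TaggedObject *object = nullptr;
//     while (workManager->Pop(threadId, &object)) {
//         MarkObject(threadId, object);  // may call Push() for references
//     }
//     size_t aliveSize = workManager->Finish();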
namespace panda::ecmascript {
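// Creates one continuous process queue per worker thread and allocates the
// first SPACE_SIZE buffer from which work nodes are bump-allocated.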
WorkManager::WorkManager(Heap *heap, uint32_t threadNum)
    : heap_(heap), threadNum_(threadNum), continuousQueue_ { nullptr }, workSpace_(0), spaceStart_(0), spaceEnd_(0),
      parallelGCTaskPhase_(UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i) = new ProcessQueue(heap);
    }
    workSpace_ =
        ToUintPtr(heap_->GetNativeAreaAllocator()->AllocateBuffer(SPACE_SIZE));
}

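// Finishes any outstanding work, destroys the per-thread process queues, and
// returns the current work-node buffer to the native area allocator (aged
// buffers are freed inside Finish()).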
WorkManager::~WorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i)->Destroy();
        delete continuousQueue_.at(i);
        continuousQueue_.at(i) = nullptr;
    }

    heap_->GetNativeAreaAllocator()->FreeBuffer(
        reinterpret_cast<void *>(workSpace_));
}

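// Pushes a marked object onto the calling thread's in-node. If the node is
// full, publishes it to the global stack and retries on the fresh node.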
bool WorkManager::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal(threadId);
        return inNode->PushObject(ToUintPtr(object));
    }
    return true;
}

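// Same as Push() above, but also credits the object's size to its region's
// alive-object counter.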
bool WorkManager::Push(uint32_t threadId, TaggedObject *object, Region *region)
{
    if (Push(threadId, object)) {
        auto klass = object->GetClass();
        auto size = klass->SizeFromJSHClass(object);
        region->IncreaseAliveObjectSafe(size);
        return true;
    }
    return false;
}

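// Publishes the thread's current in-node to the global work stack and hands
// the thread a fresh node. When requested (and the heap allows distributing
// tasks), also posts a parallel GC task so an idle worker can pick up the
// published work.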
void WorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->IsEmpty()) {
        workStack_.Push(inNode);
        inNode = AllocateWorkNode();
        if (postTask && heap_->IsParallelGCEnabled() && heap_->CheckCanDistributeTask() &&
            !(heap_->GetJSThread()->IsMarking() && heap_->GetIncrementalMarker()->IsTriggeredIncrementalMark())) {
            heap_->PostParallelGCTask(parallelGCTaskPhase_);
        }
    }
}

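// Pops an object for the calling thread: first from its out-node, then by
// swapping the in-node and out-node, and finally by stealing a node from the
// global stack. Returns false once no work is left anywhere.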
bool WorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&outNode = works_.at(threadId).outNode_;
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode->IsEmpty()) {
            WorkNode *tmp = outNode;
            outNode = inNode;
            inNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

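// Steals a work node from the global stack into the thread's out-node.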
bool WorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return workStack_.Pop(&works_.at(threadId).outNode_);
}

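// Tears down per-thread marking state (weak-reference queues and TLAB
// allocators), frees all aged work-node buffers, and returns the total alive
// bytes recorded by the workers.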
size_t WorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        if (holder.weakQueue_ != nullptr) {
            holder.weakQueue_->FinishMarking(continuousQueue_.at(i));
            delete holder.weakQueue_;
            holder.weakQueue_ = nullptr;
        }
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
        holder.pendingUpdateSlots_.clear();
        aliveSize += holder.aliveSize_;
    }

    while (!agedSpaces_.empty()) {
        heap_->GetNativeAreaAllocator()->FreeBuffer(reinterpret_cast<void *>(
            agedSpaces_.back()));
        agedSpaces_.pop_back();
    }
    return aliveSize;
}

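// Variant that additionally accumulates the bytes each worker promoted; note
// that promotedSize is only added to, so the caller is expected to have
// initialized it.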
void WorkManager::Finish(size_t &aliveSize, size_t &promotedSize)
{
    aliveSize = Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        promotedSize += holder.promotedSize_;
    }
}

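// Resets the bump-allocation window over the current buffer and prepares each
// worker for a new GC cycle: fresh in/out nodes, a weak-reference queue bound
// to the thread's continuous queue, cleared counters, and, for every GC type
// except OLD_GC, a TLAB allocator used when objects are copied.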
void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
    parallelGCTaskPhase_ = taskPhase;
    spaceStart_ = workSpace_;
    spaceEnd_ = workSpace_ + SPACE_SIZE;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        holder.inNode_ = AllocateWorkNode();
        holder.outNode_ = AllocateWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(heap_, continuousQueue_.at(i));
        holder.aliveSize_ = 0;
        holder.promotedSize_ = 0;
        if (gcType != TriggerGCType::OLD_GC) {
            holder.allocator_ = new TlabAllocator(heap_);
        }
    }
}

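// Bump-allocates a WorkNode (node header, stack header, and stack area) from
// the shared buffer with a CAS loop. When the buffer is exhausted, one thread
// takes the lock, retires the buffer to agedSpaces_, and installs a fresh
// one; the second load after locking avoids allocating a buffer twice.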
WorkNode *WorkManager::AllocateWorkNode()
{
    size_t totalSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
    ASSERT(totalSize < SPACE_SIZE);

    // CAS
    volatile auto atomicField = reinterpret_cast<volatile std::atomic<uintptr_t> *>(&spaceStart_);
    bool result = false;
    uintptr_t begin = 0;
    do {
        begin = atomicField->load(std::memory_order_acquire);
        if (begin + totalSize >= spaceEnd_) {
            os::memory::LockHolder lock(mtx_);
            begin = atomicField->load(std::memory_order_acquire);
            if (begin + totalSize >= spaceEnd_) {
                agedSpaces_.emplace_back(workSpace_);
                workSpace_ = ToUintPtr(
                    heap_->GetNativeAreaAllocator()->AllocateBuffer(SPACE_SIZE));
                spaceStart_ = workSpace_;
                spaceEnd_ = workSpace_ + SPACE_SIZE;
                begin = spaceStart_;
            }
        }
        result = std::atomic_compare_exchange_strong_explicit(atomicField, &begin, begin + totalSize,
                                                              std::memory_order_release, std::memory_order_relaxed);
    } while (!result);
    Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
    stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + totalSize);
    WorkNode *work = reinterpret_cast<WorkNode *>(begin);
    return new (work) WorkNode(stack);
}
}  // namespace panda::ecmascript