/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/parallel_work_helper.h"

#include <atomic>

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/area.h"
#include "ecmascript/mem/full_gc.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/heap_region_allocator.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/mix_gc.h"
#include "ecmascript/mem/region.h"
#include "ecmascript/mem/tlab_allocator-inl.h"

namespace panda::ecmascript {
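// Creates one continuous process queue per worker thread and reserves the
// initial SPACE_SIZE native buffer from which WorkNodes are bump-allocated.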
WorkerHelper::WorkerHelper(Heap *heap, uint32_t threadNum)
    : heap_(heap), threadNum_(threadNum), markSpace_(0), spaceTop_(0), markSpaceEnd_(0)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_[i] = new ProcessQueue(heap);
    }
    markSpace_ =
        ToUintPtr(const_cast<NativeAreaAllocator *>(heap_->GetNativeAreaAllocator())->AllocateBuffer(SPACE_SIZE));
}

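// Destroys the per-thread queues and returns the current mark-space buffer to
// the native allocator; buffers retired during marking are freed in Finish().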
WorkerHelper::~WorkerHelper()
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_[i]->Destroy();
        delete continuousQueue_[i];
        continuousQueue_[i] = nullptr;
    }
    const_cast<NativeAreaAllocator *>(heap_->GetNativeAreaAllocator())->FreeBuffer(
        reinterpret_cast<void *>(markSpace_));
}

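// Pushes a marked object onto the calling thread's local push node. If the
// node is full, it is published to the global pool and the push is retried on
// the fresh node installed by PushWorkNodeToGlobal.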
bool WorkerHelper::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&pushNode = workList_[threadId].pushNode_;
    if (!pushNode->Push(ToUintPtr(object))) {
        PushWorkNodeToGlobal(threadId);
        return pushNode->Push(ToUintPtr(object));
    }
    return true;
}

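// Same as Push above, but additionally adds the object's size to the region's
// alive-object counter, so each region's live bytes are known after marking.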
bool WorkerHelper::Push(uint32_t threadId, TaggedObject *object, Region *region)
{
    if (Push(threadId, object)) {
        auto klass = object->GetClass();
        auto size = klass->SizeFromJSHClass(object);
        region->IncrementAliveObjectSafe(size);
        return true;
    }
    return false;
}

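// Publishes the thread's push node to the global work stack and installs a
// freshly allocated node in its place. When requested, a parallel GC task is
// posted so an idle worker can pick up the published work.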
void WorkerHelper::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&pushNode = workList_[threadId].pushNode_;
    if (!pushNode->IsEmpty()) {
        globalWork_.Push(pushNode);
        pushNode = AllocalWorkNode();
        if (postTask && heap_->IsParallelGCEnabled() && heap_->CheckCanDistributeTask()) {
            heap_->PostParallelGCTask(parallelTask_);
        }
    }
}

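// Pops the next object to process. When the local pop node is drained, the
// push and pop nodes are swapped; if the push node is empty too, a node is
// taken from the global pool. Returns false once no work remains anywhere.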
bool WorkerHelper::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&popNode = workList_[threadId].popNode_;
    WorkNode *&pushNode = workList_[threadId].pushNode_;
    if (!popNode->Pop(reinterpret_cast<uintptr_t *>(object))) {
        if (!pushNode->IsEmpty()) {
            WorkNode *tmp = popNode;
            popNode = pushNode;
            pushNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return popNode->Pop(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

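// Refills the thread's pop node from the global work stack; returns false
// when the global stack is empty.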
bool WorkerHelper::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return globalWork_.Pop(&workList_[threadId].popNode_);
}

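// Tears down per-thread marking state after GC: flushes the weak-reference
// queues, finalizes any TLAB allocators, accumulates the per-thread alive
// sizes into aliveSize, and frees all mark-space buffers retired during GC.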
void WorkerHelper::Finish(size_t &aliveSize)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = workList_[i];
        holder.weakQueue_->FinishMarking(continuousQueue_[i]);
        delete holder.weakQueue_;
        holder.weakQueue_ = nullptr;
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
        holder.waitUpdate_.clear();
        aliveSize += holder.aliveSize_;
    }

    while (!unuseSpace_.empty()) {
        const_cast<NativeAreaAllocator *>(heap_->GetNativeAreaAllocator())->FreeBuffer(reinterpret_cast<void *>(
            unuseSpace_.back()));
        unuseSpace_.pop_back();
    }
}

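// Variant for GCs that promote objects: also accumulates the per-thread
// promoted sizes into promoteSize.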
void WorkerHelper::Finish(size_t &aliveSize, size_t &promoteSize)
{
    Finish(aliveSize);
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = workList_[i];
        promoteSize += holder.promoteSize_;
    }
}

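// Prepares per-thread state for a new GC cycle: records the parallel task
// phase, resets the bump pointer into the mark-space buffer, and allocates
// fresh push/pop nodes and weak queues. A TlabAllocator is created for all
// GC types except OLD_GC.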
void WorkerHelper::Initialize(TriggerGCType gcType, ParallelGCTaskPhase parallelTask)
{
    parallelTask_ = parallelTask;
    spaceTop_ = markSpace_;
    markSpaceEnd_ = markSpace_ + SPACE_SIZE;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = workList_[i];
        holder.pushNode_ = AllocalWorkNode();
        holder.popNode_ = AllocalWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(heap_, continuousQueue_[i]);
        holder.aliveSize_ = 0;
        holder.promoteSize_ = 0;
        if (gcType != TriggerGCType::OLD_GC) {
            holder.allocator_ = new TlabAllocator(heap_);
        }
    }
}

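// Carves a WorkNode (header, stack object, and stack area) out of the shared
// mark space with a lock-free bump allocation. When the current buffer is
// exhausted, the slow path takes mtx_, retires the buffer to unuseSpace_
// (freed later in Finish), and allocates a new SPACE_SIZE buffer.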
WorkNode *WorkerHelper::AllocalWorkNode()
{
    size_t totalSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
    // Bump spaceTop_ with a CAS loop so multiple threads can allocate nodes
    // concurrently without taking a lock on the fast path.
    volatile auto atomicField = reinterpret_cast<volatile std::atomic<uintptr_t> *>(&spaceTop_);
    bool result = false;
    uintptr_t begin = 0;
    do {
        begin = atomicField->load(std::memory_order_acquire);
        if (begin + totalSize >= markSpaceEnd_) {
            os::memory::LockHolder lock(mtx_);
            // Re-check under the lock: another thread may have replaced the
            // buffer while this one was waiting.
            begin = atomicField->load(std::memory_order_acquire);
            if (begin + totalSize >= markSpaceEnd_) {
                unuseSpace_.emplace_back(markSpace_);
                markSpace_ = ToUintPtr(const_cast<NativeAreaAllocator *>(
                    heap_->GetNativeAreaAllocator())->AllocateBuffer(SPACE_SIZE));
                spaceTop_ = markSpace_;
                markSpaceEnd_ = markSpace_ + SPACE_SIZE;
                begin = spaceTop_;
            }
        }
        result = std::atomic_compare_exchange_strong_explicit(atomicField, &begin, begin + totalSize,
                                                              std::memory_order_release, std::memory_order_relaxed);
    } while (!result);
    // Lay the node out in the claimed range: WorkNode header first, then the
    // Stack object, then the stack area it manages.
    Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
    stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + totalSize);
    WorkNode *work = reinterpret_cast<WorkNode *>(begin);
    return new (work) WorkNode(stack);
}
}  // namespace panda::ecmascript