/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_PARALLEL_WORK_HELPER_H
#define ECMASCRIPT_MEM_PARALLEL_WORK_HELPER_H

#include "ecmascript/mem/mark_stack-inl.h"
#include "ecmascript/mem/slots.h"
#include "ecmascript/platform/platform.h"

namespace panda::ecmascript {
using SlotNeedUpdate = std::pair<TaggedObject *, ObjectSlot>;

static constexpr uint32_t MARKSTACK_MAX_SIZE = 100;
static constexpr uint32_t STACK_AREA_SIZE = sizeof(uintptr_t) * MARKSTACK_MAX_SIZE;
static constexpr uint32_t SPACE_SIZE = 8 * 1024;
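// With MARKSTACK_MAX_SIZE = 100, STACK_AREA_SIZE works out to 800 bytes on 64-bit targets
// (sizeof(uintptr_t) == 8) and SPACE_SIZE to 8 KiB; the names suggest they size a per-node
// mark stack area and the mark-space allocation chunk, respectively (an assumption based
// on the names).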

class Heap;
class Stack;
class SemiSpaceCollector;
class TlabAllocator;
class Region;

enum ParallelGCTaskPhase {
    SEMI_HANDLE_THREAD_ROOTS_TASK,
    SEMI_HANDLE_SNAPSHOT_TASK,
    SEMI_HANDLE_GLOBAL_POOL_TASK,
    OLD_HANDLE_GLOBAL_POOL_TASK,
    COMPRESS_HANDLE_GLOBAL_POOL_TASK,
    CONCURRENT_HANDLE_GLOBAL_POOL_TASK,
    CONCURRENT_HANDLE_OLD_TO_NEW_TASK,
    TASK_LAST  // Number of task phases
};

class WorkNode {
public:
    explicit WorkNode(Stack *stack) : next_(nullptr), stack_(stack) {}
    ~WorkNode()
    {
        delete stack_;
        stack_ = nullptr;
    }

    NO_COPY_SEMANTIC(WorkNode);
    NO_MOVE_SEMANTIC(WorkNode);

    bool Push(uintptr_t obj)
    {
        return stack_->PushBackChecked(obj);
    }

    // Pops the most recently pushed address into *obj; returns false when the stack is empty.
    bool Pop(uintptr_t *obj)
    {
        if (IsEmpty()) {
            return false;
        }
        *obj = stack_->PopBackUnchecked();
        return true;
    }

    bool IsEmpty() const
    {
        return stack_->IsEmpty();
    }

    WorkNode *Next() const
    {
        return next_;
    }

    void SetNext(WorkNode *node)
    {
        next_ = node;
    }

private:
    WorkNode *next_;
    Stack *stack_;
};
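
// Minimal usage sketch for WorkNode (illustrative only; in practice nodes and their Stack
// storage are created inside WorkerHelper, and MarkObject below is a hypothetical consumer):
//
//     WorkNode *node = ...;                       // obtained from WorkerHelper
//     node->Push(ToUintPtr(taggedObject));        // returns false once the stack is full
//     uintptr_t addr = 0;
//     while (node->Pop(&addr)) {
//         MarkObject(ToVoidPtr(addr));
//     }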

class GlobalWorkList {
public:
    GlobalWorkList() : top_(nullptr) {}
    ~GlobalWorkList() = default;

    NO_COPY_SEMANTIC(GlobalWorkList);
    NO_MOVE_SEMANTIC(GlobalWorkList);

    void Push(WorkNode *node)
    {
        if (node == nullptr) {
            return;
        }
        os::memory::LockHolder lock(mtx_);
        node->SetNext(top_);
        top_ = node;
    }

    bool Pop(WorkNode **node)
    {
        os::memory::LockHolder lock(mtx_);
        if (top_ == nullptr) {
            return false;
        }
        *node = top_;
        top_ = top_->Next();
        return true;
    }

private:
    WorkNode *top_ {nullptr};
    os::memory::Mutex mtx_;
};
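
// GlobalWorkList is a mutex-protected, intrusive LIFO of WorkNodes shared by every GC
// worker. A rough hand-off pattern (illustrative only; the actual hand-off lives in
// WorkerHelper's implementation):
//
//     globalWork_.Push(fullNode);                 // a producer donates a filled node
//     WorkNode *stolen = nullptr;
//     if (globalWork_.Pop(&stolen)) {             // a consumer takes it over when its
//         workList_[threadId].popNode_ = stolen;  // own pop node runs dry
//     }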

struct WorkNodeHolder {
    WorkNode *pushNode_ {nullptr};
    WorkNode *popNode_ {nullptr};
    ProcessQueue *weakQueue_ {nullptr};
    std::vector<SlotNeedUpdate> waitUpdate_;
    TlabAllocator *allocator_ {nullptr};
    size_t aliveSize_ = 0;
    size_t promoteSize_ = 0;
};
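
// One WorkNodeHolder is kept per GC worker thread (see workList_ in WorkerHelper below),
// so a thread can push to pushNode_ and drain popNode_ without taking a lock; refilling
// popNode_ from the global list when it runs dry is presumably handled in the .cpp file.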

class WorkerHelper final {
public:
    WorkerHelper() = delete;
    explicit WorkerHelper(Heap *heap, uint32_t threadNum);
    ~WorkerHelper();

    void Initialize(TriggerGCType gcType, ParallelGCTaskPhase parallelTask);
    void Finish(size_t &aliveSize);
    void Finish(size_t &aliveSize, size_t &promoteSize);

    bool Push(uint32_t threadId, TaggedObject *object);
    bool Push(uint32_t threadId, TaggedObject *object, Region *region);
    bool Pop(uint32_t threadId, TaggedObject **object);

    bool PopWorkNodeFromGlobal(uint32_t threadId);
    void PushWorkNodeToGlobal(uint32_t threadId, bool postTask = true);

    inline void PushWeakReference(uint32_t threadId, JSTaggedType *weak)
    {
        workList_[threadId].weakQueue_->PushBack(weak);
    }

    inline void AddAliveSize(uint32_t threadId, size_t size)
    {
        workList_[threadId].aliveSize_ += size;
    }

    inline void AddPromoteSize(uint32_t threadId, size_t size)
    {
        workList_[threadId].promoteSize_ += size;
    }

    inline ProcessQueue *GetWeakReferenceQueue(uint32_t threadId) const
    {
        return workList_[threadId].weakQueue_;
    }

    inline TlabAllocator *GetTlabAllocator(uint32_t threadId) const
    {
        return workList_[threadId].allocator_;
    }

    inline void PushWaitUpdateSlot(uint32_t threadId, SlotNeedUpdate slot)
    {
        workList_[threadId].waitUpdate_.emplace_back(slot);
    }

    inline bool GetSlotNeedUpdate(uint32_t threadId, SlotNeedUpdate *slot)
    {
        std::vector<SlotNeedUpdate> &waitUpdate = workList_[threadId].waitUpdate_;
        if (waitUpdate.empty()) {
            return false;
        }
        *slot = waitUpdate.back();
        waitUpdate.pop_back();
        return true;
    }

private:
    NO_COPY_SEMANTIC(WorkerHelper);
    NO_MOVE_SEMANTIC(WorkerHelper);

    WorkNode *AllocalWorkNode();

    Heap *heap_;
    uint32_t threadNum_;
    WorkNodeHolder workList_[MAX_PLATFORM_THREAD_NUM + 1];
    ContinuousStack<JSTaggedType> *continuousQueue_[MAX_PLATFORM_THREAD_NUM + 1];
    GlobalWorkList globalWork_;
    uintptr_t markSpace_;
    uintptr_t spaceTop_;
    uintptr_t markSpaceEnd_;
    std::vector<uintptr_t> unuseSpace_;
    os::memory::Mutex mtx_;
    ParallelGCTaskPhase parallelTask_;
};
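
// Typical lifecycle suggested by the declarations above (illustrative sketch only; the real
// call sites are the collectors, and gcType / threadId / VisitObject are placeholders):
//
//     WorkerHelper helper(heap, threadNum);
//     helper.Initialize(gcType, SEMI_HANDLE_GLOBAL_POOL_TASK);
//     helper.Push(threadId, object);              // a marker publishes a gray object
//     TaggedObject *obj = nullptr;
//     while (helper.Pop(threadId, &obj)) {        // a worker drains its work list
//         VisitObject(obj);
//     }
//     size_t aliveSize = 0;
//     size_t promoteSize = 0;
//     helper.Finish(aliveSize, promoteSize);      // gather per-thread statistics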
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_PARALLEL_WORK_HELPER_H