/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_WORK_MANAGER_H
#define ECMASCRIPT_MEM_WORK_MANAGER_H

#include <array>
#include <atomic>
#include <utility>
#include <vector>

#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/slots.h"
#include "ecmascript/mem/work_space_chunk.h"
#include "ecmascript/taskpool/taskpool.h"

namespace panda::ecmascript {
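// A host object paired with one of its slots; such pairs record slots whose referents
// still need to be updated later in the GC cycle.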
using SlotNeedUpdate = std::pair<TaggedObject *, ObjectSlot>;

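// Capacity (in slots) of each work-node mark stack and the corresponding byte size of its stack area.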
static constexpr uint32_t MARKSTACK_MAX_SIZE = 100;
static constexpr uint32_t STACK_AREA_SIZE = sizeof(uintptr_t) * MARKSTACK_MAX_SIZE;

class Heap;
class SharedHeap;
class Stack;
class SemiSpaceCollector;
class TlabAllocator;
class SharedTlabAllocator;
class Region;
class WorkSpaceChunk;
class WorkManager;

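// Phases of parallel GC tasks posted to the task pool; the phase identifies which
// kind of collection a worker is draining global work nodes for.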
enum ParallelGCTaskPhase {
    OLD_HANDLE_GLOBAL_POOL_TASK,
    COMPRESS_HANDLE_GLOBAL_POOL_TASK,
    CONCURRENT_HANDLE_GLOBAL_POOL_TASK,
    UNDEFINED_TASK,
    TASK_LAST  // Count of different task phases
};

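// Phases of parallel marking tasks for the shared heap.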
enum SharedParallelMarkPhase {
    SHARED_MARK_TASK,
    SHARED_COMPRESS_TASK,
    SHARED_UNDEFINED_TASK,
    SHARED_TASK_LAST  // Count of different task phases
};

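// A node in the work list: a singly linked wrapper around a mark stack that holds
// object addresses to be processed.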
class WorkNode {
public:
    explicit WorkNode(Stack *stack) : next_(nullptr), stack_(stack) {}
    ~WorkNode() = default;

    NO_COPY_SEMANTIC(WorkNode);
    NO_MOVE_SEMANTIC(WorkNode);

    bool PushObject(uintptr_t obj)
    {
        return stack_->PushBackChecked(obj);
    }

    bool PopObject(uintptr_t *obj)
    {
        if (IsEmpty()) {
            return false;
        }
        *obj = stack_->PopBackUnchecked();
        return true;
    }

    bool IsEmpty() const
    {
        return stack_->IsEmpty();
    }

    WorkNode *Next() const
    {
        return next_;
    }

    void SetNext(WorkNode *node)
    {
        next_ = node;
    }

private:
    WorkNode *next_;
    Stack *stack_;
};

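// A mutex-protected, intrusive LIFO list of WorkNodes shared between GC workers.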
class GlobalWorkStack {
public:
    GlobalWorkStack() : top_(nullptr) {}
    ~GlobalWorkStack() = default;

    NO_COPY_SEMANTIC(GlobalWorkStack);
    NO_MOVE_SEMANTIC(GlobalWorkStack);

    void Push(WorkNode *node)
    {
        if (node == nullptr) {
            return;
        }
        LockHolder lock(mtx_);
        node->SetNext(top_);
        top_ = node;
    }

    bool Pop(WorkNode **node)
    {
        LockHolder lock(mtx_);
        if (top_ == nullptr) {
            return false;
        }
        *node = top_;
        top_ = top_->Next();
        return true;
    }

    void Clear()
    {
        if (top_ != nullptr) {
            LOG_ECMA(ERROR) << "GlobalWorkStack is not empty in WorkManager::Finish.";
        }
        top_ = nullptr;
    }

private:
    WorkNode *top_ {nullptr};
    Mutex mtx_;
};

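// Per-thread working context for parallel GC: local in/out work nodes, a weak-reference
// queue, a TLAB allocator, and counters for alive and promoted bytes.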
class WorkNodeHolder {
public:
    WorkNodeHolder() = default;
    ~WorkNodeHolder() = default;

    NO_COPY_SEMANTIC(WorkNodeHolder);
    NO_MOVE_SEMANTIC(WorkNodeHolder);

    inline void Setup(Heap *heap, WorkManager *workManager, GlobalWorkStack *workStack);
    inline void Destroy();
    inline void Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase);
    inline void Finish();

    inline bool Push(TaggedObject *object);
    inline bool Pop(TaggedObject **object);
    inline bool PopWorkNodeFromGlobal();
    inline void PushWorkNodeToGlobal(bool postTask = true);

    inline void PushWeakReference(JSTaggedType *weak);

    inline void IncreaseAliveSize(size_t size);

    inline void IncreasePromotedSize(size_t size);

    inline ProcessQueue *GetWeakReferenceQueue() const;

    inline TlabAllocator *GetTlabAllocator() const;

private:
    Heap *heap_ {nullptr};
    WorkManager *workManager_ {nullptr};
    GlobalWorkStack *workStack_ {nullptr};
    ParallelGCTaskPhase parallelGCTaskPhase_ {ParallelGCTaskPhase::UNDEFINED_TASK};

    WorkNode *inNode_ {nullptr};
    WorkNode *outNode_ {nullptr};
    WorkNode *cachedInNode_ {nullptr};
    ProcessQueue *weakQueue_ {nullptr};
    ContinuousStack<JSTaggedType> *continuousQueue_ {nullptr};
    TlabAllocator *allocator_ {nullptr};
    size_t aliveSize_ {0};
    size_t promotedSize_ {0};

    friend class WorkManager;
};

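// Base class that owns the chunked memory used to allocate WorkNodes and their stacks,
// and manages its lifetime across fork (released in pre-fork, re-allocated in post-fork).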
class WorkManagerBase {
public:
    inline WorkManagerBase(NativeAreaAllocator *allocator);
    inline virtual ~WorkManagerBase();

    WorkSpaceChunk *GetSpaceChunk() const
    {
        return const_cast<WorkSpaceChunk *>(&spaceChunk_);
    }

    void InitializeBase()
    {
        if (UNLIKELY(workSpace_ == 0)) {
            InitializeInPostFork();
        }
        spaceStart_ = workSpace_;
        spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
    }

    void FinishBase()
    {
        while (!agedSpaces_.empty()) {
            GetSpaceChunk()->Free(reinterpret_cast<void *>(agedSpaces_.back()));
            agedSpaces_.pop_back();
        }
    }

    void FinishInPreFork()
    {
        ASSERT(workSpace_ != 0);
        GetSpaceChunk()->Free(reinterpret_cast<void *>(workSpace_));
        workSpace_ = 0;
        spaceStart_ = 0;
        spaceEnd_ = 0;
    }

    void InitializeInPostFork()
    {
        ASSERT(workSpace_ == 0);
        auto allocatedSpace = GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE);
        ASSERT(allocatedSpace != nullptr);
        workSpace_ = ToUintPtr(allocatedSpace);
    }

    inline WorkNode *AllocateWorkNode();

    virtual size_t Finish()
    {
        LOG_ECMA(FATAL) << "WorkManagerBase::Finish should not be called directly";
        return 0;
    }

    Mutex mtx_;

private:
    NO_COPY_SEMANTIC(WorkManagerBase);
    NO_MOVE_SEMANTIC(WorkManagerBase);

    WorkSpaceChunk spaceChunk_;
    uintptr_t workSpace_;
    uintptr_t spaceStart_;
    uintptr_t spaceEnd_;
    std::vector<uintptr_t> agedSpaces_;
};

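// Work manager for local-heap parallel GC: one WorkNodeHolder per worker thread plus a
// global work stack for balancing work between them.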
class WorkManager : public WorkManagerBase {
public:
    WorkManager() = delete;
    inline WorkManager(Heap *heap, uint32_t threadNum);
    inline ~WorkManager() override;

    inline void Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase);
    inline size_t Finish() override;
    inline void Finish(size_t &aliveSize, size_t &promotedSize);

    inline uint32_t GetTotalThreadNum()
    {
        return threadNum_;
    }

    inline bool HasInitialized() const
    {
        return initialized_.load(std::memory_order_acquire);
    }

    inline WorkNodeHolder *GetWorkNodeHolder(uint32_t threadId)
    {
        return &works_.at(threadId);
    }

private:
    NO_COPY_SEMANTIC(WorkManager);
    NO_MOVE_SEMANTIC(WorkManager);

    Heap *heap_;
    uint32_t threadNum_;
    std::array<WorkNodeHolder, MAX_TASKPOOL_THREAD_NUM + 1> works_;
    GlobalWorkStack workStack_ {};
    ParallelGCTaskPhase parallelGCTaskPhase_ {ParallelGCTaskPhase::UNDEFINED_TASK};
    std::atomic<bool> initialized_ {false};
};

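// Per-thread working context for shared-heap GC.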
struct SharedGCWorkNodeHolder {
    WorkNode *inNode_ {nullptr};
    WorkNode *cachedInNode_ {nullptr};
    WorkNode *outNode_ {nullptr};
    ProcessQueue *weakQueue_ {nullptr};
    SharedTlabAllocator *allocator_ {nullptr};
    size_t aliveSize_ = 0;
};

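// Work manager for shared-heap (cross-thread) GC, mirroring WorkManager but using
// SharedTlabAllocator and per-thread SharedGCWorkNodeHolders.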
class SharedGCWorkManager : public WorkManagerBase {
public:
    inline SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum);
    inline ~SharedGCWorkManager() override;

    inline void Initialize(TriggerGCType gcType, SharedParallelMarkPhase taskPhase);
    inline size_t Finish() override;

    inline SharedTlabAllocator *GetTlabAllocator(uint32_t threadId) const
    {
        return works_.at(threadId).allocator_;
    }

    inline void IncreaseAliveSize(uint32_t threadId, size_t size)
    {
        works_.at(threadId).aliveSize_ += size;
    }

    inline bool Push(uint32_t threadId, TaggedObject *object);
    inline bool PushToLocalMarkingBuffer(WorkNode *&markingBuffer, TaggedObject *object);
    inline bool Pop(uint32_t threadId, TaggedObject **object);

    inline bool PopWorkNodeFromGlobal(uint32_t threadId);
    inline void PushWorkNodeToGlobal(uint32_t threadId, bool postTask = true);
    inline void PushLocalBufferToGlobal(WorkNode *&node, bool postTask = true);

    inline void PushWeakReference(uint32_t threadId, JSTaggedType *weak)
    {
        works_.at(threadId).weakQueue_->PushBack(weak);
    }

    inline ProcessQueue *GetWeakReferenceQueue(uint32_t threadId) const
    {
        return works_.at(threadId).weakQueue_;
    }

    inline uint32_t GetTotalThreadNum()
    {
        return threadNum_;
    }

    inline bool HasInitialized() const
    {
        return initialized_.load(std::memory_order_acquire);
    }

private:
    NO_COPY_SEMANTIC(SharedGCWorkManager);
    NO_MOVE_SEMANTIC(SharedGCWorkManager);

    SharedHeap *sHeap_;
    uint32_t threadNum_;
    std::array<SharedGCWorkNodeHolder, MAX_TASKPOOL_THREAD_NUM + 1> works_;
    std::array<ContinuousStack<JSTaggedType> *, MAX_TASKPOOL_THREAD_NUM + 1> continuousQueue_;
    GlobalWorkStack workStack_;
    std::atomic<bool> initialized_ {false};
    SharedParallelMarkPhase sharedTaskPhase_;
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_WORK_MANAGER_H