/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_WORK_MANAGER_H
#define ECMASCRIPT_MEM_WORK_MANAGER_H

#include "common_components/taskpool/runner.h"
#include "ecmascript/cross_vm/work_manager_hybrid.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/slots.h"
#include "ecmascript/mem/work_space_chunk.h"

namespace panda::ecmascript {
using SlotNeedUpdate = std::pair<TaggedObject *, ObjectSlot>;

static constexpr uint32_t MARKSTACK_MAX_SIZE = 100;
static constexpr uint32_t STACK_AREA_SIZE = sizeof(uintptr_t) * MARKSTACK_MAX_SIZE;

class Heap;
class SharedHeap;
class Stack;
class SemiSpaceCollector;
class TlabAllocator;
class SharedTlabAllocator;
class Region;
class WorkSpaceChunk;
class WorkManager;

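// Tags identifying which parallel GC pass a taskpool worker is draining the
// global work pool for (old-space, compress, concurrent, or unified mark).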
enum ParallelGCTaskPhase {
    OLD_HANDLE_GLOBAL_POOL_TASK,
    COMPRESS_HANDLE_GLOBAL_POOL_TASK,
    CONCURRENT_HANDLE_GLOBAL_POOL_TASK,
    UNIFIED_HANDLE_GLOBAL_POOL_TASK,
    UNDEFINED_TASK,
    TASK_LAST  // Number of different common::Task phases
};

enum SharedParallelMarkPhase {
    SHARED_MARK_TASK,
    SHARED_COMPRESS_TASK,
    SHARED_UNDEFINED_TASK,
    SHARED_TASK_LAST  // Number of different common::Task phases
};

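// A unit of marking work: wraps a mark stack of object pointers and carries a
// next_ link so nodes can be chained into a GlobalWorkStack.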
class WorkNode {
public:
    explicit WorkNode(Stack *stack) : next_(nullptr), stack_(stack) {}
    ~WorkNode() = default;

    NO_COPY_SEMANTIC(WorkNode);
    NO_MOVE_SEMANTIC(WorkNode);

    bool PushObject(uintptr_t obj)
    {
        return stack_->PushBackChecked(obj);
    }

    bool PopObject(uintptr_t *obj)
    {
        if (IsEmpty()) {
            return false;
        }
        *obj = stack_->PopBackUnchecked();
        return true;
    }

    bool IsEmpty() const
    {
        return stack_->IsEmpty();
    }

    WorkNode *Next() const
    {
        return next_;
    }

    void SetNext(WorkNode *node)
    {
        next_ = node;
    }

private:
    WorkNode *next_;
    Stack *stack_;
};

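// A mutex-protected, intrusive LIFO list of WorkNodes shared between GC worker
// threads; workers push full nodes here and pop nodes when their local work runs out.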
class GlobalWorkStack {
public:
    GlobalWorkStack() : top_(nullptr) {}
    ~GlobalWorkStack() = default;

    NO_COPY_SEMANTIC(GlobalWorkStack);
    NO_MOVE_SEMANTIC(GlobalWorkStack);

    void Push(WorkNode *node)
    {
        if (node == nullptr) {
            return;
        }
        LockHolder lock(mtx_);
        node->SetNext(top_);
        top_ = node;
    }

    bool Pop(WorkNode **node)
    {
        LockHolder lock(mtx_);
        if (top_ == nullptr) {
            return false;
        }
        *node = top_;
        top_ = top_->Next();
        return true;
    }

    void Clear()
    {
        if (top_ != nullptr) {
            LOG_ECMA(ERROR) << "GlobalWorkStack is not nullptr in WorkManager::Finish.";
        }
        top_ = nullptr;
    }

private:
    WorkNode *top_ {nullptr};
    Mutex mtx_;
};

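// Per-thread marking context: local in/out WorkNodes, weak-reference and JSWeakMap
// queues, a TLAB allocator, and alive/promoted size counters. WorkManager owns one
// holder per taskpool thread slot and hands them out by threadId.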
class WorkNodeHolder {
public:
    WorkNodeHolder() = default;
    ~WorkNodeHolder() = default;

    NO_COPY_SEMANTIC(WorkNodeHolder);
    NO_MOVE_SEMANTIC(WorkNodeHolder);

    inline void Setup(Heap *heap, WorkManager *workManager, GlobalWorkStack *workStack);
    inline void Destroy();
    inline void Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase);
    inline void Finish();

    inline bool Push(TaggedObject *object);
    inline bool Pop(TaggedObject **object);
    inline bool PopWorkNodeFromGlobal();
    inline void PushWorkNodeToGlobal(bool postTask = true);

    inline void PushWeakReference(JSTaggedType *weak);

    inline void PushJSWeakMap(TaggedObject *jsWeakMap);

    inline void IncreaseAliveSize(size_t size);

    inline void IncreasePromotedSize(size_t size);

    inline ProcessQueue *GetWeakReferenceQueue() const;

    inline JSWeakMapProcessQueue *GetJSWeakMapQueue() const;

    inline TlabAllocator *GetTlabAllocator() const;

    inline JSThread *GetJSThread() const;
private:
    Heap *heap_ {nullptr};
    WorkManager *workManager_ {nullptr};
    GlobalWorkStack *workStack_ {nullptr};
    ParallelGCTaskPhase parallelGCTaskPhase_ {ParallelGCTaskPhase::UNDEFINED_TASK};

    WorkNode *inNode_ {nullptr};
    WorkNode *outNode_ {nullptr};
    WorkNode *cachedInNode_ {nullptr};
    ProcessQueue *weakQueue_ {nullptr};
    JSWeakMapProcessQueue *jsWeakMapQueue_ {nullptr};
    ContinuousStack<JSTaggedType> *continuousQueue_ {nullptr};
    ContinuousStack<TaggedObject> *continuousJSWeakMapQueue_ {nullptr};
    TlabAllocator *allocator_ {nullptr};
    size_t aliveSize_ {0};
    size_t promotedSize_ {0};

    friend class WorkManager;
};

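// Common base for work managers: owns the WorkSpaceChunk that backs WorkNode
// allocation (AllocateWorkNode) and manages the work space across fork via
// FinishInPreFork / InitializeInPostFork.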
class WorkManagerBase {
public:
    inline WorkManagerBase(NativeAreaAllocator *allocator);
    inline virtual ~WorkManagerBase();

    WorkSpaceChunk *GetSpaceChunk() const
    {
        return const_cast<WorkSpaceChunk *>(&spaceChunk_);
    }

    void InitializeBase()
    {
        if (UNLIKELY(workSpace_ == 0)) {
            InitializeInPostFork();
        }
        spaceStart_ = workSpace_;
        spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
    }

    void FinishBase()
    {
        while (!agedSpaces_.empty()) {
            GetSpaceChunk()->Free(reinterpret_cast<void *>(agedSpaces_.back()));
            agedSpaces_.pop_back();
        }
    }

    void FinishInPreFork()
    {
        ASSERT(workSpace_ != 0);
        GetSpaceChunk()->Free(reinterpret_cast<void *>(workSpace_));
        workSpace_ = 0;
        spaceStart_ = 0;
        spaceEnd_ = 0;
    }

    void InitializeInPostFork()
    {
        ASSERT(workSpace_ == 0);
        auto allocatedSpace = GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE);
        ASSERT(allocatedSpace != nullptr);
        workSpace_ = ToUintPtr(allocatedSpace);
    }

    inline WorkNode *AllocateWorkNode();
    virtual size_t Finish()
    {
        LOG_ECMA(FATAL) << " WorkManagerBase Finish";
        return 0;
    }

    Mutex mtx_;
private:
    NO_COPY_SEMANTIC(WorkManagerBase);
    NO_MOVE_SEMANTIC(WorkManagerBase);

    WorkSpaceChunk spaceChunk_;
    uintptr_t workSpace_;
    uintptr_t spaceStart_;
    uintptr_t spaceEnd_;
    std::vector<uintptr_t> agedSpaces_;
};

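// Work manager for GC on a Heap: keeps common::MAX_TASKPOOL_THREAD_NUM + 1
// WorkNodeHolders (one per thread slot) and a GlobalWorkStack for sharing
// overflow work between them.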
class WorkManager : public WorkManagerBase {
public:
    WorkManager() = delete;
    inline WorkManager(Heap *heap, uint32_t threadNum);
    inline ~WorkManager() override;

    inline void Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase);
    inline size_t Finish() override;
    inline void Finish(size_t &aliveSize, size_t &promotedSize);

    inline uint32_t GetTotalThreadNum()
    {
        return threadNum_;
    }

    inline bool HasInitialized() const
    {
        return initialized_.load(std::memory_order_acquire);
    }

    inline WorkNodeHolder *GetWorkNodeHolder(uint32_t threadId)
    {
        return &works_.at(threadId);
    }
    WORKMANAGER_PUBLIC_HYBRID_EXTENSION();
private:
    NO_COPY_SEMANTIC(WorkManager);
    NO_MOVE_SEMANTIC(WorkManager);

    Heap *heap_;
    uint32_t threadNum_;
    std::array<WorkNodeHolder, common::MAX_TASKPOOL_THREAD_NUM + 1> works_;
    GlobalWorkStack workStack_ {};
    ParallelGCTaskPhase parallelGCTaskPhase_ {ParallelGCTaskPhase::UNDEFINED_TASK};
    std::atomic<bool> initialized_ {false};
};

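// Per-thread work context for shared-heap GC; plain-struct counterpart of WorkNodeHolder.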
struct SharedGCWorkNodeHolder {
    WorkNode *inNode_ {nullptr};
    WorkNode *cachedInNode_ {nullptr};
    WorkNode *outNode_ {nullptr};
    ProcessQueue *weakQueue_ {nullptr};
    SharedTlabAllocator *allocator_ {nullptr};
    size_t aliveSize_ = 0;
};

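// Work manager for SharedHeap GC; mirrors WorkManager but keys every operation by
// the caller's threadId instead of handing out holder objects.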
class SharedGCWorkManager : public WorkManagerBase {
public:
    inline SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum);
    inline ~SharedGCWorkManager() override;

    inline void Initialize(TriggerGCType gcType, SharedParallelMarkPhase taskPhase);
    inline size_t Finish() override;

    inline SharedTlabAllocator *GetTlabAllocator(uint32_t threadId) const
    {
        return works_.at(threadId).allocator_;
    }

    inline void IncreaseAliveSize(uint32_t threadId, size_t size)
    {
        works_.at(threadId).aliveSize_ += size;
    }

    inline bool Push(uint32_t threadId, TaggedObject *object);
    inline bool PushToLocalMarkingBuffer(WorkNode *&markingBuffer, TaggedObject *object);
    inline bool Pop(uint32_t threadId, TaggedObject **object);

    inline bool PopWorkNodeFromGlobal(uint32_t threadId);
    inline void PushWorkNodeToGlobal(uint32_t threadId, bool postTask = true);
    inline void PushLocalBufferToGlobal(WorkNode *&node, bool postTask = true);

    inline void PushWeakReference(uint32_t threadId, JSTaggedType *weak)
    {
        works_.at(threadId).weakQueue_->PushBack(weak);
    }

    inline ProcessQueue *GetWeakReferenceQueue(uint32_t threadId) const
    {
        return works_.at(threadId).weakQueue_;
    }

    inline uint32_t GetTotalThreadNum()
    {
        return threadNum_;
    }

    inline bool HasInitialized() const
    {
        return initialized_.load(std::memory_order_acquire);
    }

private:
    NO_COPY_SEMANTIC(SharedGCWorkManager);
    NO_MOVE_SEMANTIC(SharedGCWorkManager);

    SharedHeap *sHeap_;
    uint32_t threadNum_;
    std::array<SharedGCWorkNodeHolder, common::MAX_TASKPOOL_THREAD_NUM + 1> works_;
    std::array<ContinuousStack<JSTaggedType> *, common::MAX_TASKPOOL_THREAD_NUM + 1> continuousQueue_;
    GlobalWorkStack workStack_;
    std::atomic<bool> initialized_ {false};
    SharedParallelMarkPhase sharedTaskPhase_;
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_WORK_MANAGER_H