/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_WORK_MANAGER_H
#define ECMASCRIPT_MEM_WORK_MANAGER_H

#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/slots.h"
#include "ecmascript/mem/work_space_chunk.h"
#include "ecmascript/taskpool/taskpool.h"

namespace panda::ecmascript {
using SlotNeedUpdate = std::pair<TaggedObject *, ObjectSlot>;

static constexpr uint32_t MARKSTACK_MAX_SIZE = 100;
static constexpr uint32_t STACK_AREA_SIZE = sizeof(uintptr_t) * MARKSTACK_MAX_SIZE;

class Heap;
class Stack;
class SemiSpaceCollector;
class TlabAllocator;
class Region;
class WorkSpaceChunk;

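// Identifies which parallel GC phase a task posted to the task pool belongs to.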
enum ParallelGCTaskPhase {
    SEMI_HANDLE_THREAD_ROOTS_TASK,
    SEMI_HANDLE_SNAPSHOT_TASK,
    SEMI_HANDLE_GLOBAL_POOL_TASK,
    OLD_HANDLE_GLOBAL_POOL_TASK,
    COMPRESS_HANDLE_GLOBAL_POOL_TASK,
    CONCURRENT_HANDLE_GLOBAL_POOL_TASK,
    CONCURRENT_HANDLE_OLD_TO_NEW_TASK,
    UNDEFINED_TASK,
    TASK_LAST  // Number of task phases
};

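// A WorkNode wraps a mark stack of object pointers and carries an intrusive
// next pointer, so batches of marking work can be chained into lists and
// exchanged between GC threads through the GlobalWorkStack below.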
class WorkNode {
public:
    explicit WorkNode(Stack *stack) : next_(nullptr), stack_(stack) {}
    ~WorkNode() = default;

    NO_COPY_SEMANTIC(WorkNode);
    NO_MOVE_SEMANTIC(WorkNode);

    bool PushObject(uintptr_t obj)
    {
        return stack_->PushBackChecked(obj);
    }

    bool PopObject(uintptr_t *obj)
    {
        if (IsEmpty()) {
            return false;
        }
        *obj = stack_->PopBackUnchecked();
        return true;
    }

    bool IsEmpty() const
    {
        return stack_->IsEmpty();
    }

    WorkNode *Next() const
    {
        return next_;
    }

    void SetNext(WorkNode *node)
    {
        next_ = node;
    }

private:
    WorkNode *next_;
    Stack *stack_;
};

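// A mutex-protected LIFO list of WorkNodes shared by all GC worker threads.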
class GlobalWorkStack {
public:
    GlobalWorkStack() : top_(nullptr) {}
    ~GlobalWorkStack() = default;

    NO_COPY_SEMANTIC(GlobalWorkStack);
    NO_MOVE_SEMANTIC(GlobalWorkStack);

    void Push(WorkNode *node)
    {
        if (node == nullptr) {
            return;
        }
        LockHolder lock(mtx_);
        node->SetNext(top_);
        top_ = node;
    }

    bool Pop(WorkNode **node)
    {
        LockHolder lock(mtx_);
        if (top_ == nullptr) {
            return false;
        }
        *node = top_;
        top_ = top_->Next();
        return true;
    }

private:
    WorkNode *top_ {nullptr};
    Mutex mtx_;
};

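// Per-thread working state: the in/out work nodes currently being drained and
// filled, a queue of weak references found during marking, slots whose updates
// are pending, a thread-local allocator, and counters for surviving and
// promoted bytes.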
struct WorkNodeHolder {
    WorkNode *inNode_ {nullptr};
    WorkNode *outNode_ {nullptr};
    ProcessQueue *weakQueue_ {nullptr};
    std::vector<SlotNeedUpdate> pendingUpdateSlots_;
    TlabAllocator *allocator_ {nullptr};
    size_t aliveSize_ = 0;
    size_t promotedSize_ = 0;
};

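// Coordinates parallel GC marking: keeps one WorkNodeHolder per worker thread,
// allocates WorkNodes out of a dedicated work space chunk, and balances load by
// moving nodes between per-thread holders and the global work stack.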
class WorkManager final {
public:
    WorkManager() = delete;
    WorkManager(Heap *heap, uint32_t threadNum);
    ~WorkManager();

    void Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase);
    size_t Finish();
    void Finish(size_t &aliveSize, size_t &promotedSize);

    bool Push(uint32_t threadId, TaggedObject *object);
    bool Push(uint32_t threadId, TaggedObject *object, Region *region);
    bool Pop(uint32_t threadId, TaggedObject **object);

    bool PopWorkNodeFromGlobal(uint32_t threadId);
    void PushWorkNodeToGlobal(uint32_t threadId, bool postTask = true);

    inline void PushWeakReference(uint32_t threadId, JSTaggedType *weak)
    {
        works_.at(threadId).weakQueue_->PushBack(weak);
    }

    inline void IncreaseAliveSize(uint32_t threadId, size_t size)
    {
        works_.at(threadId).aliveSize_ += size;
    }

    inline void IncreasePromotedSize(uint32_t threadId, size_t size)
    {
        works_.at(threadId).promotedSize_ += size;
    }

    inline ProcessQueue *GetWeakReferenceQueue(uint32_t threadId) const
    {
        return works_.at(threadId).weakQueue_;
    }

    inline TlabAllocator *GetTlabAllocator(uint32_t threadId) const
    {
        return works_.at(threadId).allocator_;
    }

    inline void PushSlotNeedUpdate(uint32_t threadId, SlotNeedUpdate slot)
    {
        works_.at(threadId).pendingUpdateSlots_.emplace_back(slot);
    }

    inline bool GetSlotNeedUpdate(uint32_t threadId, SlotNeedUpdate *slot)
    {
        std::vector<SlotNeedUpdate> &pendingUpdateSlots = works_.at(threadId).pendingUpdateSlots_;
        if (pendingUpdateSlots.empty()) {
            return false;
        }
        *slot = pendingUpdateSlots.back();
        pendingUpdateSlots.pop_back();
        return true;
    }

    inline uint32_t GetTotalThreadNum()
    {
        return threadNum_;
    }

    inline bool HasInitialized() const
    {
        return initialized_.load(std::memory_order_acquire);
    }

    WorkSpaceChunk *GetSpaceChunk() const
    {
        return const_cast<WorkSpaceChunk *>(&spaceChunk_);
    }

private:
    NO_COPY_SEMANTIC(WorkManager);
    NO_MOVE_SEMANTIC(WorkManager);

    WorkNode *AllocateWorkNode();

    Heap *heap_;
    uint32_t threadNum_;
    WorkSpaceChunk spaceChunk_;
    std::array<WorkNodeHolder, MAX_TASKPOOL_THREAD_NUM + 1> works_;
    std::array<ContinuousStack<JSTaggedType> *, MAX_TASKPOOL_THREAD_NUM + 1> continuousQueue_;
    GlobalWorkStack workStack_;
    uintptr_t workSpace_;
    uintptr_t spaceStart_;
    uintptr_t spaceEnd_;
    std::vector<uintptr_t> agedSpaces_;
    Mutex mtx_;
    ParallelGCTaskPhase parallelGCTaskPhase_;
    std::atomic<bool> initialized_ {false};
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_WORK_MANAGER_H