1 /*
2 * Copyright (c) 2021 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "ecmascript/mem/concurrent_marker.h"
17
18 #include "common_components/taskpool/taskpool.h"
19 #include "ecmascript/mem/idle_gc_trigger.h"
20 #include "ecmascript/mem/old_gc_visitor-inl.h"
21 #include "ecmascript/mem/parallel_marker.h"
22 #include "ecmascript/mem/young_gc_visitor-inl.h"
23 #include "ecmascript/runtime_call_id.h"
24
25 namespace panda::ecmascript {
// Process-wide count of concurrent mark tasks currently reserved in the
// shared task pool; guarded by taskCountMutex_ (see TryIncreaseTaskCounts).
size_t ConcurrentMarker::taskCounts_ = 0;
Mutex ConcurrentMarker::taskCountMutex_;
28
// Constructs a concurrent marker bound to the given heap. Caches the owning
// VM, its JS thread and the heap's work manager, records the configured
// enable/disable mode, and starts the thread in the READY_TO_MARK state.
ConcurrentMarker::ConcurrentMarker(Heap *heap, EnableConcurrentMarkType type)
    : heap_(heap),
      vm_(heap->GetEcmaVM()),
      thread_(vm_->GetJSThread()),
      workManager_(heap->GetWorkManager()),
      enableMarkType_(type)
{
    thread_->SetMarkStatus(MarkStatus::READY_TO_MARK);
}
38
TryIncreaseTaskCounts()39 bool ConcurrentMarker::TryIncreaseTaskCounts()
40 {
41 size_t taskPoolSize = common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
42 {
43 LockHolder holder(taskCountMutex_);
44 // total counts of running concurrent mark tasks should be less than taskPoolSize
45 if (taskCounts_ + 1 < taskPoolSize) {
46 taskCounts_++;
47 return true;
48 }
49 }
50 LOG_FULL(INFO) << "Concurrent mark tasks in taskPool are full";
51 return false;
52 }
53
EnableConcurrentMarking(EnableConcurrentMarkType type)54 void ConcurrentMarker::EnableConcurrentMarking(EnableConcurrentMarkType type)
55 {
56 if (IsConfigDisabled()) {
57 return;
58 }
59 if (IsEnabled() && !thread_->IsReadyToConcurrentMark() && type == EnableConcurrentMarkType::DISABLE) {
60 enableMarkType_ = EnableConcurrentMarkType::REQUEST_DISABLE;
61 } else {
62 enableMarkType_ = type;
63 }
64 }
65
MarkRoots()66 void ConcurrentMarker::MarkRoots()
67 {
68 if (heap_->IsYoungMark()) {
69 YoungGCMarkRootVisitor youngGCMarkRootVisitor(workManager_->GetWorkNodeHolder(MAIN_THREAD_INDEX));
70 heap_->GetNonMovableMarker()->MarkRoots(youngGCMarkRootVisitor);
71 } else {
72 OldGCMarkRootVisitor oldGCMarkRootVisitor(workManager_->GetWorkNodeHolder(MAIN_THREAD_INDEX));
73 heap_->GetNonMovableMarker()->MarkRoots(oldGCMarkRootVisitor);
74 }
75 }
76
// Entry point for a concurrent marking cycle, run on the JS thread: records
// tracing/statistics, initializes marking state, then posts the global mark
// stack to the GC task pool for parallel draining.
void ConcurrentMarker::Mark()
{
    GCStats *gcStats = heap_->GetEcmaVM()->GetEcmaGCStats();
    RecursionScope recurScope(this);
    TRACE_GC(GCStats::Scope::ScopeId::ConcurrentMark, gcStats);
    LOG_GC(DEBUG) << "ConcurrentMarker: Concurrent Marking Begin";
    // One hitrace event carrying a snapshot of the heap/GC state at mark start.
    ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK,
        ("ConcurrentMarker::Mark" + std::to_string(heap_->IsFullMarkRequested())
        + ";MarkReason" + std::to_string(static_cast<int>(gcStats->GetMarkReason()))
        + ";Sensitive" + std::to_string(static_cast<int>(heap_->GetSensitiveStatus()))
        + ";IsInBackground" + std::to_string(heap_->IsInBackground())
        + ";Startup" + std::to_string(static_cast<int>(heap_->GetStartupStatus()))
        + ";ConMark" + std::to_string(static_cast<int>(heap_->GetJSThread()->GetMarkStatus()))
        + ";Young" + std::to_string(heap_->GetNewSpace()->GetCommittedSize())
        + ";Old" + std::to_string(heap_->GetOldSpace()->GetCommittedSize())
        + ";TotalCommit" + std::to_string(heap_->GetCommittedSize())
        + ";NativeBindingSize" + std::to_string(heap_->GetNativeBindingSize())
        + ";NativeLimitSize" + std::to_string(heap_->GetGlobalSpaceNativeLimit())).c_str(), "");
    MEM_ALLOCATE_AND_GC_TRACE(vm_, ConcurrentMarking);
    ASSERT(runningTaskCount_ == 0);
    // Keep the running-task count non-zero while initialization is in
    // progress (presumably so ShouldNotifyMarkingFinished cannot fire
    // early — confirm against pool-task lifecycle).
    runningTaskCount_.fetch_add(1, std::memory_order_relaxed);
    InitializeMarking();
    clockScope_.Reset();
    runningTaskCount_.fetch_sub(1, std::memory_order_relaxed);
    // Hand the populated global work pool to the parallel GC tasks.
    heap_->PostParallelGCTask(ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK);
}
103
// Finalizes the marking work: delegates cleanup of the per-thread work
// nodes to the work manager.
void ConcurrentMarker::Finish()
{
    workManager_->Finish();
}
108
// Stop-the-world re-mark pass: re-scans roots and drains the remaining mark
// stack on the main thread, then waits for any still-running marking tasks.
void ConcurrentMarker::ReMark()
{
    TRACE_GC(GCStats::Scope::ScopeId::ReMark, heap_->GetEcmaVM()->GetEcmaGCStats());
    LOG_GC(DEBUG) << "ConcurrentMarker: Remarking Begin";
    MEM_ALLOCATE_AND_GC_TRACE(vm_, ReMarking);
    Marker *nonMovableMarker = heap_->GetNonMovableMarker();
    MarkRoots();
    nonMovableMarker->ProcessMarkStack(MAIN_THREAD_INDEX);
    heap_->WaitRunningTaskFinished();
    // MarkJitCodeMap must be called only after all other marking work has
    // finished, so that it can observe which JSError objects are alive.
    nonMovableMarker->MarkJitCodeMap(MAIN_THREAD_INDEX);
}
121
HandleMarkingFinished(GCReason gcReason)122 void ConcurrentMarker::HandleMarkingFinished(GCReason gcReason) // js-thread wait for sweep
123 {
124 TriggerGCType gcType;
125 if (heap_->IsConcurrentFullMark()) {
126 gcType = TriggerGCType::OLD_GC;
127 } else {
128 gcType = TriggerGCType::YOUNG_GC;
129 }
130 heap_->CollectGarbage(gcType, gcReason);
131 }
132
// Blocks the EcmaVM (JS) thread until FinishMarking() sets markingFinished_
// and signals the condition variable. The while-loop guards against
// spurious wakeups.
void ConcurrentMarker::WaitMarkingFinished() // call in EcmaVm thread, wait for mark finished
{
    LockHolder lock(waitMarkingFinishedMutex_);
    while (!markingFinished_) {
        waitMarkingFinishedCV_.Wait(&waitMarkingFinishedMutex_);
    }
}
140
// Resets the marker back to the READY_TO_MARK state once the concurrent
// mark cycle has been consumed (or abandoned). When revertCSet is true,
// the tentatively selected collection set is reverted and per-region mark
// state is cleared.
void ConcurrentMarker::Reset(bool revertCSet)
{
    ASSERT(runningTaskCount_ == 0);
    Finish();
    thread_->SetMarkStatus(MarkStatus::READY_TO_MARK);
    isConcurrentMarking_ = false;
    markingFinished_ = false;
    notifyMarkingFinished_ = false;
    if (revertCSet) {
        // Partial GC clears the CSet itself when the evacuation allocator
        // finalizes; here we undo the selection made in InitializeMarking.
        heap_->GetOldSpace()->RevertCSet();
        auto callback = [](Region *region) {
            region->ResetRegionTypeFlag();
            region->ClearMarkGCBitset();
            region->ClearCrossRegionRSet();
            region->ResetAliveObject();
        };
        // A full mark touched every region; a young mark only touched new space.
        if (heap_->IsConcurrentFullMark()) {
            heap_->EnumerateRegions(callback);
        } else {
            heap_->EnumerateNewSpaceRegions(callback);
        }
    }
}
165
// Prepares all marking state on the JS thread before the parallel tasks
// start: flips the mark status, selects the collection set for full marks,
// seeds the work manager, and marks the roots into the main-thread node.
void ConcurrentMarker::InitializeMarking()
{
    MEM_ALLOCATE_AND_GC_TRACE(vm_, ConcurrentMarkingInitialize);
    heap_->Prepare();
    ASSERT(VerifyAllRegionsNonFresh());
    // The region currently being allocated into is treated as half-fresh so
    // objects allocated during concurrent marking are handled specially.
    heap_->GetNewSpace()->RecordCurrentRegionAsHalfFresh();
    isConcurrentMarking_ = true;
    thread_->SetMarkStatus(MarkStatus::MARKING);

    if (heap_->IsConcurrentFullMark()) {
        heapObjectSize_ = heap_->GetHeapObjectSize();
        heap_->GetOldSpace()->SelectCSet();
        // AppSpawn space keeps no cross-cycle mark state: clear bitsets/RSets.
        heap_->GetAppSpawnSpace()->EnumerateRegions([](Region *current) {
            current->ClearMarkGCBitset();
            current->ClearCrossRegionRSet();
        });
        // The alive object size of Region in OldSpace will be recalculated.
        heap_->EnumerateNonNewSpaceRegions([](Region *current) {
            current->ResetAliveObject();
        });
    } else {
        heapObjectSize_ = heap_->GetNewSpace()->GetHeapObjectSize();
    }
    // NOTE(review): OLD_GC is passed for both young and full marks here —
    // presumably the work manager only uses it to size the global pool phase;
    // confirm against WorkManager::Initialize.
    workManager_->Initialize(TriggerGCType::OLD_GC, ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK);
    if (heap_->IsYoungMark()) {
        NonMovableMarker *marker = static_cast<NonMovableMarker*>(heap_->GetNonMovableMarker());
        {
            ECMA_BYTRACE_NAME(HITRACE_LEVEL_COMMERCIAL, HITRACE_TAG_ARK, "GC::MarkOldToNew", "");
            // Young mark: objects referenced from old space count as roots.
            marker->ProcessOldToNewNoMarkStack(MAIN_THREAD_INDEX);
        }
        marker->ProcessSnapshotRSetNoMarkStack(MAIN_THREAD_INDEX);
    }
    MarkRoots();
    // Publish the main-thread work node so pool tasks can steal from it.
    workManager_->GetWorkNodeHolder(MAIN_THREAD_INDEX)->PushWorkNodeToGlobal(false);
}
201
// Called by each pool task when its mark stack is drained. Returns true for
// exactly one caller: the last running task (fetch_sub observes count 1)
// that also wins the exchange on notifyMarkingFinished_. That caller is then
// responsible for calling FinishMarking().
bool ConcurrentMarker::ShouldNotifyMarkingFinished()
{
    if (runningTaskCount_.fetch_sub(1, std::memory_order_relaxed) != 1) {
        return false;
    }
    // NOTE(review): treating a plain bool member as std::atomic<bool> via
    // reinterpret_cast is formally undefined behavior even if it works on the
    // supported toolchains; consider declaring the flag std::atomic<bool>.
    return reinterpret_cast<std::atomic<bool>*>(&notifyMarkingFinished_)
        ->exchange(true, std::memory_order_relaxed) == false;
}
210
// Runs on the single pool task selected by ShouldNotifyMarkingFinished():
// records timing and heap-size statistics, flips the mark status to
// MARK_FINISHED, wakes any thread blocked in WaitMarkingFinished(), and
// releases this task's slot in the global task counter.
void ConcurrentMarker::FinishMarking()
{
    LockHolder lock(waitMarkingFinishedMutex_);
    ASSERT(!markingFinished_);
    ASSERT(notifyMarkingFinished_);
    float spendTime = clockScope_.TotalSpentTime();
    if (heap_->IsYoungMark()) {
        heapObjectSize_ = heap_->GetNewSpace()->GetHeapObjectSize();
    } else if (heap_->IsConcurrentFullMark()) {
        heapObjectSize_ = heap_->GetHeapObjectSize();
    }
    SetDuration(spendTime);
    if (heap_->IsFullMarkRequested()) {
        heap_->SetFullMarkRequestedState(false);
    }
    thread_->SetMarkStatus(MarkStatus::MARK_FINISHED);
    // Prompt the JS thread to reach a safepoint and act on the finished mark.
    thread_->SetCheckSafePointStatus();
    markingFinished_ = true;
    waitMarkingFinishedCV_.Signal();
    DecreaseTaskCounts();
}
232
ProcessConcurrentMarkTask(uint32_t threadId)233 void ConcurrentMarker::ProcessConcurrentMarkTask(uint32_t threadId)
234 {
235 runningTaskCount_.fetch_add(1, std::memory_order_relaxed);
236 heap_->GetNonMovableMarker()->ProcessMarkStack(threadId);
237 if (ShouldNotifyMarkingFinished()) {
238 FinishMarking();
239 heap_->GetIdleGCTrigger()->TryPostHandleMarkFinished();
240 }
241 }
242
VerifyAllRegionsNonFresh()243 bool ConcurrentMarker::VerifyAllRegionsNonFresh()
244 {
245 bool ok = true;
246 heap_->EnumerateRegions([&ok](Region *region) {
247 ok &= !region->IsFreshRegion() && !region->IsHalfFreshRegion();
248 });
249 return ok;
250 }
251 } // namespace panda::ecmascript
252