1 /*
2 * Copyright (c) 2025 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15 #include "common_components/heap/ark_collector/ark_collector.h"
16
17 #include "common_components/common_runtime/hooks.h"
18 #include "common_components/log/log.h"
19 #include "common_components/mutator/mutator_manager-inl.h"
20 #include "common_components/heap/verification.h"
21 #include "common_interfaces/heap/heap_visitor.h"
22 #include "common_interfaces/objects/ref_field.h"
23 #include "common_interfaces/profiler/heap_profiler_listener.h"
24 #include "common_components/objects/string_table_internal.h"
25 #include "common_components/heap/allocator/fix_heap.h"
26
27 #ifdef ENABLE_QOS
28 #include "qos.h"
29 #endif
30
31 namespace common {
32 bool ArkCollector::IsUnmovableFromObject(BaseObject* obj) const
33 {
34 // filter out non-heap objects such as const string objects.
35 if (!Heap::IsHeapAddress(obj)) {
36 return false;
37 }
38
39 RegionDesc* regionInfo = nullptr;
40 regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast<uintptr_t>(obj));
41 return regionInfo->IsUnmovableFromRegion();
42 }
43
44 bool ArkCollector::MarkObject(BaseObject* obj) const
45 {
46 bool marked = RegionSpace::MarkObject(obj);
47 if (!marked) {
48 RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast<HeapAddress>(obj));
49 DCHECK_CC(!region->IsGarbageRegion());
50 DLOG(TRACE, "mark obj %p<%p> in region %p(%u)@%#zx, live %u", obj, obj->GetTypeInfo(),
51 region, region->GetRegionType(), region->GetRegionStart(), region->GetLiveByteCount());
52 }
53 return marked;
54 }
55
56 // This API updates the current pointer as well as the old pointer; the caller should take care of this.
57 template<bool copy>
58 bool ArkCollector::TryUpdateRefFieldImpl(BaseObject* obj, RefField<>& field, BaseObject*& fromObj,
59 BaseObject*& toObj) const
60 {
61 RefField<> oldRef(field);
62 fromObj = oldRef.GetTargetObject();
63 if (IsFromObject(fromObj)) { //LCOV_EXCL_BR_LINE
64 if (copy) { //LCOV_EXCL_BR_LINE
65 toObj = const_cast<ArkCollector*>(this)->TryForwardObject(fromObj);
66 if (toObj != nullptr) { //LCOV_EXCL_BR_LINE
67 HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast<uintptr_t>(fromObj),
68 reinterpret_cast<uintptr_t>(toObj),
69 toObj->GetSize());
70 }
71 } else { //LCOV_EXCL_BR_LINE
72 toObj = FindToVersion(fromObj);
73 }
74 if (toObj == nullptr) { //LCOV_EXCL_BR_LINE
75 return false;
76 }
77 RefField<> tmpField(toObj, oldRef.IsWeak());
78 if (field.CompareExchange(oldRef.GetFieldValue(), tmpField.GetFieldValue())) { //LCOV_EXCL_BR_LINE
79 if (obj != nullptr) { //LCOV_EXCL_BR_LINE
80 DLOG(TRACE, "update obj %p<%p>(%zu)+%zu ref-field@%p: %#zx -> %#zx", obj, obj->GetTypeInfo(),
81 obj->GetSize(), BaseObject::FieldOffset(obj, &field), &field, oldRef.GetFieldValue(),
82 tmpField.GetFieldValue());
83 } else { //LCOV_EXCL_BR_LINE
84 DLOG(TRACE, "update ref@%p: 0x%zx -> %p", &field, oldRef.GetFieldValue(), toObj);
85 }
86 return true;
87 } else { //LCOV_EXCL_BR_LINE
88 if (obj != nullptr) { //LCOV_EXCL_BR_LINE
89 DLOG(TRACE,
90 "update obj %p<%p>(%zu)+%zu but cas failed ref-field@%p: %#zx(%#zx) -> %#zx but cas failed ",
91 obj, obj->GetTypeInfo(), obj->GetSize(), BaseObject::FieldOffset(obj, &field), &field,
92 oldRef.GetFieldValue(), field.GetFieldValue(), tmpField.GetFieldValue());
93 } else { //LCOV_EXCL_BR_LINE
94 DLOG(TRACE, "update but cas failed ref@%p: 0x%zx(%zx) -> %p", &field, oldRef.GetFieldValue(),
95 field.GetFieldValue(), toObj);
96 }
97 return true;
98 }
99 }
100
101 return false;
102 }
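// Editorial note (summary, not from the original sources): the <copy> template parameter selects
// between the two thin wrappers below, roughly:
//   TryForwardRefField()  -> TryUpdateRefFieldImpl<true>() : forward a from-space object via
//                            TryForwardObject() and CAS the field to the to-version;
//   TryUpdateRefField()   -> TryUpdateRefFieldImpl<false>(): only look up an existing to-version
//                            via FindToVersion() and CAS the field if one exists.
// In both cases a CAS failure still counts as success, because the slot can only have been
// overwritten with a newer (to-space) reference by a mutator or another GC thread.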
103
104 bool ArkCollector::TryUpdateRefField(BaseObject* obj, RefField<>& field, BaseObject*& newRef) const
105 {
106 BaseObject* oldRef = nullptr;
107 return TryUpdateRefFieldImpl<false>(obj, field, oldRef, newRef);
108 }
109
110 bool ArkCollector::TryForwardRefField(BaseObject* obj, RefField<>& field, BaseObject*& newRef) const
111 {
112 BaseObject* oldRef = nullptr;
113 return TryUpdateRefFieldImpl<true>(obj, field, oldRef, newRef);
114 }
115
116 // This API untags the current pointer as well as the old pointer; the caller should take care of this.
117 bool ArkCollector::TryUntagRefField(BaseObject* obj, RefField<>& field, BaseObject*& target) const
118 {
119 for (;;) { //LCOV_EXCL_BR_LINE
120 RefField<> oldRef(field);
121 if (oldRef.IsTagged()) { //LCOV_EXCL_BR_LINE
122 target = oldRef.GetTargetObject();
123 RefField<> newRef(target);
124 if (field.CompareExchange(oldRef.GetFieldValue(), newRef.GetFieldValue())) { //LCOV_EXCL_BR_LINE
125 if (obj != nullptr) { //LCOV_EXCL_BR_LINE
126 DLOG(FIX, "untag obj %p<%p>(%zu) ref-field@%p: %#zx -> %#zx", obj, obj->GetTypeInfo(),
127 obj->GetSize(), &field, oldRef.GetFieldValue(), newRef.GetFieldValue());
128 } else { //LCOV_EXCL_BR_LINE
129 DLOG(FIX, "untag ref@%p: %#zx -> %#zx", &field, oldRef.GetFieldValue(), newRef.GetFieldValue());
130 }
131
132 return true;
133 }
134 } else { //LCOV_EXCL_BR_LINE
135 return false;
136 }
137 }
138
139 return false;
140 }
141
142 static void MarkingRefField(BaseObject *obj, BaseObject *targetObj, RefField<> &field,
143 WorkStack &workStack, RegionDesc *targetRegion);
144 // Note: each ref-field will not be marked twice, so each old pointer the marker meets must come from a previous gc.
145 static void MarkingRefField(BaseObject *obj, RefField<> &field, WorkStack &workStack,
146 WeakStack &weakStack, const GCReason gcReason)
147 {
148 RefField<> oldField(field);
149 BaseObject* targetObj = oldField.GetTargetObject();
150
151 if (!Heap::IsTaggedObject(oldField.GetFieldValue())) {
152 return;
153 }
154 // field is tagged object, should be in heap
155 DCHECK_CC(Heap::IsHeapAddress(targetObj));
156
157 auto targetRegion = RegionDesc::GetAliveRegionDescAt(reinterpret_cast<MAddress>((void*)targetObj));
158 if (gcReason != GC_REASON_YOUNG && oldField.IsWeak()) {
159 DLOG(TRACE, "marking: skip weak obj when full gc, object: %p@%p, targetObj: %p", obj, &field, targetObj);
160 // weak refs are cleared after roots pre-forward, so there might be a to-version weak ref which also needs to
161 // be cleared; the offset recorded here will help us find it
162 weakStack.push_back(std::make_shared<std::tuple<RefField<>*, size_t>>(
163 &field, reinterpret_cast<uintptr_t>(&field) - reinterpret_cast<uintptr_t>(obj)));
164 return;
165 }
166
167 // cannot skip objects in EXEMPTED_FROM_REGION, because their rset is incomplete
168 if (gcReason == GC_REASON_YOUNG && !targetRegion->IsInYoungSpace()) {
169 DLOG(TRACE, "marking: skip non-young object %p@%p, target object: %p<%p>(%zu)",
170 obj, &field, targetObj, targetObj->GetTypeInfo(), targetObj->GetSize());
171 return;
172 }
173 common::MarkingRefField(obj, targetObj, field, workStack, targetRegion);
174 }
175
176 // Note: each ref-field will not be marked twice, so each old pointer the marker meets must come from a previous gc.
177 static void MarkingRefField(BaseObject *obj, BaseObject *targetObj, RefField<> &field,
178 WorkStack &workStack, RegionDesc *targetRegion)
179 {
180 if (targetRegion->IsNewObjectSinceMarking(targetObj)) {
181 DLOG(TRACE, "marking: skip new obj %p<%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize());
182 return;
183 }
184
185 if (targetRegion->MarkObject(targetObj)) {
186 DLOG(TRACE, "marking: obj has been marked %p", targetObj);
187 return;
188 }
189
190 DLOG(TRACE, "marking obj %p ref@%p: %p<%p>(%zu)",
191 obj, &field, targetObj, targetObj->GetTypeInfo(), targetObj->GetSize());
192 workStack.push_back(targetObj);
193 }
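// Informal recap of the marking flow implemented by the two overloads above:
//   1. ignore field values that are not tagged heap objects;
//   2. for non-young GC, defer weak references to the weak stack instead of tracing through them;
//   3. for young GC, skip targets outside the young space (those regions are not collected);
//   4. otherwise skip objects allocated after marking started, try to set the mark bit, and push
//      newly marked objects onto the work stack for further tracing.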
194
195 MarkingCollector::MarkingRefFieldVisitor ArkCollector::CreateMarkingObjectRefFieldsVisitor(WorkStack *workStack,
196 WeakStack *weakStack)
197 {
198 MarkingRefFieldVisitor visitor;
199
200 if (gcReason_ == GCReason::GC_REASON_YOUNG) {
201 visitor.SetVisitor([obj = visitor.GetClosure(), workStack, weakStack](RefField<> &field) {
202 const GCReason gcReason = GCReason::GC_REASON_YOUNG;
203 MarkingRefField(*obj, field, *workStack, *weakStack, gcReason);
204 });
205 } else {
206 visitor.SetVisitor([obj = visitor.GetClosure(), workStack, weakStack](RefField<> &field) {
207 const GCReason gcReason = GCReason::GC_REASON_HEU;
208 MarkingRefField(*obj, field, *workStack, *weakStack, gcReason);
209 });
210 }
211 return visitor;
212 }
213
214 void ArkCollector::MarkingObjectRefFields(BaseObject *obj, MarkingRefFieldVisitor *data)
215 {
216 data->SetMarkingRefFieldArgs(obj);
217 obj->ForEachRefField(data->GetRefFieldVisitor());
218 }
219
220 void ArkCollector::FixRefField(BaseObject* obj, RefField<>& field) const
221 {
222 RefField<> oldField(field);
223 BaseObject* targetObj = oldField.GetTargetObject();
224 if (!Heap::IsTaggedObject(oldField.GetFieldValue())) {
225 return;
226 }
227 // target object could be null or non-heap for some static variable.
228 if (!Heap::IsHeapAddress(targetObj)) {
229 return;
230 }
231
232 RegionDesc::InlinedRegionMetaData *refRegion = RegionDesc::InlinedRegionMetaData::GetInlinedRegionMetaData(
233 reinterpret_cast<uintptr_t>(targetObj));
234 bool isFrom = refRegion->IsFromRegion();
235 bool isInRecent = refRegion->IsInRecentSpace();
236 if (isInRecent) {
237 RegionDesc::InlinedRegionMetaData *objRegion = RegionDesc::InlinedRegionMetaData::GetInlinedRegionMetaData(
238 reinterpret_cast<uintptr_t>(obj));
239 if (!objRegion->IsInRecentSpace() &&
240 objRegion->MarkRSetCardTable(obj)) {
241 DLOG(TRACE,
242 "fix phase update point-out remember set of region %p, obj "
243 "%p, ref: <%p>",
244 objRegion, obj, targetObj->GetTypeInfo());
245 }
246 return;
247 } else if (!isFrom) {
248 return;
249 }
250 BaseObject* latest = FindToVersion(targetObj);
251
252 if (latest == nullptr) { return; }
253
254 CHECK_CC(latest->IsValidObject());
255 RefField<> newField(latest, oldField.IsWeak());
256 if (field.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
257 DLOG(FIX, "fix obj %p+%zu ref@%p: %#zx => %p<%p>(%zu)", obj, obj->GetSize(), &field,
258 oldField.GetFieldValue(), latest, latest->GetTypeInfo(), latest->GetSize());
259 }
260 }
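// Summary of the fix-phase policy above: a field is rewritten only when it still points at a
// from-region object that has a forwarded to-version; references into recent (newly allocated)
// regions are left untouched, but the referencing old object is recorded in its region's
// remember-set card table so the next young GC can find the reference.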
261
262 void ArkCollector::FixObjectRefFields(BaseObject* obj) const
263 {
264 DLOG(FIX, "fix obj %p<%p>(%zu)", obj, obj->GetTypeInfo(), obj->GetSize());
265 auto refFunc = [this, obj](RefField<>& field) { FixRefField(obj, field); };
266 obj->ForEachRefField(refFunc);
267 }
268
269 BaseObject* ArkCollector::ForwardUpdateRawRef(ObjectRef& root)
270 {
271 auto& refField = reinterpret_cast<RefField<>&>(root);
272 RefField<> oldField(refField);
273 BaseObject* oldObj = oldField.GetTargetObject();
274 DLOG(FIX, "try fix raw-ref @%p: %p", &root, oldObj);
275 if (IsFromObject(oldObj)) {
276 BaseObject* toVersion = TryForwardObject(oldObj);
277 CHECK_CC(toVersion != nullptr);
278 HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast<uintptr_t>(oldObj),
279 reinterpret_cast<uintptr_t>(toVersion),
280 toVersion->GetSize());
281 RefField<> newField(toVersion);
282 // CAS failure means some mutator or gc thread writes a new ref (must be a to-object), no need to retry.
283 if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
284 DLOG(FIX, "fix raw-ref @%p: %p -> %p", &root, oldObj, toVersion);
285 return toVersion;
286 }
287 }
288
289 return oldObj;
290 }
291
292 class RemarkAndPreforwardVisitor {
293 public:
294 RemarkAndPreforwardVisitor(WorkStack &localStack, ArkCollector *collector)
295 : localStack_(localStack), collector_(collector) {}
296
297 void operator()(RefField<> &refField)
298 {
299 RefField<> oldField(refField);
300 BaseObject* oldObj = oldField.GetTargetObject();
301 DLOG(FIX, "visit raw-ref @%p: %p", &refField, oldObj);
302
303 auto regionType =
304 RegionDesc::InlinedRegionMetaData::GetInlinedRegionMetaData(reinterpret_cast<uintptr_t>(oldObj))
305 ->GetRegionType();
306 if (regionType == RegionDesc::RegionType::FROM_REGION) {
307 BaseObject* toVersion = collector_->TryForwardObject(oldObj);
308 if (toVersion == nullptr) { //LCOV_EXCL_BR_LINE
309 Heap::throwOOM();
310 return;
311 }
312 HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast<uintptr_t>(oldObj),
313 reinterpret_cast<uintptr_t>(toVersion),
314 toVersion->GetSize());
315 RefField<> newField(toVersion);
316 // CAS failure means some mutator or gc thread writes a new ref (must be a to-object), no need to retry.
317 if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
318 DLOG(FIX, "fix raw-ref @%p: %p -> %p", &refField, oldObj, toVersion);
319 }
320 MarkToObject(oldObj, toVersion);
321 } else {
322 if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) {
323 MarkObject(oldObj);
324 } else if (RegionSpace::IsYoungSpaceObject(oldObj) && !RegionSpace::IsNewObjectSinceMarking(oldObj) &&
325 !RegionSpace::IsMarkedObject(oldObj)) {
326 // RSet doesn't protect exempted objects, so we need to mark them
327 MarkObject(oldObj);
328 }
329 }
330 }
331
332 private:
333 void MarkObject(BaseObject *object)
334 {
335 if (!RegionSpace::IsNewObjectSinceMarking(object) && !collector_->MarkObject(object)) {
336 localStack_.push_back(object);
337 }
338 }
339
340 void MarkToObject(BaseObject *oldVersion, BaseObject *toVersion)
341 {
342 // We've checked oldVersion is in fromSpace, no need to check markingLine
343 if (!collector_->MarkObject(oldVersion)) {
344 // No need to count oldVersion object size, as it has been copied.
345 collector_->MarkObject(toVersion);
346 // oldVersion doesn't have valid type info, so we cannot push it
347 localStack_.push_back(toVersion);
348 }
349 }
350
351 private:
352 WorkStack &localStack_;
353 ArkCollector *collector_;
354 };
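// Note on the visitor above (informal summary): roots pointing into FROM_REGION are eagerly
// forwarded and their to-versions pushed for re-marking; all other roots are only re-marked.
// During young GC, objects outside the young space are ignored because remembered sets already
// cover them, while exempted young objects are marked explicitly since the RSet does not protect
// them.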
355
356 class RemarkingAndPreforwardTask : public common::Task {
357 public:
358 RemarkingAndPreforwardTask(ArkCollector *collector, WorkStack &localStack, TaskPackMonitor &monitor,
359 std::function<Mutator*()>& next)
360 : Task(0), visitor_(localStack, collector), monitor_(monitor), getNextMutator_(next)
361 {}
362
363 bool Run([[maybe_unused]] uint32_t threadIndex) override
364 {
365 ThreadLocal::SetThreadType(ThreadType::GC_THREAD);
366 Mutator *mutator = getNextMutator_();
367 while (mutator != nullptr) {
368 VisitMutatorRoot(visitor_, *mutator);
369 mutator = getNextMutator_();
370 }
371 ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR);
372 ThreadLocal::ClearAllocBufferRegion();
373 monitor_.NotifyFinishOne();
374 return true;
375 }
376
377 private:
378 RemarkAndPreforwardVisitor visitor_;
379 TaskPackMonitor &monitor_;
380 std::function<Mutator*()> &getNextMutator_;
381 };
382
383 void ArkCollector::ParallelRemarkAndPreforward(WorkStack& workStack)
384 {
385 std::vector<Mutator*> taskList;
386 MutatorManager &mutatorManager = MutatorManager::Instance();
387 mutatorManager.VisitAllMutators([&taskList](Mutator &mutator) {
388 taskList.push_back(&mutator);
389 });
390 std::atomic<int> taskIter = 0;
391 std::function<Mutator*()> getNextMutator = [&taskIter, &taskList]() -> Mutator* {
392 uint32_t idx = static_cast<uint32_t>(taskIter.fetch_add(1U, std::memory_order_relaxed));
393 if (idx < taskList.size()) {
394 return taskList[idx];
395 }
396 return nullptr;
397 };
398
399 const uint32_t runningWorkers = std::min<uint32_t>(GetGCThreadCount(true), taskList.size());
400 uint32_t parallelCount = runningWorkers + 1; // 1 :DaemonThread
401 TaskPackMonitor monitor(runningWorkers, runningWorkers);
402 WorkStack localStack[parallelCount];
403 for (uint32_t i = 1; i < parallelCount; ++i) {
404 GetThreadPool()->PostTask(std::make_unique<RemarkingAndPreforwardTask>(this, localStack[i], monitor,
405 getNextMutator));
406 }
407 // Run in daemon thread.
408 RemarkAndPreforwardVisitor visitor(localStack[0], this);
409 VisitGlobalRoots(visitor);
410 Mutator *mutator = getNextMutator();
411 while (mutator != nullptr) {
412 VisitMutatorRoot(visitor, *mutator);
413 mutator = getNextMutator();
414 }
415 monitor.WaitAllFinished();
416 for (uint32_t i = 0; i < parallelCount; ++i) {
417 workStack.insert(localStack[i]);
418 }
419 }
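// Work-distribution sketch for the parallel remark/preforward above (mirroring the code in this
// function): mutators are snapshotted into taskList, and an atomic index acts as a lock-free work
// queue shared by the daemon thread and the pool workers:
//
//   std::atomic<int> taskIter{0};
//   auto next = [&]() -> Mutator* {
//       uint32_t idx = static_cast<uint32_t>(taskIter.fetch_add(1, std::memory_order_relaxed));
//       return idx < taskList.size() ? taskList[idx] : nullptr;
//   };
//
// The daemon thread additionally scans global roots, and the per-thread local stacks are merged
// into the shared work stack only after every worker has finished.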
420
421 void ArkCollector::RemarkAndPreforwardStaticRoots(WorkStack& workStack)
422 {
423 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::RemarkAndPreforwardStaticRoots", "");
424 const uint32_t maxWorkers = GetGCThreadCount(true) - 1;
425 if (maxWorkers > 0) {
426 ParallelRemarkAndPreforward(workStack);
427 } else {
428 RemarkAndPreforwardVisitor visitor(workStack, this);
429 VisitSTWRoots(visitor);
430 }
431 }
432
433 void ArkCollector::PreforwardConcurrentRoots()
434 {
435 RefFieldVisitor visitor = [this](RefField<> &refField) {
436 RefField<> oldField(refField);
437 BaseObject *oldObj = oldField.GetTargetObject();
438 DLOG(FIX, "visit raw-ref @%p: %p", &refField, oldObj);
439 if (IsFromObject(oldObj)) {
440 BaseObject *toVersion = TryForwardObject(oldObj);
441 ASSERT_LOGF(toVersion != nullptr, "TryForwardObject failed");
442 HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast<uintptr_t>(oldObj),
443 reinterpret_cast<uintptr_t>(toVersion),
444 toVersion->GetSize());
445 RefField<> newField(toVersion);
446 // CAS failure means some mutator or gc thread writes a new ref (must be a to-object), no need to retry.
447 if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
448 DLOG(FIX, "fix raw-ref @%p: %p -> %p", &refField, oldObj, toVersion);
449 }
450 }
451 };
452 VisitConcurrentRoots(visitor);
453 }
454
455 void ArkCollector::PreforwardStaticWeakRoots()
456 {
457 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PreforwardStaticRoots", "");
458
459 WeakRefFieldVisitor weakVisitor = GetWeakRefFieldVisitor();
460 VisitWeakRoots(weakVisitor);
461 InvokeSharedNativePointerCallbacks();
462 MutatorManager::Instance().VisitAllMutators([](Mutator& mutator) {
463 // Request finalize callback in each vm-thread when gc finished.
464 mutator.SetFinalizeRequest();
465 });
466
467 AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer();
468 if (LIKELY_CC(allocBuffer != nullptr)) {
469 allocBuffer->ClearRegions();
470 }
471 }
472
473 void ArkCollector::PreforwardConcurrencyModelRoots()
474 {
475 LOG_COMMON(FATAL) << "Unresolved fatal";
476 UNREACHABLE_CC();
477 }
478
479 class EnumRootsBuffer {
480 public:
481 EnumRootsBuffer();
482 void UpdateBufferSize();
483 CArrayList<BaseObject *> *GetBuffer() { return &buffer_; }
484
485 private:
486 static size_t bufferSize_;
487 CArrayList<BaseObject *> buffer_;
488 };
489
490 size_t EnumRootsBuffer::bufferSize_ = 16;
491 EnumRootsBuffer::EnumRootsBuffer() : buffer_(bufferSize_)
492 {
493 buffer_.clear(); // construction zero-fills and commits real memory; clear() only resets the size and keeps the capacity
494 }
495
496 void EnumRootsBuffer::UpdateBufferSize()
497 {
498 if (buffer_.empty()) {
499 return;
500 }
501 const size_t decreaseBufferThreshold = bufferSize_ >> 2;
502 if (buffer_.size() < decreaseBufferThreshold) {
503 bufferSize_ = bufferSize_ >> 1;
504 } else {
505 bufferSize_ = std::max(buffer_.capacity(), bufferSize_);
506 }
507 if (buffer_.capacity() > UINT16_MAX) {
508 LOG_COMMON(INFO) << "too many roots, allocated buffer too large: " << buffer_.size() << ", allocate "
509 << (static_cast<double>(buffer_.capacity()) / MB);
510 }
511 }
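// The sizing heuristic above, illustrated with hypothetical numbers: with bufferSize_ == 64, a
// pass that enumerates fewer than 16 roots (a quarter of the reservation) halves the next
// reservation to 32, while a pass that grows the vector beyond 64 entries raises the reservation
// to the new capacity. The intent is to pre-reserve roughly the right amount of memory for the
// next EnumRoots pass without keeping an oversized buffer alive across collections.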
512
513 template <ArkCollector::EnumRootsPolicy policy>
514 CArrayList<BaseObject *> ArkCollector::EnumRoots()
515 {
516 STWParam stwParam{"wgc-enumroot"};
517 EnumRootsBuffer buffer;
518 CArrayList<common::BaseObject *> *results = buffer.GetBuffer();
519 common::RefFieldVisitor visitor = [&results](RefField<>& field) { results->push_back(field.GetTargetObject()); };
520
521 if constexpr (policy == EnumRootsPolicy::NO_STW_AND_NO_FLIP_MUTATOR) {
522 EnumRootsImpl<VisitRoots>(visitor);
523 } else if constexpr (policy == EnumRootsPolicy::STW_AND_NO_FLIP_MUTATOR) {
524 ScopedStopTheWorld stw(stwParam);
525 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL,
526 ("CMCGC::EnumRoots-STW-bufferSize(" + std::to_string(results->capacity()) + ")").c_str(), "");
527 EnumRootsImpl<VisitRoots>(visitor);
528 } else if constexpr (policy == EnumRootsPolicy::STW_AND_FLIP_MUTATOR) {
529 auto rootSet = EnumRootsFlip(stwParam, visitor);
530 for (const auto &roots : rootSet) {
531 std::copy(roots.begin(), roots.end(), std::back_inserter(*results));
532 }
533 VisitConcurrentRoots(visitor);
534 }
535 buffer.UpdateBufferSize();
536 GetGCStats().recordSTWTime(stwParam.GetElapsedNs());
537 return std::move(*results);
538 }
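// The three enumeration policies used above (informal summary):
//   NO_STW_AND_NO_FLIP_MUTATOR : the caller is already inside a stop-the-world window (STW mode);
//   STW_AND_NO_FLIP_MUTATOR    : take a dedicated "wgc-enumroot" pause just for enumeration;
//   STW_AND_FLIP_MUTATOR       : collect mutator roots through the flip protocol so each mutator
//                                can resume once its own roots are gathered, then visit the
//                                concurrent roots outside the pause.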
539
540 void ArkCollector::MarkingHeap(const CArrayList<BaseObject *> &collectedRoots)
541 {
542 COMMON_PHASE_TIMER("marking live objects");
543 markedObjectCount_.store(0, std::memory_order_relaxed);
544 TransitionToGCPhase(GCPhase::GC_PHASE_MARK, true);
545
546 MarkingRoots(collectedRoots);
547 ProcessFinalizers();
548 ExemptFromSpace();
549 }
550
551 void ArkCollector::PostMarking()
552 {
553 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PostMarking", "");
554 COMMON_PHASE_TIMER("PostMarking");
555 TransitionToGCPhase(GC_PHASE_POST_MARK, true);
556
557 // clear the satb buffer when gc finishes tracing.
558 SatbBuffer::Instance().ClearBuffer();
559
560 WVerify::VerifyAfterMark(*this);
561 }
562
563 WeakRefFieldVisitor ArkCollector::GetWeakRefFieldVisitor()
564 {
565 return [this](RefField<> &refField) -> bool {
566 RefField<> oldField(refField);
567 BaseObject *oldObj = oldField.GetTargetObject();
568 if (gcReason_ == GC_REASON_YOUNG) {
569 if (RegionSpace::IsYoungSpaceObject(oldObj) && !IsMarkedObject(oldObj) &&
570 !RegionSpace::IsNewObjectSinceMarking(oldObj)) {
571 return false;
572 }
573 } else {
574 if (!IsMarkedObject(oldObj) && !RegionSpace::IsNewObjectSinceMarking(oldObj)) {
575 return false;
576 }
577 }
578
579 DLOG(FIX, "visit weak raw-ref @%p: %p", &refField, oldObj);
580 if (IsFromObject(oldObj)) {
581 BaseObject *toVersion = TryForwardObject(oldObj);
582 CHECK_CC(toVersion != nullptr);
583 HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast<uintptr_t>(oldObj),
584 reinterpret_cast<uintptr_t>(toVersion),
585 toVersion->GetSize());
586 RefField<> newField(toVersion);
587 // CAS failure means some mutator or gc thread writes a new ref (must be
588 // a to-object), no need to retry.
589 if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
590 DLOG(FIX, "fix weak raw-ref @%p: %p -> %p", &refField, oldObj, toVersion);
591 }
592 }
593 return true;
594 };
595 }
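// Survivorship rule encoded in the weak-ref visitor above (summary): during young GC a referent
// is dead only if it is a young-space object that is neither marked nor allocated after marking
// began; during full GC any unmarked, not-newly-allocated referent is dead. Dead referents make
// the visitor return false so the caller can clear the slot; live from-space referents are
// forwarded and the slot is CASed to the to-version.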
596
597 RefFieldVisitor ArkCollector::GetPrefowardRefFieldVisitor()
598 {
599 return [this](RefField<> &refField) -> void {
600 RefField<> oldField(refField);
601 BaseObject *oldObj = oldField.GetTargetObject();
602 if (IsFromObject(oldObj)) {
603 BaseObject *toVersion = TryForwardObject(oldObj);
604 CHECK_CC(toVersion != nullptr);
605 HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast<uintptr_t>(oldObj),
606 reinterpret_cast<uintptr_t>(toVersion),
607 toVersion->GetSize());
608 RefField<> newField(toVersion);
609 // CAS failure means some mutator or gc thread writes a new ref (must be
610 // a to-object), no need to retry.
611 if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
612 DLOG(FIX, "fix raw-ref @%p: %p -> %p", &refField, oldObj, toVersion);
613 }
614 }
615 };
616 }
617
618 void ArkCollector::PreforwardFlip()
619 {
620 auto remarkAndForwardGlobalRoot = [this]() {
621 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PreforwardFlip[STW]", "");
622 SetGCThreadQosPriority(common::PriorityMode::STW);
623 ASSERT_LOGF(GetThreadPool() != nullptr, "thread pool is null");
624 TransitionToGCPhase(GCPhase::GC_PHASE_FINAL_MARK, true);
625 Remark();
626 PostMarking();
627 reinterpret_cast<RegionSpace&>(theAllocator_).PrepareForward();
628
629 TransitionToGCPhase(GCPhase::GC_PHASE_PRECOPY, true);
630 WeakRefFieldVisitor weakVisitor = GetWeakRefFieldVisitor();
631 SetGCThreadQosPriority(common::PriorityMode::FOREGROUND);
632
633 if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) {
634 // only visit weak roots that may reference young objects
635 VisitDynamicWeakGlobalRoots(weakVisitor);
636 } else {
637 VisitDynamicWeakGlobalRoots(weakVisitor);
638 VisitDynamicWeakGlobalRootsOld(weakVisitor);
639 }
640 };
641 FlipFunction forwardMutatorRoot = [this](Mutator &mutator) {
642 WeakRefFieldVisitor weakVisitor = GetWeakRefFieldVisitor();
643 VisitWeakMutatorRoot(weakVisitor, mutator);
644 RefFieldVisitor visitor = GetPrefowardRefFieldVisitor();
645 VisitMutatorPreforwardRoot(visitor, mutator);
646 // Request finalize callback in each vm-thread when gc finished.
647 mutator.SetFinalizeRequest();
648 };
649 STWParam stwParam{"final-mark"};
650 MutatorManager::Instance().FlipMutators(stwParam, remarkAndForwardGlobalRoot, &forwardMutatorRoot);
651 InvokeSharedNativePointerCallbacks();
652 GetGCStats().recordSTWTime(stwParam.GetElapsedNs());
653 AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer();
654 if (LIKELY_CC(allocBuffer != nullptr)) {
655 allocBuffer->ClearRegions();
656 }
657 }
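// Flip-based pre-forward sketch (recap of the function above): the global part runs inside the
// flip pause on the daemon thread (final mark, PostMarking, PrepareForward, then weak global
// roots), while each mutator's weak roots and preforward roots are handled by the FlipFunction as
// that mutator is flipped, so threads are not all held in one long stop-the-world phase.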
658
659 void ArkCollector::Preforward()
660 {
661 COMMON_PHASE_TIMER("Preforward");
662 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::Preforward[STW]", "");
663 TransitionToGCPhase(GCPhase::GC_PHASE_PRECOPY, true);
664
665 [[maybe_unused]] Taskpool *threadPool = GetThreadPool();
666 ASSERT_LOGF(threadPool != nullptr, "thread pool is null");
667
668 // copy and fix finalizer roots.
669 // Only one root task, no need to post task.
670 PreforwardStaticWeakRoots();
671 RefFieldVisitor visitor = GetPrefowardRefFieldVisitor();
672 VisitPreforwardRoots(visitor);
673 }
674
675 void ArkCollector::ConcurrentPreforward()
676 {
677 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::ConcurrentPreforward", "");
678 PreforwardConcurrentRoots();
679 ProcessStringTable();
680 }
681
682 void ArkCollector::PrepareFix()
683 {
684 if (Heap::GetHeap().GetGCReason() == GCReason::GC_REASON_YOUNG) {
685 // string table objects are always not in young space, skip it
686 return;
687 }
688
689 COMMON_PHASE_TIMER("PrepareFix");
690
691 // we cannot re-enter STW, check it first
692 if (!MutatorManager::Instance().WorldStopped()) {
693 STWParam prepareFixStwParam{"wgc-preparefix"};
694 ScopedStopTheWorld stw(prepareFixStwParam);
695 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PrepareFix[STW]", "");
696
697 #ifndef GC_STW_STRINGTABLE
698 auto *baseRuntime = BaseRuntime::GetInstance();
699 auto& stringTable = reinterpret_cast<BaseStringTableImpl&>(baseRuntime->GetStringTable());
700 stringTable.GetInternalTable()->GetCleaner()->CleanUp();
701 #endif
702
703 GetGCStats().recordSTWTime(prepareFixStwParam.GetElapsedNs());
704 } else {
705 #ifndef GC_STW_STRINGTABLE
706 auto *baseRuntime = BaseRuntime::GetInstance();
707 auto& stringTable = reinterpret_cast<BaseStringTableImpl&>(baseRuntime->GetStringTable());
708 stringTable.GetInternalTable()->GetCleaner()->CleanUp();
709 #endif
710 }
711 }
712
713 void ArkCollector::ParallelFixHeap()
714 {
715 auto& regionSpace = reinterpret_cast<RegionSpace&>(theAllocator_);
716 auto taskList = regionSpace.CollectFixTasks();
717 std::atomic<int> taskIter = 0;
718 std::function<FixHeapTask *()> getNextTask = [&taskIter, &taskList]() -> FixHeapTask* {
719 uint32_t idx = static_cast<uint32_t>(taskIter.fetch_add(1U, std::memory_order_relaxed));
720 if (idx < taskList.size()) {
721 return &taskList[idx];
722 }
723 return nullptr;
724 };
725
726 const uint32_t runningWorkers = GetGCThreadCount(true) - 1;
727 uint32_t parallelCount = runningWorkers + 1; // 1 :DaemonThread
728 FixHeapWorker::Result results[parallelCount];
729 {
730 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::FixHeap [Parallel]", "");
731 // Fix heap
732 TaskPackMonitor monitor(runningWorkers, runningWorkers);
733 for (uint32_t i = 1; i < parallelCount; ++i) {
734 GetThreadPool()->PostTask(std::make_unique<FixHeapWorker>(this, monitor, results[i], getNextTask));
735 }
736
737 FixHeapWorker gcWorker(this, monitor, results[0], getNextTask);
738 auto task = getNextTask();
739 while (task != nullptr) {
740 gcWorker.DispatchRegionFixTask(task);
741 task = getNextTask();
742 }
743 monitor.WaitAllFinished();
744 }
745
746 {
747 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::Post FixHeap Clear [Parallel]", "");
748 // Post clear task
749 TaskPackMonitor monitor(runningWorkers, runningWorkers);
750 for (uint32_t i = 1; i < parallelCount; ++i) {
751 GetThreadPool()->PostTask(std::make_unique<PostFixHeapWorker>(results[i], monitor));
752 }
753
754 PostFixHeapWorker gcWorker(results[0], monitor);
755 gcWorker.PostClearTask();
756 PostFixHeapWorker::CollectEmptyRegions();
757 monitor.WaitAllFinished();
758 }
759 }
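// The fix phase above runs as two parallel passes: the first rewrites stale references region by
// region (DispatchRegionFixTask), and only after every worker has finished does the second pass
// run the post-fix clearing work and collect empty regions; a plausible reason for the barrier in
// between is to keep clearing from racing with workers that are still reading region metadata.
// The fix pass hands out FixHeapTasks through the same atomic-index work-queue pattern used in
// ParallelRemarkAndPreforward.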
760
761 void ArkCollector::FixHeap()
762 {
763 TransitionToGCPhase(GCPhase::GC_PHASE_FIX, true);
764 COMMON_PHASE_TIMER("FixHeap");
765 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::FixHeap", "");
766 ParallelFixHeap();
767
768 WVerify::VerifyAfterFix(*this);
769 }
770
771 void ArkCollector::DoGarbageCollection()
772 {
773 const bool isNotYoungGC = gcReason_ != GCReason::GC_REASON_YOUNG;
774 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::DoGarbageCollection", "");
775 if (gcMode_ == GCMode::STW) { // 2: stw-gc
776 #ifdef ENABLE_CMC_RB_DFX
777 WVerify::DisableReadBarrierDFX(*this);
778 #endif
779 STWParam stwParam{"stw-gc"};
780 {
781 ScopedStopTheWorld stw(stwParam);
782 auto collectedRoots = EnumRoots<EnumRootsPolicy::NO_STW_AND_NO_FLIP_MUTATOR>();
783 MarkingHeap(collectedRoots);
784 TransitionToGCPhase(GCPhase::GC_PHASE_FINAL_MARK, true);
785 Remark();
786 PostMarking();
787
788 Preforward();
789 ConcurrentPreforward();
790 // reclaiming large objects should happen after preforward (which may process weak refs) and
791 // before fixing the heap (which may clear live bits)
792 if (isNotYoungGC) {
793 CollectLargeGarbage();
794 }
795 SweepThreadLocalJitFort();
796
797 CopyFromSpace();
798 WVerify::VerifyAfterForward(*this);
799
800 PrepareFix();
801 FixHeap();
802 if (isNotYoungGC) {
803 CollectPinnedGarbage();
804 }
805
806 TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true);
807
808 ClearAllGCInfo();
809 CollectSmallSpace();
810
811 #if defined(ENABLE_CMC_RB_DFX)
812 WVerify::EnableReadBarrierDFX(*this);
813 #endif
814 }
815 GetGCStats().recordSTWTime(stwParam.GetElapsedNs());
816 return;
817 } else if (gcMode_ == GCMode::CONCURRENT_MARK) { // 1: concurrent-mark
818 auto collectedRoots = EnumRoots<EnumRootsPolicy::STW_AND_NO_FLIP_MUTATOR>();
819 MarkingHeap(collectedRoots);
820 STWParam finalMarkStwParam{"final-mark"};
821 {
822 ScopedStopTheWorld stw(finalMarkStwParam, true, GCPhase::GC_PHASE_FINAL_MARK);
823 Remark();
824 PostMarking();
825 reinterpret_cast<RegionSpace&>(theAllocator_).PrepareForward();
826 Preforward();
827 }
828 GetGCStats().recordSTWTime(finalMarkStwParam.GetElapsedNs());
829 ConcurrentPreforward();
830 // reclaiming large objects should happen after preforward (which may process weak refs) and
831 // before fixing the heap (which may clear live bits)
832 if (isNotYoungGC) {
833 CollectLargeGarbage();
834 }
835 SweepThreadLocalJitFort();
836
837 CopyFromSpace();
838 WVerify::VerifyAfterForward(*this);
839
840 PrepareFix();
841 FixHeap();
842 if (isNotYoungGC) {
843 CollectPinnedGarbage();
844 }
845
846 TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true);
847 ClearAllGCInfo();
848 CollectSmallSpace();
849 return;
850 }
851
852 auto collectedRoots = EnumRoots<EnumRootsPolicy::STW_AND_FLIP_MUTATOR>();
853 MarkingHeap(collectedRoots);
854 PreforwardFlip();
855 ConcurrentPreforward();
856 // reclaiming large objects should happen after preforward (which may process weak refs)
857 // and before fixing the heap (which may clear live bits)
858 if (isNotYoungGC) {
859 CollectLargeGarbage();
860 }
861 SweepThreadLocalJitFort();
862
863 CopyFromSpace();
864 WVerify::VerifyAfterForward(*this);
865
866 PrepareFix();
867 FixHeap();
868 if (isNotYoungGC) {
869 CollectPinnedGarbage();
870 }
871
872 TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true);
873 ClearAllGCInfo();
874 RegionSpace &space = reinterpret_cast<RegionSpace &>(theAllocator_);
875 space.DumpAllRegionSummary("Peak GC log");
876 space.DumpAllRegionStats("region statistics when gc ends");
877 CollectSmallSpace();
878 }
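// High-level pipeline of DoGarbageCollection (informal recap): all three modes share the same
// backbone of enumerate roots -> mark -> final mark / post mark -> preforward -> (full GC only)
// collect large garbage -> copy from-space -> fix references -> (full GC only) collect pinned
// garbage -> clear GC info and reclaim small space. They differ mainly in how much of that work
// happens inside a stop-the-world pause: STW mode does everything in one pause, CONCURRENT_MARK
// pauses only for root enumeration and final-mark/preforward, and the default flip mode replaces
// the preforward pause with per-mutator flips (PreforwardFlip).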
879
880 CArrayList<CArrayList<BaseObject *>> ArkCollector::EnumRootsFlip(STWParam& param,
881 const common::RefFieldVisitor &visitor)
882 {
883 const auto enumGlobalRoots = [this, &visitor]() {
884 SetGCThreadQosPriority(common::PriorityMode::STW);
885 EnumRootsImpl<VisitGlobalRoots>(visitor);
886 SetGCThreadQosPriority(common::PriorityMode::FOREGROUND);
887 };
888
889 std::mutex stackMutex;
890 CArrayList<CArrayList<BaseObject *>> rootSet; // allocate one list for each mutator
891 FlipFunction enumMutatorRoot = [&rootSet, &stackMutex](Mutator &mutator) {
892 CArrayList<BaseObject *> roots;
893 RefFieldVisitor localVisitor = [&roots](RefField<> &root) { roots.emplace_back(root.GetTargetObject()); };
894 VisitMutatorRoot(localVisitor, mutator);
895 std::lock_guard<std::mutex> lockGuard(stackMutex);
896 rootSet.emplace_back(std::move(roots));
897 };
898 MutatorManager::Instance().FlipMutators(param, enumGlobalRoots, &enumMutatorRoot);
899 return rootSet;
900 }
901
902 void ArkCollector::ProcessStringTable()
903 {
904 #ifdef GC_STW_STRINGTABLE
905 return;
906 #endif
907 if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) {
908 // no need to fix weak ref in young gc
909 return;
910 }
911
912 WeakRefFieldVisitor weakVisitor = [this](RefField<> &refField) -> bool {
913 auto isSurvivor = [this](BaseObject* oldObj) {
914 RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast<uintptr_t>(oldObj));
915 return (gcReason_ == GC_REASON_YOUNG && !region->IsInYoungSpace())
916 || region->IsMarkedObject(oldObj)
917 || region->IsNewObjectSinceMarking(oldObj)
918 || region->IsToRegion();
919 };
920
921 RefField<> oldField(refField);
922 BaseObject *oldObj = oldField.GetTargetObject();
923 if (oldObj == nullptr) {
924 return false;
925 }
926 if (!isSurvivor(oldObj)) {
927 // CAS failure means some mutator or gc thread writes a new ref (must be
928 // a to-object), no need to retry.
929 RefField<> newField(nullptr);
930 if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
931 DLOG(FIX, "fix weak raw-ref @%p: %p -> %p", &refField, oldObj, nullptr);
932 }
933 return false;
934 }
935 DLOG(FIX, "visit weak raw-ref @%p: %p", &refField, oldObj);
936 if (IsFromObject(oldObj)) {
937 BaseObject *toVersion = TryForwardObject(oldObj);
938 CHECK_CC(toVersion != nullptr);
939 RefField<> newField(toVersion);
940 // CAS failure means some mutator or gc thread writes a new ref (must be
941 // a to-object), no need to retry.
942 if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
943 DLOG(FIX, "fix weak raw-ref @%p: %p -> %p", &refField, oldObj, toVersion);
944 }
945 }
946 return true;
947 };
948 auto* baseRuntime = BaseRuntime::GetInstance();
949 auto& stringTable = reinterpret_cast<BaseStringTableImpl&>(baseRuntime->GetStringTable());
950 auto stringTableCleaner = stringTable.GetInternalTable()->GetCleaner();
951 stringTableCleaner->PostSweepWeakRefTask(weakVisitor);
952 stringTableCleaner->JoinAndWaitSweepWeakRefTask(weakVisitor);
953 }
954
955
956 void ArkCollector::ProcessFinalizers()
957 {
958 std::function<bool(BaseObject*)> finalizable = [this](BaseObject* obj) { return !IsMarkedObject(obj); };
959 FinalizerProcessor& fp = collectorResources_.GetFinalizerProcessor();
960 fp.EnqueueFinalizables(finalizable, snapshotFinalizerNum_);
961 fp.Notify();
962 }
963
964 BaseObject* ArkCollector::ForwardObject(BaseObject* obj)
965 {
966 BaseObject* to = TryForwardObject(obj);
967 if (to != nullptr) {
968 HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast<uintptr_t>(obj),
969 reinterpret_cast<uintptr_t>(to),
970 to->GetSize());
971 }
972 return (to != nullptr) ? to : obj;
973 }
974
975 BaseObject* ArkCollector::TryForwardObject(BaseObject* obj)
976 {
977 return CopyObjectImpl(obj);
978 }
979
980 // ConcurrentGC
981 BaseObject* ArkCollector::CopyObjectImpl(BaseObject* obj)
982 {
983 // reconsider phase difference between mutator and GC thread during transition.
984 if (IsGcThread()) {
985 CHECK_CC(GetGCPhase() == GCPhase::GC_PHASE_PRECOPY || GetGCPhase() == GCPhase::GC_PHASE_COPY ||
986 GetGCPhase() == GCPhase::GC_PHASE_FIX || GetGCPhase() == GCPhase::GC_PHASE_FINAL_MARK);
987 } else {
988 auto phase = Mutator::GetMutator()->GetMutatorPhase();
989 CHECK_CC(phase == GCPhase::GC_PHASE_PRECOPY || phase == GCPhase::GC_PHASE_COPY ||
990 phase == GCPhase::GC_PHASE_FIX);
991 }
992
993 do {
994 BaseStateWord oldWord = obj->GetBaseStateWord();
995
996 // 1. object has already been forwarded
997 if (obj->IsForwarded()) {
998 auto toObj = GetForwardingPointer(obj);
999 DLOG(COPY, "skip forwarded obj %p -> %p<%p>(%zu)", obj, toObj, toObj->GetTypeInfo(), toObj->GetSize());
1000 return toObj;
1001 }
1002
1003 // ConcurrentGC
1004 // 2. object is being forwarded, spin until it is forwarded (or gets its own forwarded address)
1005 if (oldWord.IsForwarding()) {
1006 sched_yield();
1007 continue;
1008 }
1009
1010 // 3. hope we can copy this object
1011 if (obj->TryLockExclusive(oldWord)) {
1012 return CopyObjectAfterExclusive(obj);
1013 }
1014 } while (true);
1015 LOG_COMMON(FATAL) << "forwardObject exit in wrong path";
1016 UNREACHABLE_CC();
1017 return nullptr;
1018 }
1019
1020 BaseObject* ArkCollector::CopyObjectAfterExclusive(BaseObject* obj)
1021 {
1022 size_t size = RegionSpace::GetAllocSize(*obj);
1023 // 8: size of a free object; free objects cannot be copied.
1024 if (size == 8) {
1025 LOG_COMMON(FATAL) << "forward free obj: " << obj <<
1026 "is survived: " << (IsSurvivedObject(obj) ? "true" : "false");
1027 }
1028 BaseObject* toObj = fwdTable_.RouteObject(obj, size);
1029 if (toObj == nullptr) {
1030 // ConcurrentGC
1031 obj->UnlockExclusive(BaseStateWord::ForwardState::NORMAL);
1032 return toObj;
1033 }
1034 DLOG(COPY, "copy obj %p<%p>(%zu) to %p", obj, obj->GetTypeInfo(), size, toObj);
1035 CopyObject(*obj, *toObj, size);
1036
1037 ASSERT_LOGF(IsToObject(toObj), "Copy object to invalid region");
1038 toObj->SetForwardState(BaseStateWord::ForwardState::NORMAL);
1039
1040 std::atomic_thread_fence(std::memory_order_release);
1041 obj->SetSizeForwarded(size);
1042 // Avoid seeing the fwd pointer before observing the size modification
1043 // when calling GetSize during the CopyPhase.
1044 std::atomic_thread_fence(std::memory_order_release);
1045 obj->SetForwardingPointerAfterExclusive(toObj);
1046 return toObj;
1047 }
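// Publication order in CopyObjectAfterExclusive (note on the two release fences above):
//   1. copy the payload into toObj                      (CopyObject)
//   2. release fence, then publish the forwarded size   (SetSizeForwarded)
//   3. release fence, then publish the forwarding ptr   (SetForwardingPointerAfterExclusive)
// A reader that observes the forwarding pointer is therefore guaranteed to also observe the
// forwarded size, which matters for GetSize() calls made concurrently during the copy phase.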
1048
1049 void ArkCollector::ClearAllGCInfo()
1050 {
1051 COMMON_PHASE_TIMER("ClearAllGCInfo");
1052 RegionSpace& space = reinterpret_cast<RegionSpace&>(theAllocator_);
1053 space.ClearAllGCInfo();
1054 reinterpret_cast<RegionSpace&>(theAllocator_).ClearJitFortAwaitingMark();
1055 }
1056
1057 void ArkCollector::CollectSmallSpace()
1058 {
1059 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::CollectSmallSpace", "");
1060 GCStats& stats = GetGCStats();
1061 RegionSpace& space = reinterpret_cast<RegionSpace&>(theAllocator_);
1062 {
1063 COMMON_PHASE_TIMER("CollectFromSpaceGarbage");
1064 stats.collectedBytes += stats.smallGarbageSize;
1065 if (gcReason_ == GC_REASON_APPSPAWN) {
1066 VLOG(DEBUG, "APPSPAWN GC Collect");
1067 space.CollectAppSpawnSpaceGarbage();
1068 } else {
1069 space.CollectFromSpaceGarbage();
1070 space.HandlePromotion();
1071 }
1072 }
1073
1074 size_t candidateBytes = stats.fromSpaceSize + stats.pinnedSpaceSize + stats.largeSpaceSize;
1075 stats.garbageRatio = (candidateBytes > 0) ? static_cast<float>(stats.collectedBytes) / candidateBytes : 0;
1076
1077 stats.liveBytesAfterGC = space.GetAllocatedBytes();
1078
1079 VLOG(INFO,
1080 "collect %zu B: old small %zu - %zu B, old pinned %zu - %zu B, old large %zu - %zu B. garbage ratio %.2f%%",
1081 stats.collectedBytes, stats.fromSpaceSize, stats.smallGarbageSize, stats.pinnedSpaceSize,
1082 stats.pinnedGarbageSize, stats.largeSpaceSize, stats.largeGarbageSize,
1083 stats.garbageRatio * 100); // The base of the percentage is 100
1084 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::CollectSmallSpace END", (
1085 "collect:" + std::to_string(stats.collectedBytes) +
1086 "B;old small:" + std::to_string(stats.fromSpaceSize) +
1087 "-" + std::to_string(stats.smallGarbageSize) +
1088 "B;old pinned:" + std::to_string(stats.pinnedSpaceSize) +
1089 "-" + std::to_string(stats.pinnedGarbageSize) +
1090 "B;old large:" + std::to_string(stats.largeSpaceSize) +
1091 "-" + std::to_string(stats.largeGarbageSize) +
1092 "B;garbage ratio:" + std::to_string(stats.garbageRatio)
1093 ).c_str());
1094
1095 collectorResources_.GetFinalizerProcessor().NotifyToReclaimGarbage();
1096 }
1097
1098 void ArkCollector::SetGCThreadQosPriority(common::PriorityMode mode)
1099 {
1100 #ifdef ENABLE_QOS
1101 LOG_COMMON(DEBUG) << "SetGCThreadQosPriority gettid " << gettid();
1102 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::SetGCThreadQosPriority", "");
1103 switch (mode) {
1104 case PriorityMode::STW: {
1105 OHOS::QOS::SetQosForOtherThread(OHOS::QOS::QosLevel::QOS_USER_INTERACTIVE, gettid());
1106 break;
1107 }
1108 case PriorityMode::FOREGROUND: {
1109 OHOS::QOS::SetQosForOtherThread(OHOS::QOS::QosLevel::QOS_USER_INITIATED, gettid());
1110 break;
1111 }
1112 case PriorityMode::BACKGROUND: {
1113 OHOS::QOS::ResetQosForOtherThread(gettid());
1114 break;
1115 }
1116 default:
1117 UNREACHABLE();
1118 break;
1119 }
1120 common::Taskpool::GetCurrentTaskpool()->SetThreadPriority(mode);
1121 #endif
1122 }
1123
1124 bool ArkCollector::ShouldIgnoreRequest(GCRequest& request) { return request.ShouldBeIgnored(); }
1125 } // namespace common
1126