/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/parallel_evacuator-inl.h"

#include "ecmascript/mem/parallel_evacuator_visitor-inl.h"
#include "ecmascript/mem/tlab_allocator-inl.h"
#include "ecmascript/mem/work_manager-inl.h"
#include "ecmascript/runtime_call_id.h"

namespace panda::ecmascript {
void ParallelEvacuator::Initialize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorInitialize);
    waterLine_ = heap_->GetNewSpace()->GetWaterLine();
    heap_->SwapNewSpace();
    allocator_ = new TlabAllocator(heap_);
    promotedSize_ = 0;
    hasNewToOldRegions_ = false;
}

void ParallelEvacuator::Finalize()
{
    TRACE_GC(GCStats::Scope::ScopeId::Finalize, heap_->GetEcmaVM()->GetEcmaGCStats());
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::Finalize");
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorFinalize);
    delete allocator_;
    evacuateWorkloadSet_.Clear();
    updateWorkloadSet_.Clear();
}

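// Top-level driver for the evacuation phase: copy live objects out of the collected regions,
// fix up references to their new addresses, and finally hand regions that were promoted in
// place (new-to-old) over to the sweeper.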
void ParallelEvacuator::Evacuate()
{
    Initialize();
    EvacuateSpace();
    UpdateReference();
    SweepNewToOldRegions();
    Finalize();
}

void ParallelEvacuator::SweepNewToOldRegions()
{
    TRACE_GC(GCStats::Scope::ScopeId::SweepNewToOldRegions, heap_->GetEcmaVM()->GetEcmaGCStats());
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::SweepNewToOldRegions");
    if (!hasNewToOldRegions_) {
        return;
    }
    heap_->GetSweeper()->SweepNewToOldRegions();
    if (!heap_->IsConcurrentFullMark()) {
        heap_->GetSweeper()->PostTask();
    }
}

void ParallelEvacuator::UpdateTrackInfo()
{
    for (uint32_t i = 0; i <= MAX_TASKPOOL_THREAD_NUM; i++) {
        auto &trackInfoSet = ArrayTrackInfoSet(i);
        for (auto &each : trackInfoSet) {
            auto trackInfoVal = JSTaggedValue(each);
            if (!trackInfoVal.IsHeapObject() || !trackInfoVal.IsWeak()) {
                continue;
            }
            auto trackInfo = trackInfoVal.GetWeakReferentUnChecked();
            trackInfo = UpdateAddressAfterEvacation(trackInfo);
            if (trackInfo) {
                heap_->GetEcmaVM()->GetPGOProfiler()->UpdateTrackSpaceFlag(trackInfo, RegionSpaceFlag::IN_OLD_SPACE);
            }
        }
        trackInfoSet.clear();
    }
}

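// Collects evacuation workloads for the from-space. Fresh regions first get a chance to be moved
// wholesale (new-to-new); the remaining regions are sorted by live bytes and visited from largest
// to smallest so the biggest copying jobs are started first.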
void ParallelEvacuator::ProcessFromSpaceEvacuation()
{
    std::vector<std::pair<size_t, Region*>> sortRegion;
    sortRegion.reserve(heap_->GetFromSpaceDuringEvacuation()->GetRegionCount());
    heap_->GetFromSpaceDuringEvacuation()->EnumerateRegions([this, &sortRegion](Region *current) {
        if (current->IsFreshRegion() && TryWholeRegionEvacuate(current, RegionEvacuateType::REGION_NEW_TO_NEW)) {
            return;
        }
        sortRegion.emplace_back(current->AliveObject(), current);
    });
    std::sort(sortRegion.begin(), sortRegion.end());
    for (auto iter = sortRegion.rbegin(); iter != sortRegion.rend(); iter++) {
        Region *region = iter->second;
        auto type = SelectRegionEvacuateType(region);
        if (TryWholeRegionEvacuate(region, type)) {
            continue;
        }
        evacuateWorkloadSet_.Add(std::make_unique<EvacuateWorkload>(this, region));
    }
}

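// Builds the evacuation workload set (from-space regions plus the old-space collect set), posts
// parallel evacuation tasks when parallel GC is enabled, then joins the work on the main thread
// before waiting for the helper tasks to finish.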
void ParallelEvacuator::EvacuateSpace()
{
    TRACE_GC(GCStats::Scope::ScopeId::EvacuateSpace, heap_->GetEcmaVM()->GetEcmaGCStats());
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::EvacuateSpace");
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuator);
    auto &workloadSet = evacuateWorkloadSet_;
    if (heap_->IsConcurrentFullMark() || heap_->IsYoungMark()) {
        ProcessFromSpaceEvacuation();
        heap_->GetOldSpace()->EnumerateCollectRegionSet([this, &workloadSet](Region *current) {
            workloadSet.Add(std::make_unique<EvacuateWorkload>(this, current));
        });
    }
    workloadSet.PrepareWorkloads();
    if (heap_->IsParallelGCEnabled()) {
        LockHolder holder(mutex_);
        parallel_ = CalculateEvacuationThreadNum();
        ASSERT(parallel_ >= 0);
        evacuateTaskNum_ = static_cast<uint32_t>(parallel_);
        for (uint32_t i = 1; i <= evacuateTaskNum_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<EvacuationTask>(heap_->GetJSThread()->GetThreadId(), i, this));
        }
    } else {
        evacuateTaskNum_ = 0;
    }
    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::EvacuateRegion, heap_->GetEcmaVM()->GetEcmaGCStats());
        EvacuateSpace(allocator_, MAIN_THREAD_INDEX, 0, true);
    }

    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::WaitFinish, heap_->GetEcmaVM()->GetEcmaGCStats());
        WaitFinished();
    }
}

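// Per-thread evacuation entry, shared by the main thread (isMain == true) and the posted
// EvacuationTasks: it first processes its share of the recorded weak references, then drains
// region workloads, and finally signals completion to the waiting main thread.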
bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, uint32_t threadIndex, uint32_t idOrder, bool isMain)
{
    UpdateRecordWeakReferenceInParallel(idOrder);

    auto &arrayTrackInfoSet = ArrayTrackInfoSet(threadIndex);
    DrainWorkloads(evacuateWorkloadSet_, [&](std::unique_ptr<Workload> &region) {
        EvacuateRegion(allocator, region->GetRegion(), arrayTrackInfoSet);
    });
    allocator->Finalize();
    if (!isMain) {
        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

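// Clears dead weak references recorded by the markers. The per-worker weak reference queues are
// striped across the evacuating threads by idOrder so that each queue is processed exactly once.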
void ParallelEvacuator::UpdateRecordWeakReferenceInParallel(uint32_t idOrder)
{
    auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
    for (uint32_t i = idOrder; i < totalThreadCount; i += (evacuateTaskNum_ + 1)) {
        ProcessQueue *queue = heap_->GetWorkManager()->GetWorkNodeHolder(i)->GetWeakReferenceQueue();
        while (true) {
            auto obj = queue->PopBack();
            if (UNLIKELY(obj == nullptr)) {
                break;
            }
            ObjectSlot slot(ToUintPtr(obj));
            JSTaggedType value = slot.GetTaggedType();
            if (JSTaggedValue(value).IsWeak()) {
                ASSERT(heap_->IsConcurrentFullMark());
                Region *objectRegion = Region::ObjectAddressToRange(value);
                if (!objectRegion->InYoungSpaceOrCSet() && !objectRegion->InSharedHeap() &&
                    (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(value))) {
                    slot.Clear();
                }
            }
        }
    }
}

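// Copies every marked object out of the region. Objects below the age mark or already in old
// space are promoted to old space; everything else is copied into the to-space, falling back to
// promotion when the semispace allocation fails. The forwarding address is stored in the old
// object's mark word so that reference updating can find the new location.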
void ParallelEvacuator::EvacuateRegion(TlabAllocator *allocator, Region *region,
                                       std::unordered_set<JSTaggedType> &trackSet)
{
    bool isInOldGen = region->InOldSpace();
    bool isBelowAgeMark = region->BelowAgeMark();
    bool pgoEnabled = heap_->GetJSThread()->IsPGOProfilerEnable();
    bool inHeapProfiler = heap_->InHeapProfiler();
    size_t promotedSize = 0;
    region->IterateAllMarkedBits([this, &region, &isInOldGen, &isBelowAgeMark, &pgoEnabled,
                                  &promotedSize, &allocator, &trackSet, inHeapProfiler](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t address = 0;
        bool actualPromoted = false;
        bool hasAgeMark = isBelowAgeMark || (region->HasAgeMark() && ToUintPtr(mem) < waterLine_);
        if (hasAgeMark) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
            promotedSize += size;
        } else if (isInOldGen) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
        } else {
            address = allocator->Allocate(size, SEMI_SPACE);
            if (address == 0) {
                address = allocator->Allocate(size, OLD_SPACE);
                actualPromoted = true;
                promotedSize += size;
            }
        }
        ASSERT(address != 0);

        if (memcpy_s(ToVoidPtr(address), size, ToVoidPtr(ToUintPtr(mem)), size) != EOK) { // LOCV_EXCL_BR_LINE
            LOG_FULL(FATAL) << "memcpy_s failed";
        }
        if (inHeapProfiler) {
            heap_->OnMoveEvent(reinterpret_cast<uintptr_t>(mem), reinterpret_cast<TaggedObject *>(address), size);
        }
        if (pgoEnabled) {
            if (actualPromoted && klass->IsJSArray()) {
                auto trackInfo = JSArray::Cast(header)->GetTrackInfo();
                trackSet.emplace(trackInfo.GetRawData());
            }
        }
        Barriers::SetPrimitive(header, 0, MarkWord::FromForwardingAddress(address));

        if (actualPromoted) {
            SetObjectFieldRSet(reinterpret_cast<TaggedObject *>(address), klass);
        }
    });
    promotedSize_.fetch_add(promotedSize);
}

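// Classifies every surviving region into an update workload (new-to-new sweep-and-update, plain
// new-region update, new-to-old in-place promotion, or remembered-set update), then updates roots
// and weak references before processing the workloads, optionally in parallel with posted
// UpdateReferenceTasks.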
void ParallelEvacuator::UpdateReference()
{
    TRACE_GC(GCStats::Scope::ScopeId::UpdateReference, heap_->GetEcmaVM()->GetEcmaGCStats());
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelUpdateReference);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateReference");
    // Update reference pointers
    uint32_t youngRegionMoveCount = 0;
    uint32_t youngRegionCopyCount = 0;
    uint32_t oldRegionCount = 0;
    auto &workloadSet = updateWorkloadSet_;
    heap_->GetNewSpace()->EnumerateRegions([&](Region *current) {
        if (current->InNewToNewSet()) {
            workloadSet.Add(
                std::make_unique<UpdateAndSweepNewRegionWorkload>(
                    this, current, heap_->IsYoungMark()));
            youngRegionMoveCount++;
        } else {
            workloadSet.Add(
                std::make_unique<UpdateNewRegionWorkload>(this, current, heap_->IsYoungMark()));
            youngRegionCopyCount++;
        }
    });
    heap_->EnumerateOldSpaceRegions([this, &oldRegionCount, &workloadSet](Region *current) {
        if (current->InCollectSet()) {
            return;
        }
        if (current->InNewToOldSet()) {
            hasNewToOldRegions_ = true;
            promotedSize_.fetch_add(current->AliveObject(), std::memory_order_relaxed);
            workloadSet.Add(std::make_unique<UpdateNewToOldEvacuationWorkload>(this, current, heap_->IsYoungMark()));
        } else {
            workloadSet.Add(std::make_unique<UpdateRSetWorkload>(this, current));
        }
        oldRegionCount++;
    });
    heap_->EnumerateSnapshotSpaceRegions([this, &workloadSet](Region *current) {
        workloadSet.Add(std::make_unique<UpdateRSetWorkload>(this, current));
    });
    workloadSet.PrepareWorkloads();
    LOG_GC(DEBUG) << "UpdatePointers statistic: young space region compact moving count:"
                  << youngRegionMoveCount
                  << " young space region compact copying count:" << youngRegionCopyCount
                  << " old space region count:" << oldRegionCount;

    if (heap_->IsParallelGCEnabled()) {
        LockHolder holder(mutex_);
        parallel_ = CalculateUpdateThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<UpdateReferenceTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }
    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::UpdateRoot, heap_->GetEcmaVM()->GetEcmaGCStats());
        UpdateRoot();
    }

    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::UpdateWeekRef, heap_->GetEcmaVM()->GetEcmaGCStats());
        if (heap_->IsYoungMark()) {
            UpdateWeakReferenceOpt<TriggerGCType::YOUNG_GC>();
        } else {
            UpdateWeakReferenceOpt<TriggerGCType::OLD_GC>();
        }
    }
    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::ProceeWorkload, heap_->GetEcmaVM()->GetEcmaGCStats());
        ProcessWorkloads(true);
    }
    WaitFinished();

    if (heap_->GetJSThread()->IsPGOProfilerEnable()) {
        UpdateTrackInfo();
    }
}

void ParallelEvacuator::UpdateRoot()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateRoot);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateRoot");

    ObjectXRay::VisitVMRoots(heap_->GetEcmaVM(), updateRootVisitor_, VMRootVisitType::UPDATE_ROOT);
}

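// Visits weak roots (global storage, reference processors, and the JIT code map) and returns the
// new location of each referent: forwarded objects yield their forwarding address, surviving
// in-place objects are returned unchanged, and dead referents map to nullptr so the caller can
// clear the weak slot.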
template<TriggerGCType gcType>
void ParallelEvacuator::UpdateWeakReferenceOpt()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReferenceOpt);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateWeakReferenceOpt");
    WeakRootVisitor gcUpdateWeak = [](TaggedObject *header) -> TaggedObject* {
        Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(header));
        ASSERT(objectRegion != nullptr);
        if constexpr (gcType == TriggerGCType::YOUNG_GC) {
            if (!objectRegion->InYoungSpace()) {
                if (objectRegion->InNewToOldSet() && !objectRegion->Test(header)) {
                    return nullptr;
                }
                return header;
            }
        } else if constexpr (gcType == TriggerGCType::OLD_GC) {
            if (!objectRegion->InYoungSpaceOrCSet()) {
                if (!objectRegion->InSharedHeap() && (objectRegion->GetMarkGCBitset() == nullptr ||
                    !objectRegion->Test(header))) {
                    return nullptr;
                }
                return header;
            }
        } else { // LOCV_EXCL_BR_LINE
            LOG_GC(FATAL) << "WeakRootVisitor: not support gcType yet";
            UNREACHABLE();
        }
        if (objectRegion->InNewToNewSet()) {
            if (objectRegion->Test(header)) {
                return header;
            }
        } else {
            MarkWord markWord(header);
            if (markWord.IsForwardingAddress()) {
                return markWord.ToForwardingAddress();
            }
        }
        return nullptr;
    };

    heap_->GetEcmaVM()->GetJSThread()->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
    heap_->GetEcmaVM()->ProcessReferences(gcUpdateWeak);
    heap_->GetEcmaVM()->GetJSThread()->UpdateJitCodeMapReference(gcUpdateWeak);
}

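// Walks the region's remembered sets and updates the slots they record. If the sweeper is still
// running, either merge the concurrently collected sets (region already swept) or iterate the
// sweeping sets atomically; cross-region sets are only processed for full GC.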
void ParallelEvacuator::UpdateRSet(Region *region)
{
    auto cb = [this](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        return UpdateOldToNewObjectSlot(slot);
    };

    if (heap_->GetSweeper()->IsSweeping()) {
        if (region->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT)) {
            // The region has already been swept, so it is safe to merge its remembered sets here.
            region->MergeOldToNewRSetForCS();
            region->MergeLocalToShareRSetForCS();
        } else {
            region->AtomicIterateAllSweepingRSetBits(cb);
        }
    }
    region->IterateAllOldToNewBits(cb);
    if (heap_->IsYoungMark()) {
        return;
    }
    region->IterateAllCrossRegionBits([this](void *mem) {
        ObjectSlot slot(ToUintPtr(mem));
        UpdateCrossRegionObjectSlot(slot);
    });
    region->DeleteCrossRegionRSet();
}

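// Linearly scans a copied-to new-space region (bounded by the allocation top when it is the
// current region) and updates the fields of every object it contains, skipping free-object gaps.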
template<TriggerGCType gcType, bool needUpdateLocalToShare>
void ParallelEvacuator::UpdateNewRegionReference(Region *region)
{
    UpdateNewObjectFieldVisitor<gcType, needUpdateLocalToShare> updateFieldVisitor(this);
    Region *current = heap_->GetNewSpace()->GetCurrentRegion();
    auto curPtr = region->GetBegin();
    uintptr_t endPtr = 0;
    if (region == current) {
        auto top = heap_->GetNewSpace()->GetTop();
        endPtr = curPtr + region->GetAllocatedBytes(top);
    } else {
        endPtr = curPtr + region->GetAllocatedBytes();
    }

    size_t objSize = 0;
    while (curPtr < endPtr) {
        auto freeObject = FreeObject::Cast(curPtr);
        // If curPtr points to a free object, it must be unpoisoned before it can be inspected.
        ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(freeObject), TaggedObject::TaggedObjectSize());
        if (!freeObject->IsFreeObject()) {
            auto obj = reinterpret_cast<TaggedObject *>(curPtr);
            auto klass = obj->GetClass();
            UpdateNewObjectField<gcType, needUpdateLocalToShare>(obj, klass, updateFieldVisitor);
            objSize = klass->SizeFromJSHClass(obj);
        } else {
            freeObject->AsanUnPoisonFreeObject();
            objSize = freeObject->Available();
            freeObject->AsanPoisonFreeObject();
        }
        curPtr += objSize;
        CHECK_OBJECT_SIZE(objSize);
    }
    CHECK_REGION_END(curPtr, endPtr);
}

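// Updates references inside a new-to-new region and sweeps it in the same pass: the gaps between
// consecutive marked objects are turned into free objects and their local-to-share remembered set
// entries are cleared.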
template<TriggerGCType gcType, bool needUpdateLocalToShare>
void ParallelEvacuator::UpdateAndSweepNewRegionReference(Region *region)
{
    UpdateNewObjectFieldVisitor<gcType, needUpdateLocalToShare> updateFieldVisitor(this);
    uintptr_t freeStart = region->GetBegin();
    uintptr_t freeEnd = freeStart + region->GetAllocatedBytes();
    region->IterateAllMarkedBits([&](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        JSHClass *klass = header->GetClass();
        UpdateNewObjectField<gcType, needUpdateLocalToShare>(header, klass, updateFieldVisitor);

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            size_t freeSize = freeEnd - freeStart;
            FreeObject::FillFreeObject(heap_, freeStart, freeSize);
            region->ClearLocalToShareRSetInRange(freeStart, freeEnd);
        }

        freeStart = freeEnd + klass->SizeFromJSHClass(header);
    });
    CHECK_REGION_END(freeStart, freeEnd);
    if (freeStart < freeEnd) {
        FreeObject::FillFreeObject(heap_, freeStart, freeEnd - freeStart);
        region->ClearLocalToShareRSetInRange(freeStart, freeEnd);
    }
}

template <TriggerGCType gcType, bool needUpdateLocalToShare>
ParallelEvacuator::UpdateNewObjectFieldVisitor<gcType, needUpdateLocalToShare>::UpdateNewObjectFieldVisitor(
    ParallelEvacuator *evacuator) : evacuator_(evacuator) {}

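// Updates the tagged slots in one visited range. For in-object areas, the layout info is consulted
// so that only fields with a tagged representation are treated as references; all other areas are
// updated slot by slot.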
template <TriggerGCType gcType, bool needUpdateLocalToShare>
void ParallelEvacuator::UpdateNewObjectFieldVisitor<gcType, needUpdateLocalToShare>::VisitObjectRangeImpl(
    TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area)
{
    if (UNLIKELY(area == VisitObjectArea::IN_OBJECT)) {
        JSHClass *hclass = root->GetClass();
        ASSERT(!hclass->IsAllTaggedProp());
        int index = 0;
        TaggedObject *dst = hclass->GetLayout().GetTaggedObject();
        LayoutInfo *layout = LayoutInfo::UncheckCast(dst);
        ObjectSlot realEnd = start;
        realEnd += layout->GetPropertiesCapacity();
        end = end > realEnd ? realEnd : end;
        for (ObjectSlot slot = start; slot < end; slot++) {
            auto attr = layout->GetAttr(index++);
            if (attr.IsTaggedRep()) {
                evacuator_->UpdateNewObjectSlot<gcType, needUpdateLocalToShare>(slot);
            }
        }
        return;
    }
    for (ObjectSlot slot = start; slot < end; slot++) {
        evacuator_->UpdateNewObjectSlot<gcType, needUpdateLocalToShare>(slot);
    }
}

template<TriggerGCType gcType, bool needUpdateLocalToShare>
void ParallelEvacuator::UpdateNewObjectField(TaggedObject *object, JSHClass *cls,
    UpdateNewObjectFieldVisitor<gcType, needUpdateLocalToShare> &updateFieldVisitor)
{
    ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(object, cls, updateFieldVisitor);
}

template<TriggerGCType gcType>
void ParallelEvacuator::UpdateNewToOldEvacuationReference(Region *region, uint32_t threadIndex)
{
    std::unordered_set<JSTaggedType> *sets = &arrayTrackInfoSets_[threadIndex];
    NewToOldEvacuationVisitor<gcType> visitor(heap_, sets, this);
    region->IterateAllMarkedBits(visitor);
}

void ParallelEvacuator::WaitFinished()
{
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::WaitFinished");
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), WaitUpdateFinished);
    if (parallel_ > 0) {
        LockHolder holder(mutex_);
        while (parallel_ > 0) {
            condition_.Wait(&mutex_);
        }
    }
}

bool ParallelEvacuator::ProcessWorkloads(bool isMain, uint32_t threadIndex)
{
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::ProcessWorkloads");
    DrainWorkloads(updateWorkloadSet_, [&](std::unique_ptr<Workload> &region) {
        region->Process(isMain, threadIndex);
    });
    if (!isMain) {
        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

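// Drains workloads until the shared set is exhausted: each pass grabs a fresh starting index from
// the prepared index list and then claims consecutive workloads from there, which tends to keep
// concurrent threads working in separate stretches of the workload vector.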
template <typename WorkloadCallback>
void ParallelEvacuator::DrainWorkloads(WorkloadSet &workloadSet, WorkloadCallback callback)
{
    std::unique_ptr<Workload> region;
    while (workloadSet.HasRemaningWorkload()) {
        std::optional<size_t> index = workloadSet.GetNextIndex();
        if (!index.has_value()) {
            return;
        }
        size_t count = workloadSet.GetWorkloadCount();
        size_t finishedCount = 0;
        for (size_t i = index.value(); i < count; i++) {
            region = workloadSet.TryGetWorkload(i);
            if (region == nullptr) {
                break;
            }
            callback(region);
            finishedCount++;
        }
        if (finishedCount && workloadSet.FetchSubAndCheckWorkloadCount(finishedCount)) {
            return;
        }
    }
}

void ParallelEvacuator::WorkloadSet::PrepareWorkloads()
{
    size_t size = workloads_.size();
    remainingWorkloadNum_.store(size, std::memory_order_relaxed);
    /*
     * Construct indexList_, which holds the starting indices used for multi-threaded
     * workload acquisition. Starting from the interval [0, size), midpoints are
     * recursively selected as the starting indices of the subintervals.
     * The first starting index is 0 to ensure no workloads are missed.
     */
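    // For example, assuming eight workloads are added, the resulting order of starting indices is
    // 0, 4, 2, 1, 3, 6, 5, 7, so concurrent threads begin in well-separated positions.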
    indexList_.reserve(size);
    indexList_.emplace_back(0);
    std::vector<std::pair<size_t, size_t>> pairList{{0, size}};
    pairList.reserve(size);
    while (!pairList.empty()) {
        auto [start, end] = pairList.back();
        pairList.pop_back();
        size_t mid = (start + end) >> 1;
        indexList_.emplace_back(mid);
        if (end - mid > 1U) {
            pairList.emplace_back(mid, end);
        }
        if (mid - start > 1U) {
            pairList.emplace_back(start, mid);
        }
    }
}

std::optional<size_t> ParallelEvacuator::WorkloadSet::GetNextIndex()
{
    size_t cursor = indexCursor_.fetch_add(1, std::memory_order_relaxed);
    if (cursor >= indexList_.size()) {
        return std::nullopt;
    }
    return indexList_[cursor];
}

std::unique_ptr<ParallelEvacuator::Workload> ParallelEvacuator::WorkloadSet::TryGetWorkload(size_t index)
{
    std::unique_ptr<Workload> workload;
    if (workloads_.at(index).first.TryAcquire()) {
        workload = std::move(workloads_[index].second);
    }
    return workload;
}

void ParallelEvacuator::WorkloadSet::Clear()
{
    workloads_.clear();
    indexList_.clear();
    indexCursor_.store(0, std::memory_order_relaxed);
    remainingWorkloadNum_.store(0, std::memory_order_relaxed);
}

ParallelEvacuator::EvacuationTask::EvacuationTask(int32_t id, uint32_t idOrder, ParallelEvacuator *evacuator)
    : Task(id), idOrder_(idOrder), evacuator_(evacuator)
{
    allocator_ = new TlabAllocator(evacuator->heap_);
}

ParallelEvacuator::EvacuationTask::~EvacuationTask()
{
    delete allocator_;
}

bool ParallelEvacuator::EvacuationTask::Run(uint32_t threadIndex)
{
    return evacuator_->EvacuateSpace(allocator_, threadIndex, idOrder_);
}

bool ParallelEvacuator::UpdateReferenceTask::Run(uint32_t threadIndex)
{
    evacuator_->ProcessWorkloads(false, threadIndex);
    return true;
}

bool ParallelEvacuator::EvacuateWorkload::Process([[maybe_unused]] bool isMain, [[maybe_unused]] uint32_t threadIndex)
{
    return true;
}

bool ParallelEvacuator::UpdateRSetWorkload::Process([[maybe_unused]] bool isMain,
                                                    [[maybe_unused]] uint32_t threadIndex)
{
    GetEvacuator()->UpdateRSet(GetRegion());
    return true;
}

bool ParallelEvacuator::UpdateNewRegionWorkload::Process([[maybe_unused]] bool isMain,
                                                         [[maybe_unused]] uint32_t threadIndex)
{
    if (isYoungGC_) {
        GetEvacuator()->UpdateNewRegionReference<TriggerGCType::YOUNG_GC, true>(GetRegion());
    } else {
        GetEvacuator()->UpdateNewRegionReference<TriggerGCType::OLD_GC, true>(GetRegion());
    }
    return true;
}

bool ParallelEvacuator::UpdateAndSweepNewRegionWorkload::Process([[maybe_unused]] bool isMain,
                                                                 [[maybe_unused]] uint32_t threadIndex)
{
    if (isYoungGC_) {
        GetEvacuator()->UpdateAndSweepNewRegionReference<TriggerGCType::YOUNG_GC, false>(GetRegion());
    } else {
        GetEvacuator()->UpdateAndSweepNewRegionReference<TriggerGCType::OLD_GC, false>(GetRegion());
    }
    return true;
}

bool ParallelEvacuator::UpdateNewToOldEvacuationWorkload::Process([[maybe_unused]] bool isMain, uint32_t threadIndex)
{
    if (isYoungGC_) {
        GetEvacuator()->UpdateNewToOldEvacuationReference<TriggerGCType::YOUNG_GC>(GetRegion(), threadIndex);
    } else {
        GetEvacuator()->UpdateNewToOldEvacuationReference<TriggerGCType::OLD_GC>(GetRegion(), threadIndex);
    }
    return true;
}
} // namespace panda::ecmascript