/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/parallel_evacuator-inl.h"

#include "ecmascript/mem/tlab_allocator-inl.h"
#include "ecmascript/runtime_call_id.h"

namespace panda::ecmascript {
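// Evacuation setup: record the new-space water line (used later to decide
// which survivors get promoted), release or swap the space being collected,
// and create the TLAB allocator that the copying phase allocates from.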
void ParallelEvacuator::Initialize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorInitialize);
    waterLine_ = heap_->GetNewSpace()->GetWaterLine();
    if (heap_->IsEdenMark()) {
        heap_->ReleaseEdenAllocator();
    } else {
        ASSERT(heap_->IsYoungMark() || heap_->IsFullMark());
        heap_->SwapNewSpace();
    }
    allocator_ = new TlabAllocator(heap_);
    promotedSize_ = 0;
    edenToYoungSize_ = 0;
}

void ParallelEvacuator::Finalize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorFinalize);
    delete allocator_;
}

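// Top-level driver. The phases must run in this order: copying installs a
// forwarding address in each moved object's mark word, and the reference
// update phase relies on those forwarding addresses.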
void ParallelEvacuator::Evacuate()
{
    Initialize();
    EvacuateSpace();
    UpdateReference();
    Finalize();
}

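// The per-thread track-info sets hold weak references to PGO TrackInfo objects
// recorded while copying promoted JSArrays. Re-resolve each one through its
// forwarding address and mark survivors as now living in old space.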
void ParallelEvacuator::UpdateTrackInfo()
{
    for (uint32_t i = 0; i <= MAX_TASKPOOL_THREAD_NUM; i++) {
        auto &trackInfoSet = ArrayTrackInfoSet(i);
        for (auto &each : trackInfoSet) {
            auto trackInfoVal = JSTaggedValue(each);
            if (!trackInfoVal.IsHeapObject() || !trackInfoVal.IsWeak()) {
                continue;
            }
            auto trackInfo = trackInfoVal.GetWeakReferentUnChecked();
            trackInfo = UpdateAddressAfterEvacation(trackInfo);
            if (trackInfo) {
                heap_->GetEcmaVM()->GetPGOProfiler()->UpdateTrackSpaceFlag(trackInfo, RegionSpaceFlag::IN_OLD_SPACE);
            }
        }
        trackInfoSet.clear();
    }
}

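// Queue one workload per region to evacuate: eden regions for an eden GC;
// eden, from-space and the old-space collect set for young or full GC. Worker
// tasks are posted if parallel GC is enabled, and the main thread drains the
// same queue so it never sits idle.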
void ParallelEvacuator::EvacuateSpace()
{
    TRACE_GC(GCStats::Scope::ScopeId::EvacuateSpace, heap_->GetEcmaVM()->GetEcmaGCStats());
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::EvacuateSpace");
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuator);
    if (heap_->IsEdenMark()) {
        heap_->GetEdenSpace()->EnumerateRegions([this] (Region *current) {
            AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
        });
    } else if (heap_->IsConcurrentFullMark() || heap_->IsYoungMark()) {
        heap_->GetEdenSpace()->EnumerateRegions([this] (Region *current) {
            AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
        });
        heap_->GetFromSpaceDuringEvacuation()->EnumerateRegions([this] (Region *current) {
            AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
        });
        heap_->GetOldSpace()->EnumerateCollectRegionSet([this](Region *current) {
            AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
        });
    }
    if (heap_->IsParallelGCEnabled()) {
        LockHolder holder(mutex_);
        parallel_ = CalculateEvacuationThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<EvacuationTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }
    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::EvacuateRegion, heap_->GetEcmaVM()->GetEcmaGCStats());
        EvacuateSpace(allocator_, MAIN_THREAD_INDEX, true);
    }

    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::WaitFinish, heap_->GetEcmaVM()->GetEcmaGCStats());
        WaitFinished();
    }

    if (heap_->GetJSThread()->IsPGOProfilerEnable()) {
        UpdateTrackInfo();
    }
}

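// Worker-side drain loop shared by the main thread and EvacuationTasks. The
// last non-main worker to finish wakes up WaitFinished() via the condition
// variable.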
bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, uint32_t threadIndex, bool isMain)
{
    std::unique_ptr<Workload> region = GetWorkloadSafe();
    auto &arrayTrackInfoSet = ArrayTrackInfoSet(threadIndex);
    while (region != nullptr) {
        EvacuateRegion(allocator, region->GetRegion(), arrayTrackInfoSet);
        region = GetWorkloadSafe();
    }
    allocator->Finalize();
    if (!isMain) {
        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

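// Copies every marked object out of a region. Objects below the age mark (or
// water line) and objects in old-space collect-set regions are promoted to old
// space; the rest go to the semi-space, falling back to old space if young
// allocation fails. The forwarding address is stored in the old copy's mark
// word, and the remembered sets of the new copy are rebuilt as needed.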
void ParallelEvacuator::EvacuateRegion(TlabAllocator *allocator, Region *region,
                                       std::unordered_set<JSTaggedType> &trackSet)
{
    bool isInEden = region->InEdenSpace();
    bool isInOldGen = region->InOldSpace();
    bool isBelowAgeMark = region->BelowAgeMark();
    bool pgoEnabled = heap_->GetJSThread()->IsPGOProfilerEnable();
    bool inHeapProfiler = heap_->InHeapProfiler();
    size_t promotedSize = 0;
    size_t edenToYoungSize = 0;
    if (WholeRegionEvacuate(region)) {
        return;
    }
    region->IterateAllMarkedBits([this, &region, &isInOldGen, &isBelowAgeMark, isInEden, &pgoEnabled,
                                  &promotedSize, &allocator, &trackSet, &edenToYoungSize, inHeapProfiler](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t address = 0;
        bool actualPromoted = false;
        bool hasAgeMark = isBelowAgeMark || (region->HasAgeMark() && ToUintPtr(mem) < waterLine_);
        if (hasAgeMark) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
            promotedSize += size;
        } else if (isInOldGen) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
        } else {
            address = allocator->Allocate(size, SEMI_SPACE);
            if (address == 0) {
                address = allocator->Allocate(size, OLD_SPACE);
                actualPromoted = true;
                promotedSize += size;
            } else if (isInEden) {
                edenToYoungSize += size;
            }
        }
        LOG_ECMA_IF(address == 0, FATAL) << "Evacuate object failed:" << size;

        if (memcpy_s(ToVoidPtr(address), size, ToVoidPtr(ToUintPtr(mem)), size) != EOK) {
            LOG_FULL(FATAL) << "memcpy_s failed";
        }
        if (inHeapProfiler) {
            heap_->OnMoveEvent(reinterpret_cast<uintptr_t>(mem), reinterpret_cast<TaggedObject *>(address), size);
        }
        if (pgoEnabled) {
            if (actualPromoted && klass->IsJSArray()) {
                auto trackInfo = JSArray::Cast(header)->GetTrackInfo();
                trackSet.emplace(trackInfo.GetRawData());
            }
        }
        Barriers::SetPrimitive(header, 0, MarkWord::FromForwardingAddress(address));

        if (actualPromoted) {
            SetObjectFieldRSet<false>(reinterpret_cast<TaggedObject *>(address), klass);
        } else if (isInEden) {
            SetObjectFieldRSet<true>(reinterpret_cast<TaggedObject *>(address), klass);
        } else if (region->HasLocalToShareRememberedSet()) {
            UpdateLocalToShareRSet(reinterpret_cast<TaggedObject *>(address), klass);
        }
    });
    promotedSize_.fetch_add(promotedSize);
    edenToYoungSize_.fetch_add(edenToYoungSize);
}

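// Redirects every slot that may point at a moved object: new-space regions,
// old/snapshot-space remembered sets, VM roots and weak references. Region
// workloads are processed in parallel when enabled.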
void ParallelEvacuator::UpdateReference()
{
    TRACE_GC(GCStats::Scope::ScopeId::UpdateReference, heap_->GetEcmaVM()->GetEcmaGCStats());
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelUpdateReference);
    // Update reference pointers
    uint32_t youngRegionMoveCount = 0;
    uint32_t youngRegionCopyCount = 0;
    uint32_t oldRegionCount = 0;
    if (heap_->IsEdenMark()) {
        heap_->GetNewSpace()->EnumerateRegions([&] (Region *current) {
            AddWorkload(std::make_unique<UpdateNewToEdenRSetWorkload>(this, current));
        });
    } else {
        heap_->GetNewSpace()->EnumerateRegions([&] (Region *current) {
            if (current->InNewToNewSet()) {
                AddWorkload(std::make_unique<UpdateAndSweepNewRegionWorkload>(this, current, heap_->IsYoungMark()));
                youngRegionMoveCount++;
            } else {
                AddWorkload(std::make_unique<UpdateNewRegionWorkload>(this, current, heap_->IsYoungMark()));
                youngRegionCopyCount++;
            }
        });
    }
    heap_->EnumerateOldSpaceRegions([this, &oldRegionCount] (Region *current) {
        if (current->InCollectSet()) {
            return;
        }
        AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current, heap_->IsEdenMark()));
        oldRegionCount++;
    });
    heap_->EnumerateSnapshotSpaceRegions([this] (Region *current) {
        AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current, heap_->IsEdenMark()));
    });
    LOG_GC(DEBUG) << "UpdatePointers statistic: young space region compact moving count:"
                  << youngRegionMoveCount
                  << " young space region compact copying count:" << youngRegionCopyCount
                  << " old space region count:" << oldRegionCount;

    if (heap_->IsParallelGCEnabled()) {
        LockHolder holder(mutex_);
        parallel_ = CalculateUpdateThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<UpdateReferenceTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }
    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::UpdateRoot, heap_->GetEcmaVM()->GetEcmaGCStats());
        UpdateRoot();
    }

    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::UpdateWeekRef, heap_->GetEcmaVM()->GetEcmaGCStats());
        if (heap_->IsEdenMark()) {
            UpdateWeakReference();
        } else if (heap_->IsYoungMark()) {
            UpdateWeakReferenceOpt<TriggerGCType::YOUNG_GC>();
        } else {
            UpdateWeakReferenceOpt<TriggerGCType::OLD_GC>();
        }
    }
    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::ProceeWorkload, heap_->GetEcmaVM()->GetEcmaGCStats());
        ProcessWorkloads(true);
    }
    WaitFinished();
}

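// Updates VM roots. Derived pointers are rebased by applying the displacement
// of their (possibly moved) base object.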
void ParallelEvacuator::UpdateRoot()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateRoot);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateRoot");
    RootVisitor gcUpdateYoung = [this]([[maybe_unused]] Root type, ObjectSlot slot) {
        UpdateObjectSlot(slot);
    };
    RootRangeVisitor gcUpdateRangeYoung = [this]([[maybe_unused]] Root type, ObjectSlot start, ObjectSlot end) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            UpdateObjectSlot(slot);
        }
    };
    RootBaseAndDerivedVisitor gcUpdateDerived =
        []([[maybe_unused]] Root type, ObjectSlot base, ObjectSlot derived, uintptr_t baseOldObject) {
        if (JSTaggedValue(base.GetTaggedType()).IsHeapObject()) {
            derived.Update(base.GetTaggedType() + derived.GetTaggedType() - baseOldObject);
        }
    };

    ObjectXRay::VisitVMRoots(heap_->GetEcmaVM(), gcUpdateYoung, gcUpdateRangeYoung, gcUpdateDerived,
                             VMRootVisitType::UPDATE_ROOT);
}

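// Drains the per-thread weak-reference queues that were filled during marking
// (task-pool threads plus the main thread) and updates each recorded slot.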
void ParallelEvacuator::UpdateRecordWeakReference()
{
    auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
    for (uint32_t i = 0; i < totalThreadCount; i++) {
        ProcessQueue *queue = heap_->GetWorkManager()->GetWeakReferenceQueue(i);

        while (true) {
            auto obj = queue->PopBack();
            if (UNLIKELY(obj == nullptr)) {
                break;
            }
            ObjectSlot slot(ToUintPtr(obj));
            JSTaggedValue value(slot.GetTaggedType());
            if (value.IsWeak()) {
                UpdateWeakObjectSlot(value.GetTaggedWeakRef(), slot);
            }
        }
    }
}

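// Weak-reference pass used on the eden-GC path. The visitor keeps shared-heap
// referents alive, follows forwarding addresses for evacuated objects, and
// clears slots whose referents are unmarked and were not evacuated.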
void ParallelEvacuator::UpdateWeakReference()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReference);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateWeakReference");
    UpdateRecordWeakReference();
    bool isFullMark = heap_->IsConcurrentFullMark();
    bool isEdenMark = heap_->IsEdenMark();
    WeakRootVisitor gcUpdateWeak = [isFullMark, isEdenMark](TaggedObject *header) -> TaggedObject* {
        Region *objectRegion = Region::ObjectAddressToRange(header);
        if (UNLIKELY(objectRegion == nullptr)) {
            LOG_GC(ERROR) << "PartialGC updateWeakReference: region is nullptr, header is " << header;
            return nullptr;
        }
        // Weak objects in the shared heap are always alive during partial GC.
        if (objectRegion->InSharedHeap()) {
            return header;
        }
        if (isEdenMark) {
            if (!objectRegion->InEdenSpace()) {
                return header;
            }
            MarkWord markWord(header);
            if (markWord.IsForwardingAddress()) {
                return markWord.ToForwardingAddress();
            }
            return nullptr;
        }
        if (objectRegion->InGeneralNewSpaceOrCSet()) {
            if (objectRegion->InNewToNewSet()) {
                if (objectRegion->Test(header)) {
                    return header;
                }
            } else {
                MarkWord markWord(header);
                if (markWord.IsForwardingAddress()) {
                    return markWord.ToForwardingAddress();
                }
            }
            return nullptr;
        }
        if (isFullMark) {
            if (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(header)) {
                return nullptr;
            }
        }
        return header;
    };

    heap_->GetEcmaVM()->GetJSThread()->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
    heap_->GetEcmaVM()->ProcessReferences(gcUpdateWeak);
    heap_->GetEcmaVM()->GetJSThread()->UpdateJitCodeMapReference(gcUpdateWeak);
}

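// Templated variants of the weak-reference update, specialized per GC type so
// the per-slot region checks are resolved with if constexpr at compile time
// rather than re-testing the heap state for every referent.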
template<TriggerGCType gcType>
void ParallelEvacuator::UpdateRecordWeakReferenceOpt()
{
    auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
    for (uint32_t i = 0; i < totalThreadCount; i++) {
        ProcessQueue *queue = heap_->GetWorkManager()->GetWeakReferenceQueue(i);

        while (true) {
            auto obj = queue->PopBack();
            if (UNLIKELY(obj == nullptr)) {
                break;
            }
            ObjectSlot slot(ToUintPtr(obj));
            JSTaggedValue value(slot.GetTaggedType());
            if (value.IsHeapObject()) {
                UpdateWeakObjectSlotOpt<gcType>(value, slot);
            }
        }
    }
}

template<TriggerGCType gcType>
void ParallelEvacuator::UpdateWeakReferenceOpt()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReference);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateWeakReference");
    UpdateRecordWeakReferenceOpt<gcType>();
    WeakRootVisitor gcUpdateWeak = [](TaggedObject *header) -> TaggedObject* {
        Region *objectRegion = Region::ObjectAddressToRange(header);
        ASSERT(objectRegion != nullptr);
        if constexpr (gcType == TriggerGCType::YOUNG_GC) {
            if (!objectRegion->InGeneralNewSpace()) {
                return header;
            }
        } else if constexpr (gcType == TriggerGCType::OLD_GC) {
            if (!objectRegion->InGeneralNewSpaceOrCSet()) {
                if (!objectRegion->InSharedHeap() && (objectRegion->GetMarkGCBitset() == nullptr ||
                                                      !objectRegion->Test(header))) {
                    return nullptr;
                }
                return header;
            }
        } else {
            LOG_GC(FATAL) << "WeakRootVisitor: unsupported gcType";
            UNREACHABLE();
        }
        if (objectRegion->InNewToNewSet()) {
            if (objectRegion->Test(header)) {
                return header;
            }
        } else {
            MarkWord markWord(header);
            if (markWord.IsForwardingAddress()) {
                return markWord.ToForwardingAddress();
            }
        }
        return nullptr;
    };

    heap_->GetEcmaVM()->GetJSThread()->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
    heap_->GetEcmaVM()->ProcessReferences(gcUpdateWeak);
    heap_->GetEcmaVM()->GetJSThread()->UpdateJitCodeMapReference(gcUpdateWeak);
}

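// Redirects the slots recorded in a region's remembered sets. While the
// concurrent sweeper still owns the region, the sweeping remembered-set bits
// must be visited atomically; cross-region bits are processed (or, for young
// GC, simply discarded) and then deleted.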
template<bool IsEdenGC>
void ParallelEvacuator::UpdateRSet(Region *region)
{
    auto cb = [this](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        return UpdateOldToNewObjectSlot<IsEdenGC>(slot);
    };

    if (heap_->GetSweeper()->IsSweeping()) {
        if (region->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT)) {
            // The region has already been swept, so its remembered sets can be
            // merged safely while updating.
            region->MergeOldToNewRSetForCS();
            region->MergeLocalToShareRSetForCS();
        } else {
            region->AtomicIterateAllSweepingRSetBits(cb);
        }
    }
    region->IterateAllOldToNewBits(cb);
    if (heap_->IsYoungMark()) {
        region->DeleteCrossRegionRSet();
        return;
    }
    if constexpr (IsEdenGC) {
        region->IterateAllCrossRegionBits([this](void *mem) {
            ObjectSlot slot(ToUintPtr(mem));
            UpdateObjectSlot(slot);
        });
    } else {
        region->IterateAllCrossRegionBits([this](void *mem) {
            ObjectSlot slot(ToUintPtr(mem));
            UpdateObjectSlotOpt<TriggerGCType::OLD_GC>(slot);
        });
    }
    region->DeleteCrossRegionRSet();
}

void ParallelEvacuator::UpdateNewToEdenRSetReference(Region *region)
{
    auto cb = [this](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        return UpdateNewToEdenObjectSlot(slot);
    };
    region->IterateAllNewToEdenBits(cb);
    region->ClearNewToEdenRSet();
}

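// Linearly walks a copied new-space region from its start to its allocation
// top, updating the fields of each live object and stepping over free objects
// by their recorded size.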
template<TriggerGCType gcType>
void ParallelEvacuator::UpdateNewRegionReference(Region *region)
{
    Region *current = heap_->GetNewSpace()->GetCurrentRegion();
    auto curPtr = region->GetBegin();
    uintptr_t endPtr = 0;
    if (region == current) {
        auto top = heap_->GetNewSpace()->GetTop();
        endPtr = curPtr + region->GetAllocatedBytes(top);
    } else {
        endPtr = curPtr + region->GetAllocatedBytes();
    }

    size_t objSize = 0;
    while (curPtr < endPtr) {
        auto freeObject = FreeObject::Cast(curPtr);
        // If curPtr points at a free object, its header must be unpoisoned before reading it.
        ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(freeObject), TaggedObject::TaggedObjectSize());
        if (!freeObject->IsFreeObject()) {
            auto obj = reinterpret_cast<TaggedObject *>(curPtr);
            auto klass = obj->GetClass();
            UpdateNewObjectField<gcType>(obj, klass);
            objSize = klass->SizeFromJSHClass(obj);
        } else {
            freeObject->AsanUnPoisonFreeObject();
            objSize = freeObject->Available();
            freeObject->AsanPoisonFreeObject();
        }
        curPtr += objSize;
        CHECK_OBJECT_SIZE(objSize);
    }
    CHECK_REGION_END(curPtr, endPtr);
}

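// For new-space regions that survive in place (the new-to-new set), reference
// updating and sweeping are fused: fields of marked objects are updated, and
// the gaps between them are turned into free objects with the corresponding
// local-to-share remembered-set ranges cleared.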
template<TriggerGCType gcType>
void ParallelEvacuator::UpdateAndSweepNewRegionReference(Region *region)
{
    uintptr_t freeStart = region->GetBegin();
    uintptr_t freeEnd = freeStart + region->GetAllocatedBytes();
    region->IterateAllMarkedBits([&](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        JSHClass *klass = header->GetClass();
        UpdateNewObjectField<gcType>(header, klass);

        // The gap between the previous live object and this one becomes free space.
        uintptr_t objectStart = ToUintPtr(mem);
        if (freeStart != objectStart) {
            size_t freeSize = objectStart - freeStart;
            FreeObject::FillFreeObject(heap_, freeStart, freeSize);
            region->ClearLocalToShareRSetInRange(freeStart, objectStart);
        }

        freeStart = objectStart + klass->SizeFromJSHClass(header);
    });
    CHECK_REGION_END(freeStart, freeEnd);
    if (freeStart < freeEnd) {
        FreeObject::FillFreeObject(heap_, freeStart, freeEnd - freeStart);
        region->ClearLocalToShareRSetInRange(freeStart, freeEnd);
    }
}

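// Updates every reference slot of one object, using its class layout: the
// in-object fields first, then any remaining slot range.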
template<TriggerGCType gcType>
void ParallelEvacuator::UpdateNewObjectField(TaggedObject *object, JSHClass *cls)
{
    ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(object, cls,
        [this](TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area) {
            if (area == VisitObjectArea::IN_OBJECT) {
                if (VisitBodyInObj(root, start, end,
                                   [&](ObjectSlot slot) { UpdateObjectSlotOpt<gcType>(slot); })) {
                    return;
                }
            }
            for (ObjectSlot slot = start; slot < end; slot++) {
                UpdateObjectSlotOpt<gcType>(slot);
            }
        });
}

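// Blocks until all posted tasks have finished, i.e. until the last worker
// decrements parallel_ to zero and signals the condition variable.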
void ParallelEvacuator::WaitFinished()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), WaitUpdateFinished);
    if (parallel_ > 0) {
        LockHolder holder(mutex_);
        while (parallel_ > 0) {
            condition_.Wait(&mutex_);
        }
    }
}

bool ParallelEvacuator::ProcessWorkloads(bool isMain)
{
    std::unique_ptr<Workload> region = GetWorkloadSafe();
    while (region != nullptr) {
        region->Process(isMain);
        region = GetWorkloadSafe();
    }
    if (!isMain) {
        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

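// Each evacuation task owns its own TLAB allocator, so worker threads copy
// objects without contending on a shared allocator.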
ParallelEvacuator::EvacuationTask::EvacuationTask(int32_t id, ParallelEvacuator *evacuator)
    : Task(id), evacuator_(evacuator)
{
    allocator_ = new TlabAllocator(evacuator->heap_);
}

ParallelEvacuator::EvacuationTask::~EvacuationTask()
{
    delete allocator_;
}

bool ParallelEvacuator::EvacuationTask::Run(uint32_t threadIndex)
{
    return evacuator_->EvacuateSpace(allocator_, threadIndex);
}

bool ParallelEvacuator::UpdateReferenceTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    evacuator_->ProcessWorkloads(false);
    return true;
}

bool ParallelEvacuator::EvacuateWorkload::Process([[maybe_unused]] bool isMain)
{
    // Evacuation workloads are drained directly by EvacuateSpace(); there is
    // nothing to do per workload here.
    return true;
}

bool ParallelEvacuator::UpdateRSetWorkload::Process([[maybe_unused]] bool isMain)
{
    if (isEdenGC_) {
        GetEvacuator()->UpdateRSet<true>(GetRegion());
    } else {
        GetEvacuator()->UpdateRSet<false>(GetRegion());
    }
    return true;
}

bool ParallelEvacuator::UpdateNewToEdenRSetWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateNewToEdenRSetReference(GetRegion());
    return true;
}

bool ParallelEvacuator::UpdateNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    if (isYoungGC_) {
        GetEvacuator()->UpdateNewRegionReference<TriggerGCType::YOUNG_GC>(GetRegion());
    } else {
        GetEvacuator()->UpdateNewRegionReference<TriggerGCType::OLD_GC>(GetRegion());
    }
    return true;
}

bool ParallelEvacuator::UpdateAndSweepNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    if (isYoungGC_) {
        GetEvacuator()->UpdateAndSweepNewRegionReference<TriggerGCType::YOUNG_GC>(GetRegion());
    } else {
        GetEvacuator()->UpdateAndSweepNewRegionReference<TriggerGCType::OLD_GC>(GetRegion());
    }
    return true;
}
}  // namespace panda::ecmascript