/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/parallel_evacuator-inl.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/barriers-inl.h"
#include "ecmascript/mem/clock_scope.h"
#include "ecmascript/mem/concurrent_sweeper.h"
#include "ecmascript/mem/gc_bitset.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/space-inl.h"
#include "ecmascript/mem/tlab_allocator-inl.h"
#include "ecmascript/mem/visitor.h"
#include "ecmascript/mem/gc_stats.h"
#include "ecmascript/ecma_string_table.h"
#include "ecmascript/runtime_call_id.h"

namespace panda::ecmascript {
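// Prepares one evacuation pass: records the new-space water line (used below to
// decide which surviving objects are old enough to promote), flips the new-space
// semispaces, and creates the TLAB allocator that the copy loop allocates from.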
void ParallelEvacuator::Initialize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorInitialize);
    waterLine_ = heap_->GetNewSpace()->GetWaterLine();
    heap_->SwapNewSpace();
    allocator_ = new TlabAllocator(heap_);
    promotedSize_ = 0;
}

void ParallelEvacuator::Finalize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorFinalize);
    delete allocator_;
}

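// Top-level driver of the evacuation phase: copy live objects out of the
// from-space and the collect set, then rewrite every reference to point at the
// new copies, and finally release the per-pass allocator.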
void ParallelEvacuator::Evacuate()
{
    Initialize();
    EvacuateSpace();
    UpdateReference();
    Finalize();
}

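// With PGO profiling enabled, evacuation threads record the track info of each
// promoted JSArray (see EvacuateRegion). Once copying is finished, this re-reads
// those weak records through their forwarding addresses and flags the surviving
// ones as now living in old space.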
void ParallelEvacuator::UpdateTrackInfo()
{
    for (uint32_t i = 0; i <= MAX_TASKPOOL_THREAD_NUM; i++) {
        auto &trackInfoSet = ArrayTrackInfoSet(i);
        for (auto &each : trackInfoSet) {
            auto trackInfoVal = JSTaggedValue(each);
            if (!trackInfoVal.IsHeapObject() || !trackInfoVal.IsWeak()) {
                continue;
            }
            auto trackInfo = trackInfoVal.GetWeakReferentUnChecked();
            trackInfo = UpdateAddressAfterEvacation(trackInfo);
            if (trackInfo) {
                heap_->GetEcmaVM()->GetPGOProfiler()->UpdateTrackSpaceFlag(trackInfo, RegionSpaceFlag::IN_OLD_SPACE);
            }
        }
        trackInfoSet.clear();
    }
}

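// Queues one workload per from-space region and per collect-set old-space
// region, then drains the queue. Worker tasks are posted only when parallel GC
// is enabled; the main thread always participates via the direct call below.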
void ParallelEvacuator::EvacuateSpace()
{
    TRACE_GC(GCStats::Scope::ScopeId::EvacuateSpace, heap_->GetEcmaVM()->GetEcmaGCStats());
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::EvacuateSpace");
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuator);
    heap_->GetFromSpaceDuringEvacuation()->EnumerateRegions([this] (Region *current) {
        AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
    });
    heap_->GetOldSpace()->EnumerateCollectRegionSet(
        [this](Region *current) {
            AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
        });
    if (heap_->IsParallelGCEnabled()) {
        LockHolder holder(mutex_);
        parallel_ = CalculateEvacuationThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<EvacuationTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }

    EvacuateSpace(allocator_, 0, true);
    WaitFinished();
    if (heap_->GetJSThread()->IsPGOProfilerEnable()) {
        UpdateTrackInfo();
    }
}

bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, uint32_t threadIndex, bool isMain)
{
    std::unique_ptr<Workload> region = GetWorkloadSafe();
    auto &arrayTrackInfoSet = ArrayTrackInfoSet(threadIndex);
    while (region != nullptr) {
        EvacuateRegion(allocator, region->GetRegion(), arrayTrackInfoSet);
        region = GetWorkloadSafe();
    }
    allocator->Finalize();
    if (!isMain) {
        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

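// Copies every marked object out of `region`. Objects below the age mark
// (survivors of an earlier GC cycle) and objects in collect-set old regions are
// promoted to old space; other young objects go to the to-space, falling back
// to old space if the to-space TLAB is exhausted. A fully-surviving young
// region may be re-linked wholesale (MoveYoungRegionSync) instead of being
// copied object by object. After each copy, the old object's first word is
// overwritten with a forwarding address (MarkWord::FromForwardingAddress) so
// that later reference updates can find the new location.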
void ParallelEvacuator::EvacuateRegion(TlabAllocator *allocator, Region *region,
                                       std::unordered_set<JSTaggedType> &trackSet)
{
    bool isInOldGen = region->InOldSpace();
    bool isBelowAgeMark = region->BelowAgeMark();
    bool pgoEnabled = heap_->GetJSThread()->IsPGOProfilerEnable();
    size_t promotedSize = 0;
    if (!isBelowAgeMark && !isInOldGen && IsWholeRegionEvacuate(region)) {
        if (heap_->MoveYoungRegionSync(region)) {
            return;
        }
    }
    region->IterateAllMarkedBits([this, &region, &isInOldGen, &isBelowAgeMark, &pgoEnabled,
                                  &promotedSize, &allocator, &trackSet](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t address = 0;
        bool actualPromoted = false;
        bool hasAgeMark = isBelowAgeMark || (region->HasAgeMark() && ToUintPtr(mem) < waterLine_);
        if (hasAgeMark) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
            promotedSize += size;
        } else if (isInOldGen) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
        } else {
            address = allocator->Allocate(size, SEMI_SPACE);
            if (address == 0) {
                address = allocator->Allocate(size, OLD_SPACE);
                actualPromoted = true;
                promotedSize += size;
            }
        }
        LOG_ECMA_IF(address == 0, FATAL) << "Evacuate object failed: " << size;

        if (memcpy_s(ToVoidPtr(address), size, ToVoidPtr(ToUintPtr(mem)), size) != EOK) {
            LOG_FULL(FATAL) << "memcpy_s failed";
        }
        heap_->OnMoveEvent(reinterpret_cast<uintptr_t>(mem), reinterpret_cast<TaggedObject *>(address), size);
        if (pgoEnabled) {
            if (actualPromoted && klass->IsJSArray()) {
                auto trackInfo = JSArray::Cast(header)->GetTrackInfo();
                trackSet.emplace(trackInfo.GetRawData());
            }
        }
        Barriers::SetPrimitive(header, 0, MarkWord::FromForwardingAddress(address));

        if (UNLIKELY(heap_->ShouldVerifyHeap())) {
            VerifyHeapObject(reinterpret_cast<TaggedObject *>(address));
        }
        if (actualPromoted) {
            SetObjectFieldRSet(reinterpret_cast<TaggedObject *>(address), klass);
        }
    });
    promotedSize_.fetch_add(promotedSize);
}

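// Debug-only verification (gated by ShouldVerifyHeap): walks the fields of a
// freshly copied object and aborts on any strong reference to an unmarked
// young/collect-set object, which would indicate a missed mark.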
void ParallelEvacuator::VerifyHeapObject(TaggedObject *object)
{
    auto klass = object->GetClass();
    objXRay_.VisitObjectBody<VisitType::OLD_GC_VISIT>(object, klass,
        [&](TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area) {
            if (area == VisitObjectArea::IN_OBJECT) {
                if (VisitBodyInObj(root, start, end, [&](ObjectSlot slot) { VerifyValue(object, slot); })) {
                    return;
                }
            }
            for (ObjectSlot slot = start; slot < end; slot++) {
                VerifyValue(object, slot);
            }
        });
}

void ParallelEvacuator::VerifyValue(TaggedObject *object, ObjectSlot slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (value.IsHeapObject()) {
        if (value.IsWeakForHeapObject()) {
            return;
        }
        Region *objectRegion = Region::ObjectAddressToRange(value.GetTaggedObject());
        if (!heap_->IsConcurrentFullMark() && !objectRegion->InYoungSpace()) {
            return;
        }
        if (!objectRegion->Test(value.GetTaggedObject()) && !objectRegion->InAppSpawnSpace()) {
            LOG_GC(FATAL) << "Miss mark value: " << value.GetTaggedObject()
                          << ", body address:" << slot.SlotAddress()
                          << ", header address:" << object;
        }
    }
}

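// Second half of evacuation: surviving references still point at the old
// copies, so walk the roots, weak references, remembered sets and new regions,
// rewriting slots through the forwarding addresses installed above. Region
// walks are queued as workloads and drained with the same worker-count
// protocol as EvacuateSpace.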
void ParallelEvacuator::UpdateReference()
{
    TRACE_GC(GCStats::Scope::ScopeId::UpdateReference, heap_->GetEcmaVM()->GetEcmaGCStats());
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelUpdateReference);
    // Update reference pointers
    uint32_t youngRegionMoveCount = 0;
    uint32_t youngRegionCopyCount = 0;
    uint32_t oldRegionCount = 0;
    heap_->GetNewSpace()->EnumerateRegions([&] (Region *current) {
        if (current->InNewToNewSet()) {
            AddWorkload(std::make_unique<UpdateAndSweepNewRegionWorkload>(this, current));
            youngRegionMoveCount++;
        } else {
            AddWorkload(std::make_unique<UpdateNewRegionWorkload>(this, current));
            youngRegionCopyCount++;
        }
    });
    heap_->EnumerateOldSpaceRegions([this, &oldRegionCount] (Region *current) {
        if (current->InCollectSet()) {
            return;
        }
        AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current));
        oldRegionCount++;
    });
    heap_->EnumerateSnapshotSpaceRegions([this] (Region *current) {
        AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current));
    });
    LOG_GC(DEBUG) << "UpdatePointers statistic: young space region compact moving count:"
                  << youngRegionMoveCount
                  << " young space region compact copying count:" << youngRegionCopyCount
                  << " old space region count:" << oldRegionCount;

    if (heap_->IsParallelGCEnabled()) {
        LockHolder holder(mutex_);
        parallel_ = CalculateUpdateThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<UpdateReferenceTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }

    UpdateRoot();
    UpdateWeakReference();
    ProcessWorkloads(true);
    WaitFinished();
}

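// Rewrites all VM roots. Derived (interior) pointers are rebased by the
// distance their base object moved.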
void ParallelEvacuator::UpdateRoot()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateRoot);
    RootVisitor gcUpdateYoung = [this]([[maybe_unused]] Root type, ObjectSlot slot) {
        UpdateObjectSlot(slot);
    };
    RootRangeVisitor gcUpdateRangeYoung = [this]([[maybe_unused]] Root type, ObjectSlot start, ObjectSlot end) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            UpdateObjectSlot(slot);
        }
    };
    RootBaseAndDerivedVisitor gcUpdateDerived =
        []([[maybe_unused]] Root type, ObjectSlot base, ObjectSlot derived, uintptr_t baseOldObject) {
            if (JSTaggedValue(base.GetTaggedType()).IsHeapObject()) {
                derived.Update(base.GetTaggedType() + derived.GetTaggedType() - baseOldObject);
            }
        };

    objXRay_.VisitVMRoots(gcUpdateYoung, gcUpdateRangeYoung, gcUpdateDerived);
}

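// Drains the per-thread weak-reference queues filled during marking and
// updates each recorded weak slot through its referent's forwarding address.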
void ParallelEvacuator::UpdateRecordWeakReference()
{
    auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
    for (uint32_t i = 0; i < totalThreadCount; i++) {
        ProcessQueue *queue = heap_->GetWorkManager()->GetWeakReferenceQueue(i);

        while (true) {
            auto obj = queue->PopBack();
            if (UNLIKELY(obj == nullptr)) {
                break;
            }
            ObjectSlot slot(ToUintPtr(obj));
            JSTaggedValue value(slot.GetTaggedType());
            if (value.IsWeak()) {
                UpdateWeakObjectSlot(value.GetTaggedWeakRef(), slot);
            }
        }
    }
}

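// Clears or forwards weak referents. A target in young space or the collect
// set survives only if it was forwarded (or, for in-place-swept new-to-new
// regions, if it is still marked); during a full mark, unmarked old-space
// targets are cleared as well. The same visitor is applied to the string
// table, weak global storage and other VM-level weak containers.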
void ParallelEvacuator::UpdateWeakReference()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReference);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateWeakReference");
    UpdateRecordWeakReference();
    auto stringTable = heap_->GetEcmaVM()->GetEcmaStringTable();
    bool isFullMark = heap_->IsConcurrentFullMark();
    WeakRootVisitor gcUpdateWeak = [isFullMark](TaggedObject *header) {
        Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(header));
        if (!objectRegion) {
            LOG_GC(ERROR) << "PartialGC updateWeakReference: region is nullptr, header is " << header;
            return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
        }
        if (objectRegion->InYoungSpaceOrCSet()) {
            if (objectRegion->InNewToNewSet()) {
                if (objectRegion->Test(header)) {
                    return header;
                }
            } else {
                MarkWord markWord(header);
                if (markWord.IsForwardingAddress()) {
                    return markWord.ToForwardingAddress();
                }
            }
            return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
        }
        if (isFullMark) {
            if (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(header)) {
                return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
            }
        }
        return header;
    };
    if (isFullMark) {
        // Only a full (old) GC sweeps the string table.
        stringTable->SweepWeakReference(gcUpdateWeak);
    }

    heap_->GetEcmaVM()->GetJSThread()->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
    heap_->GetEcmaVM()->ProcessReferences(gcUpdateWeak);
}

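// Fixes one old/snapshot-space region through its remembered sets: old-to-new
// entries may still point into the evacuated from-space, and cross-region
// entries may point into the collect set. While the concurrent sweeper is
// running, sweeping remembered-set bits are visited atomically (or merged once
// the region has been swept).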
void ParallelEvacuator::UpdateRSet(Region *region)
{
    auto cb = [this](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        return UpdateOldToNewObjectSlot(slot);
    };
    if (heap_->GetSweeper()->IsSweeping()) {
        if (region->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT)) {
            // The region has already been swept, so its remembered set can be merged safely.
            region->MergeRSetForConcurrentSweeping();
        } else {
            region->AtomicIterateAllSweepingRSetBits(cb);
        }
    }
    region->IterateAllOldToNewBits(cb);
    region->IterateAllCrossRegionBits([this](void *mem) {
        ObjectSlot slot(ToUintPtr(mem));
        UpdateObjectSlot(slot);
    });
    region->ClearCrossRegionRSet();
}

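// Linearly walks a copied-to young region (contiguous up to the allocation
// top) and updates every reference field of each object, skipping free-list
// filler objects.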
void ParallelEvacuator::UpdateNewRegionReference(Region *region)
{
    Region *current = heap_->GetNewSpace()->GetCurrentRegion();
    auto curPtr = region->GetBegin();
    uintptr_t endPtr = 0;
    if (region == current) {
        auto top = heap_->GetNewSpace()->GetTop();
        endPtr = curPtr + region->GetAllocatedBytes(top);
    } else {
        endPtr = curPtr + region->GetAllocatedBytes();
    }

    size_t objSize = 0;
    while (curPtr < endPtr) {
        auto freeObject = FreeObject::Cast(curPtr);
        // If curPtr points at a free object, its header must be unpoisoned before it is read.
        ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(freeObject), TaggedObject::TaggedObjectSize());
        if (!freeObject->IsFreeObject()) {
            auto obj = reinterpret_cast<TaggedObject *>(curPtr);
            auto klass = obj->GetClass();
            UpdateNewObjectField(obj, klass);
            objSize = klass->SizeFromJSHClass(obj);
        } else {
            freeObject->AsanUnPoisonFreeObject();
            objSize = freeObject->Available();
            freeObject->AsanPoisonFreeObject();
        }
        curPtr += objSize;
        CHECK_OBJECT_SIZE(objSize);
    }
    CHECK_REGION_END(curPtr, endPtr);
}

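// For new-to-new regions that were not copied: updates the fields of every
// marked object and simultaneously sweeps the gaps between survivors, filling
// them with free objects so the region stays linearly iterable.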
void ParallelEvacuator::UpdateAndSweepNewRegionReference(Region *region)
{
    uintptr_t freeStart = region->GetBegin();
    uintptr_t freeEnd = freeStart + region->GetAllocatedBytes();
    region->IterateAllMarkedBits([&](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        JSHClass *klass = header->GetClass();
        UpdateNewObjectField(header, klass);

        // The current object ends the free gap that started at freeStart.
        uintptr_t objectAddr = ToUintPtr(mem);
        if (freeStart != objectAddr) {
            size_t freeSize = objectAddr - freeStart;
            FreeObject::FillFreeObject(heap_->GetEcmaVM(), freeStart, freeSize);
            SemiSpace *toSpace = heap_->GetNewSpace();
            toSpace->DecreaseSurvivalObjectSize(freeSize);
        }

        freeStart = objectAddr + klass->SizeFromJSHClass(header);
    });
    CHECK_REGION_END(freeStart, freeEnd);
    if (freeStart < freeEnd) {
        FreeObject::FillFreeObject(heap_->GetEcmaVM(), freeStart, freeEnd - freeStart);
    }
}

void ParallelEvacuator::UpdateNewObjectField(TaggedObject *object, JSHClass *cls)
{
    objXRay_.VisitObjectBody<VisitType::OLD_GC_VISIT>(object, cls,
        [this](TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area) {
            if (area == VisitObjectArea::IN_OBJECT) {
                if (VisitBodyInObj(root, start, end, [&](ObjectSlot slot) { UpdateObjectSlot(slot); })) {
                    return;
                }
            }
            for (ObjectSlot slot = start; slot < end; slot++) {
                UpdateObjectSlot(slot);
            }
        });
}

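// Blocks until every posted worker has decremented parallel_ and signalled;
// workers do so at the end of EvacuateSpace() and ProcessWorkloads().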
void ParallelEvacuator::WaitFinished()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), WaitUpdateFinished);
    if (parallel_ > 0) {
        LockHolder holder(mutex_);
        while (parallel_ > 0) {
            condition_.Wait(&mutex_);
        }
    }
}

bool ParallelEvacuator::ProcessWorkloads(bool isMain)
{
    std::unique_ptr<Workload> region = GetWorkloadSafe();
    while (region != nullptr) {
        region->Process(isMain);
        region = GetWorkloadSafe();
    }
    if (!isMain) {
        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

ParallelEvacuator::EvacuationTask::EvacuationTask(int32_t id, ParallelEvacuator *evacuator)
    : Task(id), evacuator_(evacuator)
{
    allocator_ = new TlabAllocator(evacuator->heap_);
}

ParallelEvacuator::EvacuationTask::~EvacuationTask()
{
    delete allocator_;
}

bool ParallelEvacuator::EvacuationTask::Run(uint32_t threadIndex)
{
    return evacuator_->EvacuateSpace(allocator_, threadIndex);
}

bool ParallelEvacuator::UpdateReferenceTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    evacuator_->ProcessWorkloads(false);
    return true;
}

bool ParallelEvacuator::EvacuateWorkload::Process([[maybe_unused]] bool isMain)
{
    return true;
}

bool ParallelEvacuator::UpdateRSetWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateRSet(GetRegion());
    return true;
}

bool ParallelEvacuator::UpdateNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateNewRegionReference(GetRegion());
    return true;
}

bool ParallelEvacuator::UpdateAndSweepNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateAndSweepNewRegionReference(GetRegion());
    return true;
}
}  // namespace panda::ecmascript