• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "ecmascript/mem/parallel_evacuator-inl.h"
17 
18 #include "ecmascript/js_hclass-inl.h"
19 #include "ecmascript/mem/barriers-inl.h"
20 #include "ecmascript/mem/clock_scope.h"
21 #include "ecmascript/mem/gc_bitset.h"
22 #include "ecmascript/mem/heap.h"
23 #include "ecmascript/mem/mem.h"
24 #include "ecmascript/mem/space-inl.h"
25 #include "ecmascript/mem/tlab_allocator-inl.h"
26 #include "ecmascript/mem/visitor.h"
27 #include "ecmascript/mem/gc_stats.h"
28 #include "ecmascript/ecma_string_table.h"
29 #include "ecmascript/runtime_call_id.h"
30 
31 namespace panda::ecmascript {
// Prepares evacuation state for one GC cycle: snapshots the new-space water
// line, flips the semispaces, and creates the TLAB allocator used to copy
// live objects. Must run before EvacuateSpace()/UpdateReference().
void ParallelEvacuator::Initialize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorInitialize);
    // Objects at addresses below this line in an age-mark region get promoted
    // to old space during EvacuateRegion().
    waterLine_ = heap_->GetNewSpace()->GetWaterLine();
    // After the swap, GetFromSpaceDuringEvacuation() yields the regions whose
    // survivors must be copied out.
    heap_->SwapNewSpace();
    allocator_ = new TlabAllocator(heap_);
    promotedSize_ = 0;
}
40 
Finalize()41 void ParallelEvacuator::Finalize()
42 {
43     MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorFinalize);
44     delete allocator_;
45 }
46 
// Top-level driver: runs the full evacuation pipeline in its fixed order —
// setup, copy live objects, fix up all references to moved objects, teardown.
void ParallelEvacuator::Evacuate()
{
    Initialize();
    EvacuateSpace();
    UpdateReference();
    Finalize();
}
54 
// Builds one EvacuateWorkload per from-space region and per old-space
// collect-set region, optionally fans the queue out to the task pool, and
// always participates on the current (main) thread before waiting.
void ParallelEvacuator::EvacuateSpace()
{
    TRACE_GC(GCStats::Scope::ScopeId::EvacuateSpace, heap_->GetEcmaVM()->GetEcmaGCStats());
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuator);
    heap_->GetFromSpaceDuringEvacuation()->EnumerateRegions([this] (Region *current) {
        AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
    });
    heap_->GetOldSpace()->EnumerateCollectRegionSet(
        [this](Region *current) {
            AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
        });
    if (heap_->IsParallelGCEnabled()) {
        // parallel_ counts outstanding helper tasks; set under mutex_ before
        // posting so WaitFinished() observes a consistent value.
        os::memory::LockHolder holder(mutex_);
        parallel_ = CalculateEvacuationThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<EvacuationTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }

    // The main thread drains workloads too (isMain = true), then blocks until
    // every helper has signalled completion.
    EvacuateSpace(allocator_, true);
    WaitFinished();
}
78 
EvacuateSpace(TlabAllocator * allocator,bool isMain)79 bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, bool isMain)
80 {
81     std::unique_ptr<Workload> region = GetWorkloadSafe();
82     while (region != nullptr) {
83         EvacuateRegion(allocator, region->GetRegion());
84         region = GetWorkloadSafe();
85     }
86     allocator->Finalize();
87     if (!isMain) {
88         os::memory::LockHolder holder(mutex_);
89         if (--parallel_ <= 0) {
90             condition_.SignalAll();
91         }
92     }
93     return true;
94 }
95 
// Copies every marked object out of `region`, choosing the destination space
// per object, and records a forwarding address in each old copy's mark word.
// Promotion accounting (promotedSize_) covers only young objects promoted to
// old space, not old-space compaction moves.
void ParallelEvacuator::EvacuateRegion(TlabAllocator *allocator, Region *region)
{
    bool isInOldGen = region->InOldSpace();
    bool isBelowAgeMark = region->BelowAgeMark();
    size_t promotedSize = 0;
    // Fast path: a young region that is neither below the age mark nor in old
    // gen and is densely live can be re-linked wholesale instead of copied.
    if (!isBelowAgeMark && !isInOldGen && IsWholeRegionEvacuate(region)) {
        if (heap_->MoveYoungRegionSync(region)) {
            return;
        }
    }
    region->IterateAllMarkedBits([this, &region, &isInOldGen, &isBelowAgeMark,
                                  &promotedSize, &allocator](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t address = 0;
        bool actualPromoted = false;
        // Survivors of a previous young GC (below the water line in an
        // age-mark region) are promoted to old space.
        bool hasAgeMark = isBelowAgeMark || (region->HasAgeMark() && ToUintPtr(mem) < waterLine_);
        if (hasAgeMark) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
            promotedSize += size;
        } else if (isInOldGen) {
            // Old-space compaction: stays in old gen, not counted as promotion.
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
        } else {
            // Young survivor: copy within new space, falling back to old space
            // (i.e. early promotion) if the semispace TLAB is exhausted.
            address = allocator->Allocate(size, SEMI_SPACE);
            if (address == 0) {
                address = allocator->Allocate(size, OLD_SPACE);
                actualPromoted = true;
                promotedSize += size;
            }
        }
        LOG_ECMA_IF(address == 0, FATAL) << "Evacuate object failed:" << size;

        if (memcpy_s(ToVoidPtr(address), size, ToVoidPtr(ToUintPtr(mem)), size) != EOK) {
            LOG_FULL(FATAL) << "memcpy_s failed";
        }
        heap_->OnMoveEvent(reinterpret_cast<uintptr_t>(mem), reinterpret_cast<TaggedObject *>(address), size);
        // Leave a forwarding pointer in the old object's first word so later
        // reference-update passes can find the new copy.
        Barriers::SetPrimitive(header, 0, MarkWord::FromForwardingAddress(address));
#if ECMASCRIPT_ENABLE_HEAP_VERIFY
        VerifyHeapObject(reinterpret_cast<TaggedObject *>(address));
#endif
        if (actualPromoted) {
            // Promoted objects may now hold old->new references; rebuild RSet.
            SetObjectFieldRSet(reinterpret_cast<TaggedObject *>(address), klass);
        }
    });
    promotedSize_.fetch_add(promotedSize);
}
147 
// Heap-verify helper (compiled in only under ECMASCRIPT_ENABLE_HEAP_VERIFY):
// walks every reference slot of `object` and checks each via VerifyValue().
void ParallelEvacuator::VerifyHeapObject(TaggedObject *object)
{
    auto klass = object->GetClass();
    objXRay_.VisitObjectBody<VisitType::OLD_GC_VISIT>(object, klass,
        [&](TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area) {
            if (area == VisitObjectArea::IN_OBJECT) {
                // In-object layout may need field-aware visiting; when that
                // path handles the range it returns true and we are done.
                if (VisitBodyInObj(root, start, end, [&](ObjectSlot slot) { VerifyValue(object, slot); })) {
                    return;
                };
            }
            for (ObjectSlot slot = start; slot < end; slot++) {
                VerifyValue(object, slot);
            }
        });
}
163 
// Checks one slot of `object`: a strong heap reference must point at a marked
// object (or one in app-spawn space); otherwise the GC missed a mark and we
// abort with diagnostics. Weak references are exempt.
void ParallelEvacuator::VerifyValue(TaggedObject *object, ObjectSlot slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (value.IsHeapObject()) {
        if (value.IsWeakForHeapObject()) {
            return;
        }
        Region *objectRegion = Region::ObjectAddressToRange(value.GetTaggedObject());
        // During a young-only GC, only young-space referents carry mark bits
        // worth checking.
        if (!heap_->IsFullMark() && !objectRegion->InYoungSpace()) {
            return;
        }
        if (!objectRegion->Test(value.GetTaggedObject()) && !objectRegion->InAppSpawnSpace()) {
            LOG_GC(FATAL) << "Miss mark value: " << value.GetTaggedObject()
                                << ", body address:" << slot.SlotAddress()
                                << ", header address:" << object;
        }
    }
}
182 
// Second phase: repoints every reference to a moved object. Builds per-region
// workloads (new-to-new regions are updated and swept in place; copied new
// regions and old/snapshot regions get RSet-driven updates), fans them out to
// the task pool, and handles roots plus weak references on this thread.
void ParallelEvacuator::UpdateReference()
{
    TRACE_GC(GCStats::Scope::ScopeId::UpdateReference, heap_->GetEcmaVM()->GetEcmaGCStats());
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelUpdateReference);
    // Update reference pointers
    uint32_t youngeRegionMoveCount = 0;
    uint32_t youngeRegionCopyCount = 0;
    uint32_t oldRegionCount = 0;
    heap_->GetNewSpace()->EnumerateRegions([&] (Region *current) {
        if (current->InNewToNewSet()) {
            // Region was moved wholesale (see MoveYoungRegionSync): update
            // fields and sweep dead gaps in place.
            AddWorkload(std::make_unique<UpdateAndSweepNewRegionWorkload>(this, current));
            youngeRegionMoveCount++;
        } else {
            AddWorkload(std::make_unique<UpdateNewRegionWorkload>(this, current));
            youngeRegionCopyCount++;
        }
    });
    heap_->EnumerateOldSpaceRegions([this, &oldRegionCount] (Region *current) {
        // Collect-set regions were evacuated; their contents are garbage now.
        if (current->InCollectSet()) {
            return;
        }
        AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current));
        oldRegionCount++;
    });
    heap_->EnumerateSnapshotSpaceRegions([this] (Region *current) {
        AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current));
    });
    LOG_GC(DEBUG) << "UpdatePointers statistic: younge space region compact moving count:"
                        << youngeRegionMoveCount
                        << "younge space region compact coping count:" << youngeRegionCopyCount
                        << "old space region count:" << oldRegionCount;

    if (heap_->IsParallelGCEnabled()) {
        os::memory::LockHolder holder(mutex_);
        parallel_ = CalculateUpdateThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<UpdateReferenceTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }

    // Roots and weak refs are single-threaded; region workloads run in
    // parallel with them via ProcessWorkloads.
    UpdateRoot();
    UpdateWeakReference();
    ProcessWorkloads(true);
    WaitFinished();
}
229 
// Repoints all VM roots (single slots, slot ranges, and base/derived pairs)
// to the forwarded locations of moved objects.
void ParallelEvacuator::UpdateRoot()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateRoot);
    RootVisitor gcUpdateYoung = [this]([[maybe_unused]] Root type, ObjectSlot slot) {
        UpdateObjectSlot(slot);
    };
    RootRangeVisitor gcUpdateRangeYoung = [this]([[maybe_unused]] Root type, ObjectSlot start, ObjectSlot end) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            UpdateObjectSlot(slot);
        }
    };
    // A derived (interior) pointer is rebased by preserving its offset from
    // the base object: newDerived = newBase + (oldDerived - oldBase).
    RootBaseAndDerivedVisitor gcUpdateDerived =
        []([[maybe_unused]] Root type, ObjectSlot base, ObjectSlot derived, uintptr_t baseOldObject) {
        if (JSTaggedValue(base.GetTaggedType()).IsHeapObject()) {
            derived.Update(base.GetTaggedType() + derived.GetTaggedType() - baseOldObject);
        }
    };

    objXRay_.VisitVMRoots(gcUpdateYoung, gcUpdateRangeYoung, gcUpdateDerived);
}
250 
UpdateRecordWeakReference()251 void ParallelEvacuator::UpdateRecordWeakReference()
252 {
253     auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
254     for (uint32_t i = 0; i < totalThreadCount; i++) {
255         ProcessQueue *queue = heap_->GetWorkManager()->GetWeakReferenceQueue(i);
256 
257         while (true) {
258             auto obj = queue->PopBack();
259             if (UNLIKELY(obj == nullptr)) {
260                 break;
261             }
262             ObjectSlot slot(ToUintPtr(obj));
263             JSTaggedValue value(slot.GetTaggedType());
264             if (value.IsWeak()) {
265                 UpdateWeakObjectSlot(value.GetTaggedWeakRef(), slot);
266             }
267         }
268     }
269 }
270 
UpdateWeakReference()271 void ParallelEvacuator::UpdateWeakReference()
272 {
273     MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReference);
274     UpdateRecordWeakReference();
275     auto stringTable = heap_->GetEcmaVM()->GetEcmaStringTable();
276     bool isFullMark = heap_->IsFullMark();
277     WeakRootVisitor gcUpdateWeak = [isFullMark](TaggedObject *header) {
278         Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(header));
279         if (objectRegion->InYoungSpaceOrCSet()) {
280             if (objectRegion->InNewToNewSet()) {
281                 if (objectRegion->Test(header)) {
282                     return header;
283                 }
284             } else {
285                 MarkWord markWord(header);
286                 if (markWord.IsForwardingAddress()) {
287                     return markWord.ToForwardingAddress();
288                 }
289             }
290             return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
291         }
292         if (isFullMark) {
293             if (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(header)) {
294                 return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
295             }
296         }
297         return header;
298     };
299     if (isFullMark) {
300         // Only old gc will sweep string table.
301         stringTable->SweepWeakReference(gcUpdateWeak);
302     }
303 
304     heap_->GetEcmaVM()->GetJSThread()->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
305     heap_->GetEcmaVM()->ProcessReferences(gcUpdateWeak);
306 }
307 
// Updates `region`'s remembered sets: old-to-new slots (coordinating with a
// concurrent sweeper if one is running) and cross-region slots, then clears
// the cross-region set, which is rebuilt as slots are updated.
void ParallelEvacuator::UpdateRSet(Region *region)
{
    auto cb = [this](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        return UpdateOldToNewObjectSlot(slot);
    };
    if (heap_->GetSweeper()->IsSweeping()) {
        if (region->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT)) {
            // Region is safe while update remember set
            region->MergeRSetForConcurrentSweeping();
        } else {
            // Still being swept: iterate the sweeping-RSet copy atomically.
            region->AtomicIterateAllSweepingRSetBits(cb);
        }
    }
    region->IterateAllOldToNewBits(cb);
    region->IterateAllCrossRegionBits([this](void *mem) {
        ObjectSlot slot(ToUintPtr(mem));
        UpdateObjectSlot(slot);
    });
    region->ClearCrossRegionRSet();
}
329 
// Linearly walks a to-space region that received copied objects and updates
// every reference field. The walk bumps by object size, skipping free-object
// filler; for the current allocation region it stops at the live top rather
// than the region end.
void ParallelEvacuator::UpdateNewRegionReference(Region *region)
{
    Region *current = heap_->GetNewSpace()->GetCurrentRegion();
    auto curPtr = region->GetBegin();
    uintptr_t endPtr = 0;
    if (region == current) {
        auto top = heap_->GetNewSpace()->GetTop();
        endPtr = curPtr + region->GetAllocatedBytes(top);
    } else {
        endPtr = curPtr + region->GetAllocatedBytes();
    }

    size_t objSize = 0;
    while (curPtr < endPtr) {
        auto freeObject = FreeObject::Cast(curPtr);
        // If curPtr is freeObject, It must to mark unpoison first.
        ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(freeObject), TaggedObject::TaggedObjectSize());
        if (!freeObject->IsFreeObject()) {
            auto obj = reinterpret_cast<TaggedObject *>(curPtr);
            auto klass = obj->GetClass();
            UpdateNewObjectField(obj, klass);
            objSize = klass->SizeFromJSHClass(obj);
        } else {
            // Free filler: read its size with ASan poisoning toggled off.
            freeObject->AsanUnPoisonFreeObject();
            objSize = freeObject->Available();
            freeObject->AsanPoisonFreeObject();
        }
        curPtr += objSize;
        CHECK_OBJECT_SIZE(objSize);
    }
    CHECK_REGION_END(curPtr, endPtr);
}
362 
UpdateAndSweepNewRegionReference(Region * region)363 void ParallelEvacuator::UpdateAndSweepNewRegionReference(Region *region)
364 {
365     uintptr_t freeStart = region->GetBegin();
366     uintptr_t freeEnd = freeStart + region->GetAllocatedBytes();
367     region->IterateAllMarkedBits([&](void *mem) {
368         ASSERT(region->InRange(ToUintPtr(mem)));
369         auto header = reinterpret_cast<TaggedObject *>(mem);
370         JSHClass *klass = header->GetClass();
371         UpdateNewObjectField(header, klass);
372 
373         uintptr_t freeEnd = ToUintPtr(mem);
374         if (freeStart != freeEnd) {
375             size_t freeSize = freeEnd - freeStart;
376             FreeObject::FillFreeObject(heap_->GetEcmaVM(), freeStart, freeSize);
377             SemiSpace *toSpace = heap_->GetNewSpace();
378             toSpace->DecreaseSurvivalObjectSize(freeSize);
379         }
380 
381         freeStart = freeEnd + klass->SizeFromJSHClass(header);
382     });
383     CHECK_REGION_END(freeStart, freeEnd);
384     if (freeStart < freeEnd) {
385         FreeObject::FillFreeObject(heap_->GetEcmaVM(), freeStart, freeEnd - freeStart);
386     }
387 }
388 
// Updates every reference slot of a single object in the new space,
// repointing slots that refer to moved objects.
void ParallelEvacuator::UpdateNewObjectField(TaggedObject *object, JSHClass *cls)
{
    objXRay_.VisitObjectBody<VisitType::OLD_GC_VISIT>(object, cls,
        [this](TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area) {
            if (area == VisitObjectArea::IN_OBJECT) {
                // Field-aware in-object visiting; returns true when it has
                // fully handled the range.
                if (VisitBodyInObj(root, start, end, [&](ObjectSlot slot) { UpdateObjectSlot(slot); })) {
                    return;
                };
            }
            for (ObjectSlot slot = start; slot < end; slot++) {
                UpdateObjectSlot(slot);
            }
        });
}
403 
// Blocks until all helper tasks have finished, i.e. parallel_ has been
// decremented to zero (workers signal condition_ under mutex_).
void ParallelEvacuator::WaitFinished()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), WaitUpdateFinished);
    // NOTE(review): this first read of parallel_ happens before mutex_ is
    // taken — presumably tolerable since the locked re-check below decides,
    // but confirm whether parallel_ needs to be atomic for this fast path.
    if (parallel_ > 0) {
        os::memory::LockHolder holder(mutex_);
        while (parallel_ > 0) {
            condition_.Wait(&mutex_);
        }
    }
}
414 
ProcessWorkloads(bool isMain)415 bool ParallelEvacuator::ProcessWorkloads(bool isMain)
416 {
417     std::unique_ptr<Workload> region = GetWorkloadSafe();
418     while (region != nullptr) {
419         region->Process(isMain);
420         region = GetWorkloadSafe();
421     }
422     if (!isMain) {
423         os::memory::LockHolder holder(mutex_);
424         if (--parallel_ <= 0) {
425             condition_.SignalAll();
426         }
427     }
428     return true;
429 }
430 
// Each evacuation task owns a private TLAB allocator so workers never contend
// on allocation while copying objects.
ParallelEvacuator::EvacuationTask::EvacuationTask(int32_t id, ParallelEvacuator *evacuator)
    : Task(id), evacuator_(evacuator)
{
    allocator_ = new TlabAllocator(evacuator->heap_);
}
436 
// Releases the task-private TLAB allocator created in the constructor.
ParallelEvacuator::EvacuationTask::~EvacuationTask()
{
    delete allocator_;
}
441 
// Task-pool entry point: joins the evacuation worker loop as a helper
// (isMain defaults to false), using the task-private allocator.
bool ParallelEvacuator::EvacuationTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    return evacuator_->EvacuateSpace(allocator_);
}
446 
// Task-pool entry point for the reference-update phase: drains region
// workloads as a helper thread.
bool ParallelEvacuator::UpdateReferenceTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    evacuator_->ProcessWorkloads(false);
    return true;
}
452 
// Intentionally a no-op: evacuation workloads are consumed directly by
// EvacuateSpace(), which calls EvacuateRegion(GetRegion()) itself rather
// than dispatching through Process().
bool ParallelEvacuator::EvacuateWorkload::Process([[maybe_unused]] bool isMain)
{
    return true;
}
457 
// Dispatches remembered-set updating for this workload's region.
bool ParallelEvacuator::UpdateRSetWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateRSet(GetRegion());
    return true;
}
463 
// Dispatches reference updating for a copied new-space region.
bool ParallelEvacuator::UpdateNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateNewRegionReference(GetRegion());
    return true;
}
469 
// Dispatches combined reference updating and in-place sweeping for a
// new-to-new (wholesale-moved) region.
bool ParallelEvacuator::UpdateAndSweepNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateAndSweepNewRegionReference(GetRegion());
    return true;
}
475 }  // namespace panda::ecmascript
476