/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/parallel_evacuator-inl.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/barriers-inl.h"
#include "ecmascript/mem/clock_scope.h"
#include "ecmascript/mem/gc_bitset.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/space-inl.h"
#include "ecmascript/mem/tlab_allocator-inl.h"
#include "ecmascript/mem/visitor.h"
#include "ecmascript/mem/gc_stats.h"
#include "ecmascript/ecma_string_table.h"
#include "ecmascript/runtime_call_id.h"

namespace panda::ecmascript {
void ParallelEvacuator::Initialize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorInitialize);
    waterLine_ = heap_->GetNewSpace()->GetWaterLine();
    heap_->SwapNewSpace();
    allocator_ = new TlabAllocator(heap_);
    promotedSize_ = 0;
}

void ParallelEvacuator::Finalize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorFinalize);
    delete allocator_;
    heap_->Resume(OLD_GC);
}

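// Evacuation runs in three phases: Initialize() records the age-mark water line and swaps
// the semi-spaces, EvacuateSpace() copies live objects out of the from-space and the
// old-space collect set, and UpdateReference() rewrites every pointer to the moved objects
// before Finalize() resumes the heap.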
void ParallelEvacuator::Evacuate()
{
    ClockScope clockScope;
    Initialize();
    EvacuateSpace();
    UpdateReference();
    Finalize();
    heap_->GetEcmaVM()->GetEcmaGCStats()->StatisticConcurrentEvacuate(clockScope.GetPauseTime());
}

void ParallelEvacuator::EvacuateSpace()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuator);
    heap_->GetFromSpaceDuringEvacuation()->EnumerateRegions([this] (Region *current) {
        AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
    });
    heap_->GetOldSpace()->EnumerateCollectRegionSet(
        [this](Region *current) {
            AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
        });
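    // With parallel GC enabled, spawn worker tasks that drain the shared workload queue
    // alongside the main thread; each worker signals completion via the condition variable.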
    if (heap_->IsParallelGCEnabled()) {
        os::memory::LockHolder holder(mutex_);
        parallel_ = CalculateEvacuationThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<EvacuationTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }

    EvacuateSpace(allocator_, true);
    WaitFinished();
}

bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, bool isMain)
{
    std::unique_ptr<Workload> region = GetWorkloadSafe();
    while (region != nullptr) {
        EvacuateRegion(allocator, region->GetRegion());
        region = GetWorkloadSafe();
    }
    allocator->Finalize();
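    // Worker threads decrement the active-task count; the last one to finish wakes the
    // main thread blocked in WaitFinished().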
    if (!isMain) {
        os::memory::LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

void ParallelEvacuator::EvacuateRegion(TlabAllocator *allocator, Region *region)
{
    bool isInOldGen = region->InOldSpace();
    bool isBelowAgeMark = region->BelowAgeMark();
    size_t promotedSize = 0;
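    // Fast path: a sufficiently live young region without an age mark is re-linked into
    // the to-space wholesale (forming the new-to-new set) instead of copying its objects
    // one by one.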
    if (!isBelowAgeMark && !isInOldGen && IsWholeRegionEvacuate(region)) {
        if (heap_->MoveYoungRegionSync(region)) {
            return;
        }
    }
    region->IterateAllMarkedBits([this, &region, &isInOldGen, &isBelowAgeMark,
                                  &promotedSize, &allocator](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

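        // Promotion policy: objects below the age-mark water line survived a previous GC
        // and are promoted to old space; objects already in an old-space collect-set
        // region stay in old space; everything else is copied into the to-space, falling
        // back to promotion if the semi-space allocation fails.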
        uintptr_t address = 0;
        bool actualPromoted = false;
        bool hasAgeMark = isBelowAgeMark || (region->HasAgeMark() && ToUintPtr(mem) < waterLine_);
        if (hasAgeMark) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
            promotedSize += size;
        } else if (isInOldGen) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
        } else {
            address = allocator->Allocate(size, SEMI_SPACE);
            if (address == 0) {
                address = allocator->Allocate(size, OLD_SPACE);
                actualPromoted = true;
                promotedSize += size;
            }
        }
        LOG_ECMA_IF(address == 0, FATAL) << "Evacuate object failed:" << size;

        if (memcpy_s(ToVoidPtr(address), size, ToVoidPtr(ToUintPtr(mem)), size) != EOK) {
            LOG_FULL(FATAL) << "memcpy_s failed";
        }
        heap_->OnMoveEvent(reinterpret_cast<uintptr_t>(mem), reinterpret_cast<TaggedObject *>(address), size);
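        // Install a forwarding pointer in the old object's header so that the reference
        // update phase can redirect every remaining pointer to the new copy.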
        Barriers::SetPrimitive(header, 0, MarkWord::FromForwardingAddress(address));
#if ECMASCRIPT_ENABLE_HEAP_VERIFY
        VerifyHeapObject(reinterpret_cast<TaggedObject *>(address));
#endif
        if (actualPromoted) {
            SetObjectFieldRSet(reinterpret_cast<TaggedObject *>(address), klass);
        }
    });
    promotedSize_.fetch_add(promotedSize);
}

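// Debug-only check (guarded by ECMASCRIPT_ENABLE_HEAP_VERIFY): every heap object referenced
// from a freshly copied object must have been marked, unless it lives in a space exempt
// from this GC; a miss indicates a marking bug.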
void ParallelEvacuator::VerifyHeapObject(TaggedObject *object)
{
    auto klass = object->GetClass();
    objXRay_.VisitObjectBody<VisitType::OLD_GC_VISIT>(object, klass,
        [&]([[maybe_unused]] TaggedObject *root, ObjectSlot start, ObjectSlot end, [[maybe_unused]] bool isNative) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            JSTaggedValue value(slot.GetTaggedType());
            if (value.IsHeapObject()) {
                if (value.IsWeakForHeapObject()) {
                    continue;
                }
                Region *objectRegion = Region::ObjectAddressToRange(value.GetTaggedObject());
                if (!heap_->IsFullMark() && !objectRegion->InYoungSpace()) {
                    continue;
                }
                if (!objectRegion->Test(value.GetTaggedObject()) && !objectRegion->InAppSpawnSpace()) {
                    LOG_GC(FATAL) << "Miss mark value: " << value.GetTaggedObject()
                                  << ", body address:" << slot.SlotAddress()
                                  << ", header address:" << object;
                }
            }
        }
    });
}

void ParallelEvacuator::UpdateReference()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelUpdateReference);
    // Update reference pointers
    uint32_t youngRegionMoveCount = 0;
    uint32_t youngRegionCopyCount = 0;
    uint32_t oldRegionCount = 0;
    heap_->GetNewSpace()->EnumerateRegions([&] (Region *current) {
        if (current->InNewToNewSet()) {
            AddWorkload(std::make_unique<UpdateAndSweepNewRegionWorkload>(this, current));
            youngRegionMoveCount++;
        } else {
            AddWorkload(std::make_unique<UpdateNewRegionWorkload>(this, current));
            youngRegionCopyCount++;
        }
    });
    heap_->EnumerateOldSpaceRegions([this, &oldRegionCount] (Region *current) {
        if (current->InCollectSet()) {
            return;
        }
        AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current));
        oldRegionCount++;
    });
    heap_->EnumerateSnapshotSpaceRegions([this] (Region *current) {
        AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current));
    });
    LOG_GC(DEBUG) << "UpdatePointers statistic: young space region compact moving count: "
                  << youngRegionMoveCount
                  << ", young space region compact copying count: " << youngRegionCopyCount
                  << ", old space region count: " << oldRegionCount;

    if (heap_->IsParallelGCEnabled()) {
        os::memory::LockHolder holder(mutex_);
        parallel_ = CalculateUpdateThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<UpdateReferenceTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }

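    // The main thread updates roots and weak references itself while the worker tasks
    // drain the per-region workloads posted above.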
    UpdateRoot();
    UpdateWeakReference();
    ProcessWorkloads(true);
    WaitFinished();
}

void ParallelEvacuator::UpdateRoot()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateRoot);
    RootVisitor gcUpdateYoung = [this]([[maybe_unused]] Root type, ObjectSlot slot) {
        UpdateObjectSlot(slot);
    };
    RootRangeVisitor gcUpdateRangeYoung = [this]([[maybe_unused]] Root type, ObjectSlot start, ObjectSlot end) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            UpdateObjectSlot(slot);
        }
    };
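    // A derived pointer is an interior pointer into a base object; once the base has been
    // forwarded, recompute it as newBase + (derived - oldBase).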
    RootBaseAndDerivedVisitor gcUpdateDerived =
        []([[maybe_unused]] Root type, ObjectSlot base, ObjectSlot derived, uintptr_t baseOldObject) {
        if (JSTaggedValue(base.GetTaggedType()).IsHeapObject()) {
            derived.Update(base.GetTaggedType() + derived.GetTaggedType() - baseOldObject);
        }
    };

    objXRay_.VisitVMRoots(gcUpdateYoung, gcUpdateRangeYoung, gcUpdateDerived);
}

void ParallelEvacuator::UpdateRecordWeakReference()
{
    auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
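    // Each taskpool thread plus the main JS thread (hence the +1) records weak references
    // into its own queue during marking; drain all of them here.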
    for (uint32_t i = 0; i < totalThreadCount; i++) {
        ProcessQueue *queue = heap_->GetWorkManager()->GetWeakReferenceQueue(i);

        while (true) {
            auto obj = queue->PopBack();
            if (UNLIKELY(obj == nullptr)) {
                break;
            }
            ObjectSlot slot(ToUintPtr(obj));
            JSTaggedValue value(slot.GetTaggedType());
            if (value.IsWeak()) {
                UpdateWeakObjectSlot(value.GetTaggedWeakRef(), slot);
            }
        }
    }
}

void ParallelEvacuator::UpdateWeakReference()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReference);
    UpdateRecordWeakReference();
    auto stringTable = heap_->GetEcmaVM()->GetEcmaStringTable();
    bool isFullMark = heap_->IsFullMark();
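    // For each weak referent: return its forwarding address if it was evacuated, the
    // object itself if it is still live in place, or nullptr if it is dead so the caller
    // can clear the weak slot.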
    WeakRootVisitor gcUpdateWeak = [isFullMark](TaggedObject *header) {
        Region *objectRegion = Region::ObjectAddressToRange(header);
        if (objectRegion->InYoungSpaceOrCSet()) {
            if (objectRegion->InNewToNewSet()) {
                if (objectRegion->Test(header)) {
                    return header;
                }
            } else {
                MarkWord markWord(header);
                if (markWord.IsForwardingAddress()) {
                    return markWord.ToForwardingAddress();
                }
            }
            return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
        }
        if (isFullMark) {
            if (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(header)) {
                return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
            }
        }
        return header;
    };

    stringTable->SweepWeakReference(gcUpdateWeak);
    heap_->GetEcmaVM()->GetJSThread()->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
    heap_->GetEcmaVM()->ProcessReferences(gcUpdateWeak);
}

void ParallelEvacuator::UpdateRSet(Region *region)
{
    auto cb = [this](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        return UpdateOldToNewObjectSlot(slot);
    };
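    // While the concurrent sweeper owns this region, its remembered-set bits live in a
    // separate sweeping set; merge or walk that set before touching the main one.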
    if (heap_->GetSweeper()->IsSweeping()) {
        if (region->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT)) {
            // The region has already been swept, so its remembered set can be merged safely.
            region->MergeRSetForConcurrentSweeping();
        } else {
            region->AtomicIterateAllSweepingRSetBits(cb);
        }
    }
    region->IterateAllOldToNewBits(cb);
    region->IterateAllCrossRegionBits([this](void *mem) {
        ObjectSlot slot(ToUintPtr(mem));
        UpdateObjectSlot(slot);
    });
    region->ClearCrossRegionRSet();
}

void ParallelEvacuator::UpdateNewRegionReference(Region *region)
{
    Region *current = heap_->GetNewSpace()->GetCurrentRegion();
    auto curPtr = region->GetBegin();
    uintptr_t endPtr = 0;
    if (region == current) {
        auto top = heap_->GetNewSpace()->GetTop();
        endPtr = curPtr + region->GetAllocatedBytes(top);
    } else {
        endPtr = curPtr + region->GetAllocatedBytes();
    }

    size_t objSize = 0;
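    // Linearly walk the region, updating the fields of live objects and skipping over
    // free-list fillers.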
    while (curPtr < endPtr) {
        auto freeObject = FreeObject::Cast(curPtr);
        // If curPtr points to a free object, its header must be unpoisoned before it is read.
        ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(freeObject), TaggedObject::TaggedObjectSize());
        if (!freeObject->IsFreeObject()) {
            auto obj = reinterpret_cast<TaggedObject *>(curPtr);
            auto klass = obj->GetClass();
            UpdateNewObjectField(obj, klass);
            objSize = klass->SizeFromJSHClass(obj);
        } else {
            freeObject->AsanUnPoisonFreeObject();
            objSize = freeObject->Available();
            freeObject->AsanPoisonFreeObject();
        }
        curPtr += objSize;
        CHECK_OBJECT_SIZE(objSize);
    }
    CHECK_REGION_END(curPtr, endPtr);
}

void ParallelEvacuator::UpdateAndSweepNewRegionReference(Region *region)
{
    uintptr_t freeStart = region->GetBegin();
    uintptr_t freeEnd = freeStart + region->GetAllocatedBytes();
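    // Walk only the marked (live) objects; each gap between consecutive live objects is
    // dead space that is turned into a free object and deducted from the survival size.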
    region->IterateAllMarkedBits([&](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        JSHClass *klass = header->GetClass();
        UpdateNewObjectField(header, klass);

        // objectStart is both the end of the current free gap and the start of this object.
        uintptr_t objectStart = ToUintPtr(mem);
        if (freeStart != objectStart) {
            size_t freeSize = objectStart - freeStart;
            FreeObject::FillFreeObject(heap_->GetEcmaVM(), freeStart, freeSize);
            SemiSpace *toSpace = heap_->GetNewSpace();
            toSpace->DecreaseSurvivalObjectSize(freeSize);
        }

        freeStart = objectStart + klass->SizeFromJSHClass(header);
    });
    CHECK_REGION_END(freeStart, freeEnd);
    if (freeStart < freeEnd) {
        FreeObject::FillFreeObject(heap_->GetEcmaVM(), freeStart, freeEnd - freeStart);
    }
}

void ParallelEvacuator::UpdateNewObjectField(TaggedObject *object, JSHClass *cls)
{
    objXRay_.VisitObjectBody<VisitType::OLD_GC_VISIT>(object, cls,
        [this]([[maybe_unused]] TaggedObject *root, ObjectSlot start, ObjectSlot end, [[maybe_unused]] bool isNative) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            UpdateObjectSlot(slot);
        }
    });
}

void ParallelEvacuator::WaitFinished()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), WaitUpdateFinished);
    if (parallel_ > 0) {
        os::memory::LockHolder holder(mutex_);
        while (parallel_ > 0) {
            condition_.Wait(&mutex_);
        }
    }
}

bool ParallelEvacuator::ProcessWorkloads(bool isMain)
{
    std::unique_ptr<Workload> region = GetWorkloadSafe();
    while (region != nullptr) {
        region->Process(isMain);
        region = GetWorkloadSafe();
    }
    if (!isMain) {
        os::memory::LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

ParallelEvacuator::EvacuationTask::EvacuationTask(int32_t id, ParallelEvacuator *evacuator)
    : Task(id), evacuator_(evacuator)
{
    allocator_ = new TlabAllocator(evacuator->heap_);
}

ParallelEvacuator::EvacuationTask::~EvacuationTask()
{
    delete allocator_;
}

bool ParallelEvacuator::EvacuationTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    return evacuator_->EvacuateSpace(allocator_);
}

bool ParallelEvacuator::UpdateReferenceTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    evacuator_->ProcessWorkloads(false);
    return true;
}

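// Evacuation regions are drained directly by EvacuateSpace(), so EvacuateWorkload's
// Process() is a deliberate no-op; the workload only carries the region pointer.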
bool ParallelEvacuator::EvacuateWorkload::Process([[maybe_unused]] bool isMain)
{
    return true;
}

bool ParallelEvacuator::UpdateRSetWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateRSet(GetRegion());
    return true;
}

bool ParallelEvacuator::UpdateNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateNewRegionReference(GetRegion());
    return true;
}

bool ParallelEvacuator::UpdateAndSweepNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateAndSweepNewRegionReference(GetRegion());
    return true;
}
}  // namespace panda::ecmascript