/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/parallel_evacuation-inl.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/barriers-inl.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/object_xray-inl.h"
#include "ecmascript/mem/space-inl.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/tlab_allocator-inl.h"
#include "ecmascript/mem/utils.h"
#include "ecmascript/runtime_call_id.h"

namespace panda::ecmascript {
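// Prepare for evacuation: swap the new space, create the TLAB allocator used for copying objects,
// and record the from-space water line used for promotion decisions.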
void ParallelEvacuation::Initialize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuationInitialize);
    heap_->SwapNewSpace();
    allocator_ = new TlabAllocator(heap_);
    waterLine_ = heap_->GetFromSpace()->GetWaterLine();
    promotedSize_ = 0;
}

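// Tear down after evacuation: release the TLAB allocator, post the concurrent sweep tasks and resume the heap.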
void ParallelEvacuation::Finalize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuationFinalize);
    delete allocator_;
    heap_->GetSweeper()->PostConcurrentSweepTasks();
    heap_->Resume(OLD_GC);
}

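// Evacuation entry point: copy live objects out of the evacuation set, fix up references,
// and record the pause time in the GC statistics.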
void ParallelEvacuation::Evacuate()
{
    ClockScope clockScope;
    Initialize();
    EvacuateSpace();
    UpdateReference();
    Finalize();
    heap_->GetEcmaVM()->GetEcmaGCStats()->StatisticConcurrentEvacuate(clockScope.GetPauseTime());
}

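// Queue an evacuation fragment for every from-space region and every collect-set region,
// dispatch worker tasks when parallel GC is enabled, and evacuate on the main thread as well.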
void ParallelEvacuation::EvacuateSpace()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuation);
    heap_->GetFromSpace()->EnumerateRegions([this] (Region *current) {
        AddFragment(std::make_unique<EvacuationFragment>(this, current));
    });
    if (!heap_->GetOldSpace()->IsCSetEmpty()) {
        heap_->GetOldSpace()->EnumerateCollectRegionSet([this](Region *current) {
            AddFragment(std::make_unique<EvacuationFragment>(this, current));
        });
    }
    if (heap_->IsParallelGCEnabled()) {
        os::memory::LockHolder holder(mutex_);
        parallel_ = CalculateEvacuationThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Platform::GetCurrentPlatform()->PostTask(std::make_unique<EvacuationTask>(this));
        }
    }

    EvacuateSpace(allocator_, true);
    WaitFinished();
}

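// Worker loop: drain the fragment queue and evacuate one region at a time.
// Non-main workers signal the condition variable once the last of them finishes.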
bool ParallelEvacuation::EvacuateSpace(TlabAllocator *allocator, bool isMain)
{
    std::unique_ptr<Fragment> region = GetFragmentSafe();
    while (region != nullptr) {
        EvacuateRegion(allocator, region->GetRegion());
        region = GetFragmentSafe();
    }
    allocator->Finalize();
    if (!isMain) {
        os::memory::LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

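// Copy the live objects of a single region. Young regions that qualify for whole-region
// evacuation are moved synchronously instead of being copied object by object. Otherwise,
// objects below the age-mark water line and objects in old-generation (collect-set) regions
// are promoted to old space; the rest are copied to semi space, falling back to old space
// when semi-space allocation fails. The forwarding address is stored in the mark word.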
void ParallelEvacuation::EvacuateRegion(TlabAllocator *allocator, Region *region)
{
    bool isInOldGen = region->InOldGeneration();
    bool isBelowAgeMark = region->BelowAgeMark();
    size_t promotedSize = 0;
    if (!isBelowAgeMark && !isInOldGen && IsWholeRegionEvacuate(region)) {
        if (heap_->MoveYoungRegionSync(region)) {
            return;
        }
    }
    auto markBitmap = region->GetMarkBitmap();
    ASSERT(markBitmap != nullptr);
    markBitmap->IterateOverMarkedChunks([this, &region, &isInOldGen, &isBelowAgeMark,
                                         &promotedSize, &allocator](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t address = 0;
        bool actualPromoted = false;
        bool hasAgeMark = isBelowAgeMark || (region->HasAgeMark() && ToUintPtr(mem) < waterLine_);
        if (hasAgeMark) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
            promotedSize += size;
        } else if (isInOldGen) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
        } else {
            address = allocator->Allocate(size, SEMI_SPACE);
            if (address == 0) {
                address = allocator->Allocate(size, OLD_SPACE);
                actualPromoted = true;
                promotedSize += size;
            }
        }
        LOG_IF(address == 0, FATAL, RUNTIME) << "Evacuate object failed:" << size;

        Utils::Copy(ToVoidPtr(address), size, ToVoidPtr(ToUintPtr(mem)), size);

        Barriers::SetDynPrimitive(header, 0, MarkWord::FromForwardingAddress(address));
#if ECMASCRIPT_ENABLE_HEAP_VERIFY
        VerifyHeapObject(reinterpret_cast<TaggedObject *>(address));
#endif
        if (actualPromoted) {
            SetObjectFieldRSet(reinterpret_cast<TaggedObject *>(address), klass);
        }
    });
    promotedSize_.fetch_add(promotedSize);
}

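// Heap-verify helper: every strong reference held by an evacuated object must point to a
// marked target (young-generation targets always, all targets during a full mark).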
void ParallelEvacuation::VerifyHeapObject(TaggedObject *object)
{
    auto klass = object->GetClass();
    objXRay_.VisitObjectBody<GCType::OLD_GC>(object, klass,
        [&](TaggedObject *root, ObjectSlot start, ObjectSlot end) {
            for (ObjectSlot slot = start; slot < end; slot++) {
                JSTaggedValue value(slot.GetTaggedType());
                if (value.IsHeapObject()) {
                    if (value.IsWeakForHeapObject()) {
                        continue;
                    }
                    Region *objectRegion = Region::ObjectAddressToRange(value.GetTaggedObject());
                    if (!heap_->IsFullMark() && !objectRegion->InYoungGeneration()) {
                        continue;
                    }
                    auto rset = objectRegion->GetMarkBitmap();
                    if (!rset->Test(value.GetTaggedObject())) {
                        LOG(FATAL, RUNTIME) << "Miss mark value: " << value.GetTaggedObject()
                                            << ", body address:" << slot.SlotAddress()
                                            << ", header address:" << object;
                    }
                }
            }
        });
}

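// Queue update fragments for all new-space and old-space regions, dispatch worker tasks,
// then update roots and weak references on the main thread and help drain the queue.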
void ParallelEvacuation::UpdateReference()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelUpdateReference);
    // Update reference pointers
    uint32_t youngRegionMoveCount = 0;
    uint32_t youngRegionCopyCount = 0;
    uint32_t oldRegionCount = 0;
    heap_->GetNewSpace()->EnumerateRegions([&] (Region *current) {
        if (current->InNewToNewSet()) {
            AddFragment(std::make_unique<UpdateAndSweepNewRegionFragment>(this, current));
            youngRegionMoveCount++;
        } else {
            AddFragment(std::make_unique<UpdateNewRegionFragment>(this, current));
            youngRegionCopyCount++;
        }
    });
    heap_->EnumerateOldSpaceRegions([this, &oldRegionCount] (Region *current) {
        if (current->InCollectSet()) {
            return;
        }
        AddFragment(std::make_unique<UpdateRSetFragment>(this, current));
        oldRegionCount++;
    });
    LOG(DEBUG, RUNTIME) << "UpdatePointers statistic: young space region compact moving count:"
                        << youngRegionMoveCount
                        << " young space region compact copying count:" << youngRegionCopyCount
                        << " old space region count:" << oldRegionCount;

    if (heap_->IsParallelGCEnabled()) {
        os::memory::LockHolder holder(mutex_);
        parallel_ = CalculateUpdateThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Platform::GetCurrentPlatform()->PostTask(std::make_unique<UpdateReferenceTask>(this));
        }
    }

    UpdateRoot();
    UpdateWeakReference();
    ProcessFragments(true);
    WaitFinished();
}

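// Update every VM root slot to point at the forwarded object.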
void ParallelEvacuation::UpdateRoot()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateRoot);
    RootVisitor gcUpdateYoung = [this]([[maybe_unused]] Root type, ObjectSlot slot) {
        UpdateObjectSlot(slot);
    };
    RootRangeVisitor gcUpdateRangeYoung = [this]([[maybe_unused]] Root type, ObjectSlot start, ObjectSlot end) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            UpdateObjectSlot(slot);
        }
    };

    objXRay_.VisitVMRoots(gcUpdateYoung, gcUpdateRangeYoung);
}

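// Drain the per-thread weak-reference queues recorded during marking and update each weak slot.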
void ParallelEvacuation::UpdateRecordWeakReference()
{
    auto totalThreadCount = Platform::GetCurrentPlatform()->GetTotalThreadNum() + 1;
    for (uint32_t i = 0; i < totalThreadCount; i++) {
        ProcessQueue *queue = heap_->GetWorkList()->GetWeakReferenceQueue(i);

        while (true) {
            auto obj = queue->PopBack();
            if (UNLIKELY(obj == nullptr)) {
                break;
            }
            ObjectSlot slot(ToUintPtr(obj));
            JSTaggedValue value(slot.GetTaggedType());
            ASSERT(value.IsWeak() || value.IsUndefined());
            if (!value.IsUndefined()) {
                UpdateWeakObjectSlot(value.GetTaggedWeakRef(), slot);
            }
        }
    }
}

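// Update weak references held by the string table, the weak global storage and the reference
// processor: surviving young objects are replaced by their forwarding address, dead ones yield nullptr.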
void ParallelEvacuation::UpdateWeakReference()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReference);
    UpdateRecordWeakReference();
    auto stringTable = heap_->GetEcmaVM()->GetEcmaStringTable();
    bool isFullMark = heap_->IsFullMark();
    WeakRootVisitor gcUpdateWeak = [isFullMark](TaggedObject *header) {
        Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(header));
        if (objectRegion->InYoungOrCSetGeneration()) {
            if (objectRegion->InNewToNewSet()) {
                auto markBitmap = objectRegion->GetMarkBitmap();
                if (markBitmap->Test(header)) {
                    return header;
                }
            } else {
                MarkWord markWord(header);
                if (markWord.IsForwardingAddress()) {
                    return markWord.ToForwardingAddress();
                }
            }
            return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
        }
        if (isFullMark) {
            auto markBitmap = objectRegion->GetMarkBitmap();
            if (markBitmap == nullptr || !markBitmap->Test(header)) {
                return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
            }
        }
        return header;
    };

    stringTable->SweepWeakReference(gcUpdateWeak);
    heap_->GetEcmaVM()->GetJSThread()->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
    heap_->GetEcmaVM()->ProcessReferences(gcUpdateWeak);
}

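// Fix the slots recorded in a region's old-to-new and cross-region remembered sets;
// old-to-new entries whose target is no longer young are removed, cross-region bits are reset.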
void ParallelEvacuation::UpdateRSet(Region *region)
{
    auto rememberedSet = region->GetOldToNewRememberedSet();
    if (LIKELY(rememberedSet != nullptr)) {
        rememberedSet->IterateOverMarkedChunks([this, rememberedSet](void *mem) -> bool {
            ObjectSlot slot(ToUintPtr(mem));
            if (UpdateObjectSlot(slot)) {
                Region *valueRegion = Region::ObjectAddressToRange(slot.GetTaggedObjectHeader());
                if (!valueRegion->InYoungGeneration()) {
                    rememberedSet->Clear(slot.SlotAddress());
                }
            }
            return true;
        });
    }
    rememberedSet = region->GetCrossRegionRememberedSet();
    if (LIKELY(rememberedSet != nullptr)) {
        rememberedSet->IterateOverMarkedChunks([this](void *mem) -> bool {
            ObjectSlot slot(ToUintPtr(mem));
            UpdateObjectSlot(slot);
            return true;
        });
        rememberedSet->ClearAllBits();
    }
}

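// Linearly walk a copied new-space region (up to the allocation top for the current region)
// and update the reference fields of every non-free object.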
void ParallelEvacuation::UpdateNewRegionReference(Region *region)
{
    Region *current = heap_->GetNewSpace()->GetCurrentRegion();
    auto curPtr = region->GetBegin();
    uintptr_t endPtr;
    if (region == current) {
        auto top = heap_->GetNewSpace()->GetTop();
        endPtr = curPtr + region->GetAllocatedBytes(top);
    } else {
        endPtr = curPtr + region->GetAllocatedBytes();
    }

    size_t objSize = 0;
    while (curPtr < endPtr) {
        auto freeObject = FreeObject::Cast(curPtr);
        if (!freeObject->IsFreeObject()) {
            auto obj = reinterpret_cast<TaggedObject *>(curPtr);
            auto klass = obj->GetClass();
            UpdateNewObjectField(obj, klass);
            objSize = klass->SizeFromJSHClass(obj);
        } else {
            objSize = freeObject->Available();
        }
        curPtr += objSize;
        CHECK_OBJECT_SIZE(objSize);
    }
    CHECK_REGION_END(curPtr, endPtr);
}

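// For a new-to-new region: update the fields of each marked object and fill the gaps
// between live objects with free objects, i.e. sweep the region in place.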
void ParallelEvacuation::UpdateAndSweepNewRegionReference(Region *region)
{
    auto markBitmap = region->GetMarkBitmap();
    uintptr_t freeStart = region->GetBegin();
    uintptr_t freeEnd = freeStart + region->GetAllocatedBytes();
    if (markBitmap != nullptr) {
        markBitmap->IterateOverMarkedChunks([this, &region, &freeStart](void *mem) {
            ASSERT(region->InRange(ToUintPtr(mem)));
            auto header = reinterpret_cast<TaggedObject *>(mem);
            JSHClass *klass = header->GetClass();
            UpdateNewObjectField(header, klass);

            uintptr_t freeEnd = ToUintPtr(mem);
            if (freeStart != freeEnd) {
                size_t freeSize = freeEnd - freeStart;
                FreeObject::FillFreeObject(heap_->GetEcmaVM(), freeStart, freeSize);
                SemiSpace *toSpace = const_cast<SemiSpace *>(heap_->GetNewSpace());
                toSpace->DecrementSurvivalObjectSize(freeSize);
            }

            freeStart = freeEnd + klass->SizeFromJSHClass(header);
        });
    }
    CHECK_REGION_END(freeStart, freeEnd);
    if (freeStart < freeEnd) {
        FreeObject::FillFreeObject(heap_->GetEcmaVM(), freeStart, freeEnd - freeStart);
    }
}

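// Visit the object body and update every reference slot it contains.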
void ParallelEvacuation::UpdateNewObjectField(TaggedObject *object, JSHClass *cls)
{
    objXRay_.VisitObjectBody<GCType::OLD_GC>(object, cls,
        [this](TaggedObject *root, ObjectSlot start, ObjectSlot end) {
            for (ObjectSlot slot = start; slot < end; slot++) {
                UpdateObjectSlot(slot);
            }
        });
}

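// Block the main thread until all parallel workers have finished their queues.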
void ParallelEvacuation::WaitFinished()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), WaitUpdateFinished);
    if (parallel_ > 0) {
        os::memory::LockHolder holder(mutex_);
        while (parallel_ > 0) {
            condition_.Wait(&mutex_);
        }
    }
}

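// Worker loop for the update phase: process queued fragments until the queue is empty.
// Non-main workers signal the condition variable once the last of them finishes.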
bool ParallelEvacuation::ProcessFragments(bool isMain)
{
    std::unique_ptr<Fragment> region = GetFragmentSafe();
    while (region != nullptr) {
        region->Process(isMain);
        region = GetFragmentSafe();
    }
    if (!isMain) {
        os::memory::LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

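// Each evacuation task owns its own TLAB allocator and drains the shared fragment queue.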
ParallelEvacuation::EvacuationTask::EvacuationTask(ParallelEvacuation *evacuation)
    : evacuation_(evacuation)
{
    allocator_ = new TlabAllocator(evacuation->heap_);
}

ParallelEvacuation::EvacuationTask::~EvacuationTask()
{
    delete allocator_;
}

bool ParallelEvacuation::EvacuationTask::Run(uint32_t threadIndex)
{
    return evacuation_->EvacuateSpace(allocator_);
}

bool ParallelEvacuation::UpdateReferenceTask::Run(uint32_t threadIndex)
{
    evacuation_->ProcessFragments(false);
    return true;
}

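// EvacuationFragment regions are consumed directly by EvacuateSpace, so its Process is a no-op;
// the update fragments dispatch to the corresponding update routine.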
bool ParallelEvacuation::EvacuationFragment::Process(bool isMain)
{
    return true;
}

bool ParallelEvacuation::UpdateRSetFragment::Process(bool isMain)
{
    GetEvacuation()->UpdateRSet(GetRegion());
    return true;
}

bool ParallelEvacuation::UpdateNewRegionFragment::Process(bool isMain)
{
    GetEvacuation()->UpdateNewRegionReference(GetRegion());
    return true;
}

bool ParallelEvacuation::UpdateAndSweepNewRegionFragment::Process(bool isMain)
{
    GetEvacuation()->UpdateAndSweepNewRegionReference(GetRegion());
    return true;
}
}  // namespace panda::ecmascript