/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_PARALLEL_EVACUATOR_INL_H
#define ECMASCRIPT_MEM_PARALLEL_EVACUATOR_INL_H

#include "ecmascript/mem/parallel_evacuator.h"

#include "common_components/taskpool/taskpool.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/mark_word.h"
#include "ecmascript/mem/region-inl.h"

namespace panda::ecmascript {
ParallelEvacuator::ParallelEvacuator(Heap *heap) : heap_(heap), updateRootVisitor_(this),
    setObjectFieldRSetVisitor_(this) {}

// Move regions with a survival rate of more than 75% to new space
// Move regions when young space overshoot size is larger than max capacity.
RegionEvacuateType ParallelEvacuator::SelectRegionEvacuateType(Region *region)
{
    double aliveRate = static_cast<double>(region->AliveObject()) / region->GetSize();
    if (UNLIKELY(region->HasAgeMark())) {
        return RegionEvacuateType::OBJECT_EVACUATE;
    } else if (region->BelowAgeMark()) {
        if (aliveRate >= MIN_OBJECT_SURVIVAL_RATE) {
            return RegionEvacuateType::REGION_NEW_TO_OLD;
        }
        return RegionEvacuateType::OBJECT_EVACUATE;
    }
    if (aliveRate >= MIN_OBJECT_SURVIVAL_RATE || heap_->GetFromSpaceDuringEvacuation()->CommittedSizeIsLarge()) {
        return RegionEvacuateType::REGION_NEW_TO_NEW;
    }
    return RegionEvacuateType::OBJECT_EVACUATE;
}

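// Add an overshoot allowance to the new space for a fresh or high-survival-rate region
// that is kept whole rather than evacuated object by object.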
void ParallelEvacuator::CompensateOvershootSizeIfHighAliveRate(Region* region)
{
    double aliveRate = static_cast<double>(region->AliveObject()) / region->GetSize();
    if (region->IsFreshRegion() || aliveRate >= STRICT_OBJECT_SURVIVAL_RATE) {
        size_t compensateSize = static_cast<size_t>(region->GetCapacity() * (1.0 - HPPGC_NEWSPACE_SIZE_RATIO));
        heap_->GetNewSpace()->AddOverShootSize(compensateSize);
    }
}

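// Try to move the whole region according to its evacuate type; returns false when the
// region must be evacuated object by object.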
bool ParallelEvacuator::TryWholeRegionEvacuate(Region *region, RegionEvacuateType type)
{
    switch (type) {
        case RegionEvacuateType::REGION_NEW_TO_NEW:
            CompensateOvershootSizeIfHighAliveRate(region);
            return heap_->MoveYoungRegion(region);
        case RegionEvacuateType::REGION_NEW_TO_OLD:
            return heap_->MoveYoungRegionToOld(region);
        default:
            return false;
    }
}

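// Update a slot that references an evacuated young-space object. The return value tells
// the caller whether the slot still needs an OldToNew remembered set entry.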
bool ParallelEvacuator::UpdateForwardedOldToNewObjectSlot(TaggedObject *object, ObjectSlot &slot, bool isWeak)
{
    MarkWord markWord(object);
    if (markWord.IsForwardingAddress()) {
        TaggedObject *dst = markWord.ToForwardingAddress();
        if (isWeak) {
            dst = JSTaggedValue(dst).CreateAndGetWeakRef().GetRawTaggedObject();
        }
        slot.Update(dst);
        Region *dstRegion = Region::ObjectAddressToRange(dst);
        // Keep the oldToNewRSet entry when the forwarded object is still in young space
        if (dstRegion->InYoungSpace()) {
            return true;
        }
    } else if (isWeak) {
        slot.Clear();
    }
    return false;
}

bool ParallelEvacuator::UpdateOldToNewObjectSlot(ObjectSlot &slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (!value.IsHeapObject()) {
        return false;
    }
    TaggedObject *object = value.GetHeapObject();
    Region *valueRegion = Region::ObjectAddressToRange(object);
    // Only old-to-new object slots are updated here, while iterating the OldToNewRSet
    if (valueRegion->InYoungSpace()) {
        if (!valueRegion->InNewToNewSet()) {
            return UpdateForwardedOldToNewObjectSlot(object, slot, value.IsWeakForHeapObject());
        }
        // The whole region was moved from fromspace to tospace
        if (valueRegion->Test(object)) {
            return true;
        }
        if (value.IsWeakForHeapObject()) {
            slot.Clear();
        }
    } else if (valueRegion->InNewToOldSet()) {
        if (value.IsWeakForHeapObject() && !valueRegion->Test(object)) {
            slot.Clear();
        }
    }
    return false;
}

ParallelEvacuator::UpdateRootVisitor::UpdateRootVisitor(ParallelEvacuator *evacuator) : evacuator_(evacuator) {}

void ParallelEvacuator::UpdateRootVisitor::VisitRoot([[maybe_unused]] Root type, ObjectSlot slot)
{
    evacuator_->UpdateObjectSlot(slot);
}

void ParallelEvacuator::UpdateRootVisitor::VisitRangeRoot([[maybe_unused]] Root type, ObjectSlot start, ObjectSlot end)
{
    for (ObjectSlot slot = start; slot < end; slot++) {
        evacuator_->UpdateObjectSlot(slot);
    }
}

void ParallelEvacuator::UpdateRootVisitor::VisitBaseAndDerivedRoot([[maybe_unused]] Root type, ObjectSlot base,
                                                                   ObjectSlot derived, uintptr_t baseOldObject)
{
    if (JSTaggedValue(base.GetTaggedType()).IsHeapObject()) {
        derived.Update(base.GetTaggedType() + derived.GetTaggedType() - baseOldObject);
    }
}

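// Update a strong or weak slot to the forwarding address recorded in the referent's mark word;
// slots that point into the shared heap are left untouched.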
void ParallelEvacuator::UpdateObjectSlot(ObjectSlot &slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (value.IsHeapObject()) {
        if (value.IsInSharedHeap()) {
            return;
        }
        if (value.IsWeakForHeapObject()) {
            return UpdateWeakObjectSlot(value.GetTaggedWeakRef(), slot);
        }
        TaggedObject *object = value.GetTaggedObject();
        MarkWord markWord(object);
        if (markWord.IsForwardingAddress()) {
            TaggedObject *dst = markWord.ToForwardingAddress();
            slot.Update(dst);
        }
    }
}

void ParallelEvacuator::UpdateWeakObjectSlot(TaggedObject *value, ObjectSlot &slot)
{
    Region *objectRegion = Region::ObjectAddressToRange(value);
    if (objectRegion->InSharedHeap()) {
        return;
    }

    TaggedObject *dst = UpdateAddressAfterEvacation(value);
    if (dst == value) {
        return;
    }
    if (dst == nullptr) {
        slot.Clear();
        return;
    }
    auto weakRef = JSTaggedValue(dst).CreateAndGetWeakRef().GetRawTaggedObject();
    slot.Update(weakRef);
}

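// Slot update specialized by GC type: for young GC only slots into young space are rewritten,
// for old GC slots into young space or the collect set are rewritten, and slots into shared
// sweepable space are recorded in the local-to-share remembered set when requested.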
template<TriggerGCType gcType, bool needUpdateLocalToShare>
void ParallelEvacuator::UpdateNewObjectSlot(ObjectSlot &slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (value.IsHeapObject()) {
        Region *objectRegion = Region::ObjectAddressToRange(value.GetRawData());
        ASSERT(objectRegion != nullptr);
        if constexpr (needUpdateLocalToShare == true) {
            if (objectRegion->InSharedSweepableSpace()) {
                Region *rootRegion = Region::ObjectAddressToRange(slot.SlotAddress());
                rootRegion->InsertLocalToShareRSet(slot.SlotAddress());
                return;
            }
        }
        if constexpr (gcType == TriggerGCType::YOUNG_GC) {
            if (!objectRegion->InYoungSpace()) {
                if (value.IsWeakForHeapObject() && objectRegion->InNewToOldSet() &&
                    !objectRegion->Test(value.GetRawData())) {
                    slot.Clear();
                }
                return;
            }
        } else if constexpr (gcType == TriggerGCType::OLD_GC) {
            if (!objectRegion->InYoungSpaceOrCSet()) {
                if (value.IsWeakForHeapObject() && !objectRegion->InSharedHeap() &&
                        (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(value.GetRawData()))) {
                    slot.Clear();
                }
                return;
            }
        } else {
            LOG_GC(FATAL) << "UpdateNewObjectSlot: not support gcType yet";
            UNREACHABLE();
        }
        if (objectRegion->InNewToNewSet()) {
            if (value.IsWeakForHeapObject() && !objectRegion->Test(value.GetRawData())) {
                slot.Clear();
            }
            return;
        }
        UpdateObjectSlotValue(value, slot);
    }
}

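// Only slots that point into the collect set need to be rewritten here.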
void ParallelEvacuator::UpdateCrossRegionObjectSlot(ObjectSlot &slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (value.IsHeapObject()) {
        Region *objectRegion = Region::ObjectAddressToRange(value.GetRawData());
        ASSERT(objectRegion != nullptr);
        if (objectRegion->InCollectSet()) {
            UpdateObjectSlotValue(value, slot);
        }
    }
}

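// Rewrite the slot with the forwarding address from the mark word; weak references are
// re-tagged as weak, and a weak referent without a forwarding address is cleared.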
void ParallelEvacuator::UpdateObjectSlotValue(JSTaggedValue value, ObjectSlot &slot)
{
    if (value.IsWeakForHeapObject()) {
        MarkWord markWord(value.GetWeakReferent());
        if (markWord.IsForwardingAddress()) {
            auto dst = static_cast<JSTaggedType>(ToUintPtr(markWord.ToForwardingAddress()));
            slot.Update(JSTaggedValue(dst).CreateAndGetWeakRef().GetRawData());
        } else {
            slot.Clear();
        }
    } else {
        MarkWord markWord(value.GetTaggedObject());
        if (markWord.IsForwardingAddress()) {
            auto dst = reinterpret_cast<JSTaggedType>(markWord.ToForwardingAddress());
            slot.Update(dst);
        }
    }
}

ParallelEvacuator::SetObjectFieldRSetVisitor::SetObjectFieldRSetVisitor(ParallelEvacuator *evacuator)
    : evacuator_(evacuator) {}

void ParallelEvacuator::SetObjectFieldRSetVisitor::VisitObjectRangeImpl(BaseObject *root, uintptr_t startAddr,
    uintptr_t endAddr, VisitObjectArea area)
{
    JSThread *thread = evacuator_->heap_->GetJSThread();
    Region *rootRegion = Region::ObjectAddressToRange(root);
    ObjectSlot start(startAddr);
    ObjectSlot end(endAddr);
    if (UNLIKELY(area == VisitObjectArea::IN_OBJECT)) {
        JSHClass *hclass = TaggedObject::Cast(root)->GetClass();
        ASSERT(!hclass->IsAllTaggedProp());
        int index = 0;
        TaggedObject *dst = hclass->GetLayout(thread).GetTaggedObject();
        LayoutInfo *layout = LayoutInfo::UncheckCast(dst);
        ObjectSlot realEnd = start;
        realEnd += layout->GetPropertiesCapacity();
        end = end > realEnd ? realEnd : end;
        for (ObjectSlot slot = start; slot < end; slot++) {
            auto attr = layout->GetAttr(thread, index++);
            if (attr.IsTaggedRep()) {
                evacuator_->SetObjectRSet(slot, rootRegion);
            }
        }
        return;
    }
    for (ObjectSlot slot = start; slot < end; slot++) {
        evacuator_->SetObjectRSet(slot, rootRegion);
    }
}

void ParallelEvacuator::SetObjectFieldRSet(TaggedObject *object, JSHClass *cls)
{
    ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(object, cls, setObjectFieldRSetVisitor_);
}

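// Rebuild remembered set entries for one slot according to the space that now holds the
// referenced object, clearing weak references to dead objects along the way.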
void ParallelEvacuator::SetObjectRSet(ObjectSlot slot, Region *region)
{
    JSTaggedType value = slot.GetTaggedType();
    if (!JSTaggedValue(value).IsHeapObject()) {
        return;
    }
    Region *valueRegion = Region::ObjectAddressToRange(value);
    if (valueRegion->InYoungSpace()) {
        region->InsertOldToNewRSet(slot.SlotAddress());
    } else if (valueRegion->InNewToOldSet()) {
        if (JSTaggedValue(value).IsWeakForHeapObject() && !valueRegion->Test(value)) {
            slot.Clear();
        }
    } else if (valueRegion->InSharedSweepableSpace()) {
        region->InsertLocalToShareRSet(slot.SlotAddress());
    } else if (valueRegion->InCollectSet()) {
        region->InsertCrossRegionRSet(slot.SlotAddress());
    } else if (JSTaggedValue(value).IsWeakForHeapObject()) {
        if (heap_->IsConcurrentFullMark() && !valueRegion->InSharedHeap() &&
                (valueRegion->GetMarkGCBitset() == nullptr || !valueRegion->Test(value))) {
            slot.Clear();
        }
    }
}

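// Atomically claim the work item; only the first caller to flip the flag gets ownership.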
bool ParallelEvacuator::AcquireItem::TryAcquire()
{
    return acquire_.exchange(true, std::memory_order_relaxed) == false;
}

void ParallelEvacuator::WorkloadSet::Add(std::unique_ptr<Workload> workload)
{
    workloads_.emplace_back(AcquireItem{}, std::move(workload));
}

bool ParallelEvacuator::WorkloadSet::HasRemaningWorkload() const
{
    return remainingWorkloadNum_.load(std::memory_order_relaxed) > 0;
}

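// Returns true when this call retires the last remaining workloads in the set.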
bool ParallelEvacuator::WorkloadSet::FetchSubAndCheckWorkloadCount(size_t finishedCount)
{
    return remainingWorkloadNum_.fetch_sub(finishedCount, std::memory_order_relaxed) == finishedCount;
}

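// Resolve the post-evacuation address of an object; returns nullptr if the object did not survive.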
TaggedObject* ParallelEvacuator::UpdateAddressAfterEvacation(TaggedObject *oldAddress)
{
    Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(oldAddress));
    if (!objectRegion) {
        return nullptr;
    }
    if (objectRegion->InYoungSpaceOrCSet()) {
        if (objectRegion->InNewToNewSet()) {
            if (objectRegion->Test(oldAddress)) {
                return oldAddress;
            }
        } else {
            MarkWord markWord(oldAddress);
            if (markWord.IsForwardingAddress()) {
                return markWord.ToForwardingAddress();
            }
        }
        return nullptr;
    } else if (objectRegion->InNewToOldSet() && !objectRegion->Test(oldAddress)) {
        return nullptr;
    }
    if (heap_->IsConcurrentFullMark()) {
        if (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(oldAddress)) {
            return nullptr;
        }
    }
    return oldAddress;
}

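// Roughly one evacuation task per eight workloads, clamped between 1 and the available task pool threads.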
int ParallelEvacuator::CalculateEvacuationThreadNum()
{
    uint32_t count = evacuateWorkloadSet_.GetWorkloadCount();
    uint32_t regionPerThread = 8;
    uint32_t maxThreadNum = std::min(heap_->GetMaxEvacuateTaskCount(),
        common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum());
    return static_cast<int>(std::min(std::max(1U, count / regionPerThread), maxThreadNum));
}

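// Use roughly the fourth root of the workload count as the update thread number,
// clamped between 1 and the available task pool threads.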
int ParallelEvacuator::CalculateUpdateThreadNum()
{
    uint32_t count = updateWorkloadSet_.GetWorkloadCount();
    double regionPerThread = 1.0 / 4;
    count = std::pow(count, regionPerThread);
    uint32_t maxThreadNum = std::min(heap_->GetMaxEvacuateTaskCount(),
        common::Taskpool::GetCurrentTaskpool()->GetTotalThreadNum());
    return static_cast<int>(std::min(std::max(1U, count), maxThreadNum));
}

size_t ParallelEvacuator::WorkloadSet::GetWorkloadCount() const
{
    return workloads_.size();
}

}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_PARALLEL_EVACUATOR_INL_H