/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_PARALLEL_EVACUATOR_INL_H
#define ECMASCRIPT_MEM_PARALLEL_EVACUATOR_INL_H

#include "ecmascript/mem/parallel_evacuator.h"

#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/mark_word.h"
#include "ecmascript/mem/region-inl.h"
#include "ecmascript/taskpool/taskpool.h"

namespace panda::ecmascript {
ParallelEvacuator::ParallelEvacuator(Heap *heap) : heap_(heap), updateRootVisitor_(this),
    setObjectFieldRSetVisitor_(this) {}

// Move regions with a survival rate of more than 75% to new space as a whole.
// Also move regions when the young space overshoot size is larger than the max capacity.
RegionEvacuateType ParallelEvacuator::SelectRegionEvacuateType(Region *region)
{
    double aliveRate = static_cast<double>(region->AliveObject()) / region->GetSize();
    if (UNLIKELY(region->HasAgeMark())) {
        return RegionEvacuateType::OBJECT_EVACUATE;
    } else if (region->BelowAgeMark()) {
        if (aliveRate >= MIN_OBJECT_SURVIVAL_RATE) {
            return RegionEvacuateType::REGION_NEW_TO_OLD;
        }
        return RegionEvacuateType::OBJECT_EVACUATE;
    }
    if (aliveRate >= MIN_OBJECT_SURVIVAL_RATE || heap_->GetFromSpaceDuringEvacuation()->CommittedSizeIsLarge()) {
        return RegionEvacuateType::REGION_NEW_TO_NEW;
    }
    return RegionEvacuateType::OBJECT_EVACUATE;
}

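// If a fresh region or a region with a high alive rate is kept in new space as a whole,
// enlarge the new space overshoot size by the non-new-space share of its capacity.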
void ParallelEvacuator::CompensateOvershootSizeIfHighAliveRate(Region *region)
{
    double aliveRate = static_cast<double>(region->AliveObject()) / region->GetSize();
    if (region->IsFreshRegion() || aliveRate >= STRICT_OBJECT_SURVIVAL_RATE) {
        size_t compensateSize = static_cast<size_t>(region->GetCapacity() * (1.0 - HPPGC_NEWSPACE_SIZE_RATIO));
        heap_->GetNewSpace()->AddOverShootSize(compensateSize);
    }
}

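// Try to evacuate the region as a whole instead of copying objects individually,
// based on the evacuate type chosen by SelectRegionEvacuateType.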
bool ParallelEvacuator::TryWholeRegionEvacuate(Region *region, RegionEvacuateType type)
{
    switch (type) {
        case RegionEvacuateType::REGION_NEW_TO_NEW:
            CompensateOvershootSizeIfHighAliveRate(region);
            return heap_->MoveYoungRegion(region);
        case RegionEvacuateType::REGION_NEW_TO_OLD:
            return heap_->MoveYoungRegionToOld(region);
        default:
            return false;
    }
}

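// Update an OldToNewRSet slot whose referent may have been forwarded. Returns true if
// the updated slot still points into young space and its RSet entry must be kept.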
bool ParallelEvacuator::UpdateForwardedOldToNewObjectSlot(TaggedObject *object, ObjectSlot &slot, bool isWeak)
{
    MarkWord markWord(object);
    if (markWord.IsForwardingAddress()) {
        TaggedObject *dst = markWord.ToForwardingAddress();
        if (isWeak) {
            dst = JSTaggedValue(dst).CreateAndGetWeakRef().GetRawTaggedObject();
        }
        slot.Update(dst);
        Region *dstRegion = Region::ObjectAddressToRange(dst);
        // Keep the oldToNewRSet entry when the forwarded object is still in young space
        if (dstRegion->InYoungSpace()) {
            return true;
        }
    } else if (isWeak) {
        slot.Clear();
    }
    return false;
}

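// Update one OldToNewRSet slot. Returns true if the slot still references a live young
// object and its remembered-set entry should be preserved.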
bool ParallelEvacuator::UpdateOldToNewObjectSlot(ObjectSlot &slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (!value.IsHeapObject()) {
        return false;
    }
    TaggedObject *object = value.GetHeapObject();
    Region *valueRegion = Region::ObjectAddressToRange(object);
    // Old-to-new object slots are only updated while iterating the OldToNewRSet
    if (valueRegion->InYoungSpace()) {
        if (!valueRegion->InNewToNewSet()) {
            return UpdateForwardedOldToNewObjectSlot(object, slot, value.IsWeakForHeapObject());
        }
        // The whole region was moved from from-space to to-space; keep the slot if the object is marked alive
        if (valueRegion->Test(object)) {
            return true;
        }
        if (value.IsWeakForHeapObject()) {
            slot.Clear();
        }
    } else if (valueRegion->InNewToOldSet()) {
        if (value.IsWeakForHeapObject() && !valueRegion->Test(object)) {
            slot.Clear();
        }
    }
    return false;
}

ParallelEvacuator::UpdateRootVisitor::UpdateRootVisitor(ParallelEvacuator *evacuator) : evacuator_(evacuator) {}

void ParallelEvacuator::UpdateRootVisitor::VisitRoot([[maybe_unused]] Root type, ObjectSlot slot)
{
    evacuator_->UpdateObjectSlot(slot);
}

void ParallelEvacuator::UpdateRootVisitor::VisitRangeRoot([[maybe_unused]] Root type, ObjectSlot start, ObjectSlot end)
{
    for (ObjectSlot slot = start; slot < end; slot++) {
        evacuator_->UpdateObjectSlot(slot);
    }
}

void ParallelEvacuator::UpdateRootVisitor::VisitBaseAndDerivedRoot([[maybe_unused]] Root type, ObjectSlot base,
                                                                   ObjectSlot derived, uintptr_t baseOldObject)
{
    if (JSTaggedValue(base.GetTaggedType()).IsHeapObject()) {
        derived.Update(base.GetTaggedType() + derived.GetTaggedType() - baseOldObject);
    }
}

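// Update a root slot to the forwarding address of an evacuated object; shared-heap
// referents are skipped and weak referents are handled by UpdateWeakObjectSlot.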
void ParallelEvacuator::UpdateObjectSlot(ObjectSlot &slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (value.IsHeapObject()) {
        if (value.IsInSharedHeap()) {
            return;
        }
        if (value.IsWeakForHeapObject()) {
            return UpdateWeakObjectSlot(value.GetTaggedWeakRef(), slot);
        }
        TaggedObject *object = value.GetTaggedObject();
        MarkWord markWord(object);
        if (markWord.IsForwardingAddress()) {
            TaggedObject *dst = markWord.ToForwardingAddress();
            slot.Update(dst);
        }
    }
}

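// Update a weak reference slot: rewrite it to the new address if the referent survived,
// or clear it if the referent is dead.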
void ParallelEvacuator::UpdateWeakObjectSlot(TaggedObject *value, ObjectSlot &slot)
{
    Region *objectRegion = Region::ObjectAddressToRange(value);
    if (objectRegion->InSharedHeap()) {
        return;
    }

    TaggedObject *dst = UpdateAddressAfterEvacation(value);
    if (dst == value) {
        return;
    }
    if (dst == nullptr) {
        slot.Clear();
        return;
    }
    auto weakRef = JSTaggedValue(dst).CreateAndGetWeakRef().GetRawTaggedObject();
    slot.Update(weakRef);
}

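// Update an object slot during reference fixing, specialized by GC type: for young GC
// only referents in young space are forwarded, for old GC referents in young space or
// the collect set are forwarded, and dead weak referents are cleared. When
// needUpdateLocalToShare is set, slots pointing into shared sweepable space are recorded
// in the local-to-share remembered set instead.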
template<TriggerGCType gcType, bool needUpdateLocalToShare>
void ParallelEvacuator::UpdateNewObjectSlot(ObjectSlot &slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (value.IsHeapObject()) {
        Region *objectRegion = Region::ObjectAddressToRange(value.GetRawData());
        ASSERT(objectRegion != nullptr);
        if constexpr (needUpdateLocalToShare == true) {
            if (objectRegion->InSharedSweepableSpace()) {
                Region *rootRegion = Region::ObjectAddressToRange(slot.SlotAddress());
                rootRegion->InsertLocalToShareRSet(slot.SlotAddress());
                return;
            }
        }
        if constexpr (gcType == TriggerGCType::YOUNG_GC) {
            if (!objectRegion->InYoungSpace()) {
                if (value.IsWeakForHeapObject() && objectRegion->InNewToOldSet() &&
                    !objectRegion->Test(value.GetRawData())) {
                    slot.Clear();
                }
                return;
            }
        } else if constexpr (gcType == TriggerGCType::OLD_GC) {
            if (!objectRegion->InYoungSpaceOrCSet()) {
                if (value.IsWeakForHeapObject() && !objectRegion->InSharedHeap() &&
                        (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(value.GetRawData()))) {
                    slot.Clear();
                }
                return;
            }
        } else {
            LOG_GC(FATAL) << "UpdateNewObjectSlot: not support gcType yet";
            UNREACHABLE();
        }
        if (objectRegion->InNewToNewSet()) {
            if (value.IsWeakForHeapObject() && !objectRegion->Test(value.GetRawData())) {
                slot.Clear();
            }
            return;
        }
        UpdateObjectSlotValue(value, slot);
    }
}

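// Update a slot recorded in the cross-region remembered set if it points into the collect set.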
void ParallelEvacuator::UpdateCrossRegionObjectSlot(ObjectSlot &slot)
{
    JSTaggedValue value(slot.GetTaggedType());
    if (value.IsHeapObject()) {
        Region *objectRegion = Region::ObjectAddressToRange(value.GetRawData());
        ASSERT(objectRegion != nullptr);
        if (objectRegion->InCollectSet()) {
            UpdateObjectSlotValue(value, slot);
        }
    }
}

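// Rewrite a slot to the referent's forwarding address, preserving the weak tag;
// a weak referent without a forwarding address is dead and the slot is cleared.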
void ParallelEvacuator::UpdateObjectSlotValue(JSTaggedValue value, ObjectSlot &slot)
{
    if (value.IsWeakForHeapObject()) {
        MarkWord markWord(value.GetWeakReferent());
        if (markWord.IsForwardingAddress()) {
            auto dst = static_cast<JSTaggedType>(ToUintPtr(markWord.ToForwardingAddress()));
            slot.Update(JSTaggedValue(dst).CreateAndGetWeakRef().GetRawData());
        } else {
            slot.Clear();
        }
    } else {
        MarkWord markWord(value.GetTaggedObject());
        if (markWord.IsForwardingAddress()) {
            auto dst = reinterpret_cast<JSTaggedType>(markWord.ToForwardingAddress());
            slot.Update(dst);
        }
    }
}

ParallelEvacuator::SetObjectFieldRSetVisitor::SetObjectFieldRSetVisitor(ParallelEvacuator *evacuator)
    : evacuator_(evacuator) {}

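// Visit the fields of an object and record each tagged field in the proper remembered set.
// For in-object properties, only slots whose layout attribute has a tagged representation
// are visited.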
void ParallelEvacuator::SetObjectFieldRSetVisitor::VisitObjectRangeImpl(TaggedObject *root, ObjectSlot start,
    ObjectSlot end, VisitObjectArea area)
{
    Region *rootRegion = Region::ObjectAddressToRange(root);
    if (UNLIKELY(area == VisitObjectArea::IN_OBJECT)) {
        JSHClass *hclass = root->GetClass();
        ASSERT(!hclass->IsAllTaggedProp());
        int index = 0;
        TaggedObject *dst = hclass->GetLayout().GetTaggedObject();
        LayoutInfo *layout = LayoutInfo::UncheckCast(dst);
        ObjectSlot realEnd = start;
        realEnd += layout->GetPropertiesCapacity();
        end = end > realEnd ? realEnd : end;
        for (ObjectSlot slot = start; slot < end; slot++) {
            auto attr = layout->GetAttr(index++);
            if (attr.IsTaggedRep()) {
                evacuator_->SetObjectRSet(slot, rootRegion);
            }
        }
        return;
    }
    for (ObjectSlot slot = start; slot < end; slot++) {
        evacuator_->SetObjectRSet(slot, rootRegion);
    }
}

void ParallelEvacuator::SetObjectFieldRSet(TaggedObject *object, JSHClass *cls)
{
    ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(object, cls, setObjectFieldRSetVisitor_);
}

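// Record the slot in the appropriate remembered set (old-to-new, local-to-share or
// cross-region) according to where its referent lives, and clear weak references to
// dead objects.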
void ParallelEvacuator::SetObjectRSet(ObjectSlot slot, Region *region)
{
    JSTaggedType value = slot.GetTaggedType();
    if (!JSTaggedValue(value).IsHeapObject()) {
        return;
    }
    Region *valueRegion = Region::ObjectAddressToRange(value);
    if (valueRegion->InYoungSpace()) {
        region->InsertOldToNewRSet(slot.SlotAddress());
    } else if (valueRegion->InNewToOldSet()) {
        if (JSTaggedValue(value).IsWeakForHeapObject() && !valueRegion->Test(value)) {
            slot.Clear();
        }
    } else if (valueRegion->InSharedSweepableSpace()) {
        region->InsertLocalToShareRSet(slot.SlotAddress());
    } else if (valueRegion->InCollectSet()) {
        region->InsertCrossRegionRSet(slot.SlotAddress());
    } else if (JSTaggedValue(value).IsWeakForHeapObject()) {
        if (heap_->IsConcurrentFullMark() && !valueRegion->InSharedHeap() &&
                (valueRegion->GetMarkGCBitset() == nullptr || !valueRegion->Test(value))) {
            slot.Clear();
        }
    }
}

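// Atomically claim a workload item; returns true only for the first caller.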
bool ParallelEvacuator::AcquireItem::TryAcquire()
{
    return acquire_.exchange(true, std::memory_order_relaxed) == false;
}

void ParallelEvacuator::WorkloadSet::Add(std::unique_ptr<Workload> workload)
{
    workloads_.emplace_back(AcquireItem{}, std::move(workload));
}

bool ParallelEvacuator::WorkloadSet::HasRemaningWorkload() const
{
    return remainingWorkloadNum_.load(std::memory_order_relaxed) > 0;
}

bool ParallelEvacuator::WorkloadSet::FetchSubAndCheckWorkloadCount(size_t finishedCount)
{
    return remainingWorkloadNum_.fetch_sub(finishedCount, std::memory_order_relaxed) == finishedCount;
}

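// Return the post-evacuation address of an object: the forwarding address if it was
// copied, the original address if it survived in place, or nullptr if it is dead.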
TaggedObject* ParallelEvacuator::UpdateAddressAfterEvacation(TaggedObject *oldAddress)
{
    Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(oldAddress));
    if (!objectRegion) {
        return nullptr;
    }
    if (objectRegion->InYoungSpaceOrCSet()) {
        if (objectRegion->InNewToNewSet()) {
            if (objectRegion->Test(oldAddress)) {
                return oldAddress;
            }
        } else {
            MarkWord markWord(oldAddress);
            if (markWord.IsForwardingAddress()) {
                return markWord.ToForwardingAddress();
            }
        }
        return nullptr;
    } else if (objectRegion->InNewToOldSet() && !objectRegion->Test(oldAddress)) {
        return nullptr;
    }
    if (heap_->IsConcurrentFullMark()) {
        if (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(oldAddress)) {
            return nullptr;
        }
    }
    return oldAddress;
}

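// Choose the number of evacuation tasks: roughly one task per eight regions, capped by
// the heap's task limit and the task pool size.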
int ParallelEvacuator::CalculateEvacuationThreadNum()
{
    uint32_t count = evacuateWorkloadSet_.GetWorkloadCount();
    uint32_t regionPerThread = 8;
    uint32_t maxThreadNum = std::min(heap_->GetMaxEvacuateTaskCount(),
        Taskpool::GetCurrentTaskpool()->GetTotalThreadNum());
    return static_cast<int>(std::min(std::max(1U, count / regionPerThread), maxThreadNum));
}

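// Choose the number of update tasks: the fourth root of the workload count, capped by
// the heap's task limit and the task pool size.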
int ParallelEvacuator::CalculateUpdateThreadNum()
{
    uint32_t count = updateWorkloadSet_.GetWorkloadCount();
    double regionPerThread = 1.0 / 4;
    count = std::pow(count, regionPerThread);
    uint32_t maxThreadNum = std::min(heap_->GetMaxEvacuateTaskCount(),
        Taskpool::GetCurrentTaskpool()->GetTotalThreadNum());
    return static_cast<int>(std::min(std::max(1U, count), maxThreadNum));
}

size_t ParallelEvacuator::WorkloadSet::GetWorkloadCount() const
{
    return workloads_.size();
}

}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_PARALLEL_EVACUATOR_INL_H