/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_SHARED_SHARED_SPACE_H
#define ECMASCRIPT_MEM_SHARED_SHARED_SPACE_H

#include "ecmascript/mem/mem_common.h"
#include "ecmascript/mem/sparse_space.h"

namespace panda::ecmascript {
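// Expands to an early return of the local variable `object` when it already holds a non-zero
// (i.e. successful) allocation result; a variable named `object` must be in scope at the call site.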
#define CHECK_SOBJECT_NOT_NULL()                                                        \
    if (object != 0) {                                                                  \
        return object;                                                                  \
    }

class SharedHeap;
class SharedLocalSpace;

class SharedSparseSpace : public Space {
public:
    SharedSparseSpace(SharedHeap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity);
    ~SharedSparseSpace() override
    {
        delete allocator_;
    }
    NO_COPY_SEMANTIC(SharedSparseSpace);
    NO_MOVE_SEMANTIC(SharedSparseSpace);

    void Reset();

    uintptr_t AllocateWithoutGC(JSThread *thread, size_t size);

    uintptr_t Allocate(JSThread *thread, size_t size, bool allowGC = true);
    uintptr_t TryAllocateAndExpand(JSThread *thread, size_t size, bool expand);

    // For deserialization work
    void ResetTopPointer(uintptr_t top);
    uintptr_t AllocateNoGCAndExpand(JSThread *thread, size_t size);
    Region *AllocateDeserializeRegion(JSThread *thread);
    void MergeDeserializeAllocateRegions(const std::vector<Region *> &allocateRegions);

    // For sweeping
    void PrepareSweeping();
    void AsyncSweep(bool isMain);
    void Sweep();

    bool TryFillSweptRegion();
    // Ensure all regions have finished sweeping
    bool FinishFillSweptRegion();

    void AddSweepingRegion(Region *region);
    void SortSweepingRegion();
    Region *GetSweepingRegionSafe();
    void AddSweptRegionSafe(Region *region);
    Region *GetSweptRegionSafe();

    void FreeRegion(Region *current, bool isMain = true);
    void FreeLiveRange(uintptr_t freeStart, uintptr_t freeEnd, bool isMain);

    void IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const;

    size_t GetHeapObjectSize() const;

    void IncreaseAllocatedSize(size_t size);

    void IncreaseLiveObjectSize(size_t size)
    {
        liveObjectSize_ += size;
    }

    void DecreaseLiveObjectSize(size_t size)
    {
        liveObjectSize_ -= size;
    }

    bool CommittedSizeExceed()
    {
        return committedSize_ >= maximumCapacity_ + outOfMemoryOvershootSize_;
    }

    void CheckAndTriggerLocalFullMark();

    size_t GetTotalAllocatedSize() const;

    void InvokeAllocationInspector(Address object, size_t size, size_t alignedSize);

    void DetachFreeObjectSet(Region *region);

protected:
    bool Expand(JSThread *thread);
    FreeListAllocator<FreeObject> *allocator_;
    SweepState sweepState_ = SweepState::NO_SWEEP;
    SharedHeap *sHeap_ {nullptr};

private:
    static constexpr double LIVE_OBJECT_SIZE_RATIO = 0.8;

    uintptr_t AllocateWithExpand(JSThread *thread, size_t size);
    uintptr_t TryAllocate(JSThread *thread, size_t size);
    // For sweeping
    uintptr_t AllocateAfterSweepingCompleted(JSThread *thread, size_t size);
    void IncAllocSObjectSize(uintptr_t object, size_t size);

    Mutex lock_;
    Mutex allocateLock_;
    std::vector<Region *> sweepingList_;
    std::vector<Region *> sweptList_;
    size_t liveObjectSize_ {0};
    size_t triggerLocalFullMarkLimit_ {0};
};
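
// Illustrative sketch: a hypothetical helper (TryAllocateSharedObjectSketch is not part of this
// header) showing how a caller can pair Allocate() with the CHECK_SOBJECT_NOT_NULL macro above.
// The fallback behaviour described in the comments is inferred from the private helpers
// (TryAllocate, AllocateWithExpand, AllocateAfterSweepingCompleted), not guaranteed by this header.
inline uintptr_t TryAllocateSharedObjectSketch(JSThread *thread, SharedSparseSpace *space, size_t size)
{
    // Allocate() presumably tries the free list first and may expand the space or trigger a
    // shared GC when allowGC is true.
    uintptr_t object = space->Allocate(thread, size, true);
    CHECK_SOBJECT_NOT_NULL();  // early-returns `object` when the allocation succeeded
    return 0;                  // zero signals failure; the caller handles the OOM path
}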

class SharedAppSpawnSpace : public SharedSparseSpace {
public:
    SharedAppSpawnSpace(SharedHeap *heap, size_t initialCapacity);
    ~SharedAppSpawnSpace() override = default;
    NO_COPY_SEMANTIC(SharedAppSpawnSpace);
    NO_MOVE_SEMANTIC(SharedAppSpawnSpace);

    void IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const;
};

class SharedNonMovableSpace : public SharedSparseSpace {
public:
    SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
    ~SharedNonMovableSpace() override = default;
    NO_COPY_SEMANTIC(SharedNonMovableSpace);
    NO_MOVE_SEMANTIC(SharedNonMovableSpace);
};

class SharedOldSpace : public SharedSparseSpace {
public:
    SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
    ~SharedOldSpace() override = default;
    NO_COPY_SEMANTIC(SharedOldSpace);
    NO_MOVE_SEMANTIC(SharedOldSpace);

    static constexpr int64_t MAX_EVACUATION_SIZE = 2_MB;
    static constexpr size_t MIN_COLLECT_REGION_SIZE = 5;

    size_t GetMergeSize() const
    {
        return mergeSize_;
    }

    void IncreaseMergeSize(size_t size)
    {
        mergeSize_ += size;
    }

    void ResetMergeSize()
    {
        mergeSize_ = 0;
    }

    void Merge(SharedLocalSpace *localSpace);
    void SelectCSets();
    void RevertCSets();
    void ReclaimCSets();
    void AddCSetRegion(Region *region);
    void RemoveCSetRegion(Region *region);

    template<class Callback>
    void EnumerateCollectRegionSet(Callback &&cb) const
    {
        for (Region *region : collectRegionSet_) {
            if (region != nullptr) {
                cb(region);
            }
        }
    }

    size_t GetCollectSetRegionCount() const
    {
        return collectRegionSet_.size();
    }

private:
    Mutex lock_;
    size_t mergeSize_ {0};
    std::vector<Region *> collectRegionSet_;
};
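
// Illustrative sketch: a hypothetical helper (VisitCollectSetSketch is not part of this header)
// showing that EnumerateCollectRegionSet() accepts any callable, typically a lambda, and invokes
// it for every non-null region currently in the collect set.
inline void VisitCollectSetSketch(const SharedOldSpace &space, const std::function<void(Region *)> &visitor)
{
    // Forward each collect-set region to a caller-supplied visitor, e.g. for logging or statistics.
    space.EnumerateCollectRegionSet([&visitor](Region *region) { visitor(region); });
}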

class SharedLocalSpace : public SharedSparseSpace {
public:
    SharedLocalSpace() = delete;
    SharedLocalSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
    ~SharedLocalSpace() override = default;
    NO_COPY_SEMANTIC(SharedLocalSpace);
    NO_MOVE_SEMANTIC(SharedLocalSpace);

    uintptr_t Allocate(size_t size, bool isExpand = true);
    bool AddRegionToList(Region *region);
    void FreeBumpPoint();
    void Stop();
private:
    void ForceExpandInSharedGC(JSThread *thread);
};

class SharedReadOnlySpace : public Space {
public:
    SharedReadOnlySpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
    ~SharedReadOnlySpace() override = default;
    void SetReadOnly()
    {
        auto cb = [](Region *region) {
            region->SetReadOnlyAndMarked();
        };
        EnumerateRegions(cb);
    }

    void IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const;

    void ClearReadOnly()
    {
        auto cb = [](Region *region) {
            region->ClearReadOnly();
        };
        EnumerateRegions(cb);
    }

    bool Expand(JSThread *thread);

    uintptr_t Allocate(JSThread *thread, size_t size);

    NO_COPY_SEMANTIC(SharedReadOnlySpace);
    NO_MOVE_SEMANTIC(SharedReadOnlySpace);

private:
    Mutex allocateLock_;
    BumpPointerAllocator allocator_;
};

class SharedHugeObjectSpace : public Space {
public:
    SharedHugeObjectSpace(BaseHeap *heap, HeapRegionAllocator *regionAllocator, size_t initialCapacity,
                    size_t maximumCapacity);
    ~SharedHugeObjectSpace() override = default;
    NO_COPY_SEMANTIC(SharedHugeObjectSpace);
    NO_MOVE_SEMANTIC(SharedHugeObjectSpace);
    // Sometimes it is unsafe to check the safepoint here, e.g. during deserialization: if the safepoint is
    // checked, the JSThread may be suspended and a SharedGC performed, which would free regions in the
    // SharedHeap that were allocated at the start of deserialization for upcoming object allocation but do
    // not yet contain any objects.
    uintptr_t Allocate(JSThread *thread, size_t objectSize, AllocateEventType allocType = AllocateEventType::NORMAL);
    void Sweep();
    size_t GetHeapObjectSize() const;
    void IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const;

    void ReclaimHugeRegion();

    void InvokeAllocationInspector(Address object, size_t objectSize);

    bool CommittedSizeExceed(size_t size = 0) const
    {
        return committedSize_ + size >= maximumCapacity_ + outOfMemoryOvershootSize_;
    }

    void CheckAndTriggerLocalFullMark(JSThread *thread, size_t size);
private:
    static constexpr size_t HUGE_OBJECT_BITSET_SIZE = 16;
    static constexpr double HUGE_OBJECT_SIZE_RATIO = 0.8;

    size_t triggerLocalFullMarkLimit_ {0};
    EcmaList<Region> hugeNeedFreeList_ {};
    Mutex allocateLock_;
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_SHARED_SHARED_SPACE_H