/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_SPARSE_SPACE_H
#define ECMASCRIPT_MEM_SPARSE_SPACE_H

#include "ecmascript/mem/space-inl.h"
#include "ecmascript/mem/mem_common.h"
#include "ecmascript/mem/jit_fort.h"

#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
#define CHECK_OBJECT_AND_INC_OBJ_SIZE(size)                                       \
    if (object != 0) {                                                            \
        IncreaseLiveObjectSize(size);                                             \
        if (!heap_->IsConcurrentFullMark() || heap_->IsReadyToConcurrentMark()) { \
            Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);      \
        }                                                                         \
        InvokeAllocationInspector(object, size, size);                            \
        return object;                                                            \
    }
#else
#define CHECK_OBJECT_AND_INC_OBJ_SIZE(size)                                       \
    if (object != 0) {                                                            \
        IncreaseLiveObjectSize(size);                                             \
        if (!heap_->IsConcurrentFullMark() || heap_->IsReadyToConcurrentMark()) { \
            Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);      \
        }                                                                         \
        return object;                                                            \
    }
#endif

enum class SweepState : uint8_t {
    NO_SWEEP,
    SWEEPING,
    SWEPT
};

namespace panda::ecmascript {
class LocalSpace;
class SemiSpace;

class SparseSpace : public Space {
public:
    SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity);
    ~SparseSpace() override
    {
        delete allocator_;
    }
    NO_COPY_SEMANTIC(SparseSpace);
    NO_MOVE_SEMANTIC(SparseSpace);

    void Initialize() override;
    void Reset();
    void ResetTopPointer(uintptr_t top);

    uintptr_t Allocate(size_t size, bool allowGC = true);
    bool Expand();

    // For sweeping
    virtual void PrepareSweeping();
    virtual void Sweep();
    virtual void AsyncSweep(bool isMain);

    bool TryFillSweptRegion();
    // Ensure all regions have finished sweeping
    bool FinishFillSweptRegion();

    void AddSweepingRegion(Region *region);
    void SortSweepingRegion();
    Region *GetSweepingRegionSafe();
    void AddSweptRegionSafe(Region *region);
    Region *GetSweptRegionSafe();
    Region *TryToGetSuitableSweptRegion(size_t size);

    void FreeRegion(Region *current, bool isMain = true);
    void FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain);

    void DetachFreeObjectSet(Region *region);

    void IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const;
    void IterateOldToNewOverObjects(
        const std::function<void(TaggedObject *object, JSTaggedValue value)> &visitor) const;

    size_t GetHeapObjectSize() const;

    void IncreaseAllocatedSize(size_t size);

    void IncreaseLiveObjectSize(size_t size)
    {
        liveObjectSize_ += size;
    }

    void DecreaseLiveObjectSize(size_t size)
    {
        liveObjectSize_ -= size;
    }

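    // Overshoot bookkeeping: overshootSize_ grants temporary headroom above maximumCapacity_ when
    // CommittedSizeExceed() checks the committed size; AdjustOvershootSize() trims it by the
    // capacity that is currently uncommitted.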
    void SetOvershootSize(size_t size)
    {
        overshootSize_ = size;
    }

    void IncreaseOvershootSize(size_t size)
    {
        overshootSize_ += size;
    }

    size_t GetOvershootSize() const
    {
        return overshootSize_;
    }

    void AdjustOvershootSize()
    {
        if (overshootSize_ > 0 && maximumCapacity_ > committedSize_) {
            size_t size = maximumCapacity_ - committedSize_;
            overshootSize_ = overshootSize_ > size ? overshootSize_ - size : 0;
        }
    }

    bool CommittedSizeExceed() const
    {
        return committedSize_ >= maximumCapacity_ + overshootSize_ + outOfMemoryOvershootSize_;
    }

    size_t GetTotalAllocatedSize() const;

    void InvokeAllocationInspector(Address object, size_t size, size_t alignedSize);

protected:
    FreeListAllocator<FreeObject> *allocator_;
    SweepState sweepState_ = SweepState::NO_SWEEP;
    Heap *localHeap_ {nullptr};
    size_t liveObjectSize_ {0};
    uintptr_t AllocateAfterSweepingCompleted(size_t size);

private:
    Mutex lock_;
    std::vector<Region *> sweepingList_;
    std::vector<Region *> sweptList_;
    size_t overshootSize_ {0};
};

class OldSpace : public SparseSpace {
public:
    OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
    ~OldSpace() override = default;
    NO_COPY_SEMANTIC(OldSpace);
    NO_MOVE_SEMANTIC(OldSpace);

    Region *TrySweepToGetSuitableRegion(size_t size);
    Region *TryToGetExclusiveRegion(size_t size);

    uintptr_t AllocateFast(size_t size);
    uintptr_t AllocateSlow(size_t size, bool tryFast);
    // CSet
    void SelectCSet();
    void CheckRegionSize();
    void RevertCSet();
    void ReclaimCSet();

    bool SwapRegion(Region *region, SemiSpace *fromSpace);

    void PrepareSweepNewToOldRegions();
    void SweepNewToOldRegions();

    unsigned long GetSelectedRegionNumber() const
    {
        return std::max(committedSize_ / PARTIAL_GC_MAX_COLLECT_REGION_RATE, PARTIAL_GC_INITIAL_COLLECT_REGION_SIZE);
    }

    size_t GetMergeSize() const
    {
        return mergeSize_;
    }

    void IncreaseMergeSize(size_t size)
    {
        mergeSize_ += size;
    }

    void ResetMergeSize()
    {
        mergeSize_ = 0;
    }

    void IncreaseCommittedOverSizeLimit(size_t size)
    {
        committedOverSizeLimit_ += size;
    }

    void ResetCommittedOverSizeLimit()
    {
        DecreaseOutOfMemoryOvershootSize(committedOverSizeLimit_);
        committedOverSizeLimit_ = 0;
    }

    template<class Callback>
    void EnumerateCollectRegionSet(const Callback &cb) const
    {
        for (Region *current : collectRegionSet_) {
            if (current != nullptr) {
                cb(current);
            }
        }
    }

    size_t GetCollectSetRegionCount() const
    {
        return collectRegionSet_.size();
    }

    void Merge(LocalSpace *localSpace);
private:
    static constexpr int64_t PARTIAL_GC_MAX_EVACUATION_SIZE_FOREGROUND = 2_MB;
    static constexpr int64_t PARTIAL_GC_MAX_EVACUATION_SIZE_BACKGROUND = 6_MB;
    static constexpr unsigned long long PARTIAL_GC_MAX_COLLECT_REGION_RATE = 2_MB;
    static constexpr unsigned long long PARTIAL_GC_INITIAL_COLLECT_REGION_SIZE = 24;
    static constexpr size_t PARTIAL_GC_MIN_COLLECT_REGION_SIZE = 5;

    void FreeRegionFromSpace(Region *region);

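    // Collect set (CSet) of regions for partial GC; see SelectCSet(), RevertCSet() and ReclaimCSet().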
    CVector<Region *> collectRegionSet_;
    Mutex lock_;
    size_t mergeSize_ {0};
    size_t committedOverSizeLimit_ {0};
};

class NonMovableSpace : public SparseSpace {
public:
    NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
    ~NonMovableSpace() override = default;
    NO_COPY_SEMANTIC(NonMovableSpace);
    NO_MOVE_SEMANTIC(NonMovableSpace);

    uintptr_t CheckAndAllocate(size_t size);
};

class AppSpawnSpace : public SparseSpace {
public:
    AppSpawnSpace(Heap *heap, size_t initialCapacity);
    ~AppSpawnSpace() override = default;
    NO_COPY_SEMANTIC(AppSpawnSpace);
    NO_MOVE_SEMANTIC(AppSpawnSpace);

    uintptr_t AllocateSync(size_t size);

    void IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const;
private:
    Mutex mutex_;
};

class LocalSpace : public SparseSpace {
public:
    LocalSpace() = delete;
    LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
    ~LocalSpace() override = default;
    NO_COPY_SEMANTIC(LocalSpace);
    NO_MOVE_SEMANTIC(LocalSpace);

    uintptr_t Allocate(size_t size, bool isExpand = true);
    bool AddRegionToList(Region *region);
    void FreeBumpPoint();
    void Stop();
private:
    void ForceExpandInGC();
};

class MachineCode;
struct MachineCodeDesc;
class MachineCodeSpace : public SparseSpace {
public:
    MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
    ~MachineCodeSpace() override;
    NO_COPY_SEMANTIC(MachineCodeSpace);
    NO_MOVE_SEMANTIC(MachineCodeSpace);  // Note: Expand() is left to be defined
    uintptr_t GetMachineCodeObject(uintptr_t pc);
    size_t CheckMachineCodeObject(uintptr_t curPtr, uintptr_t &machineCode, uintptr_t pc);
    void AsyncSweep(bool isMain) override;
    void Sweep() override;
    void PrepareSweeping() override;
    uintptr_t Allocate(size_t size, bool allowGC = true);
    uintptr_t Allocate(size_t size, MachineCodeDesc *desc, bool allowGC = true);
    uintptr_t PUBLIC_API JitFortAllocate(MachineCodeDesc *desc);

    inline void MarkJitFortMemAlive(MachineCode *obj)
    {
        if (jitFort_) {
            jitFort_->MarkJitFortMemAlive(obj);
        }
    }

    inline void MarkJitFortMemInstalled(MachineCode *obj)
    {
        if (jitFort_) {
            jitFort_->MarkJitFortMemInstalled(obj);
        }
    }

    inline bool IsSweeping()
    {
        return sweepState_ == SweepState::SWEEPING;
    }

    inline JitFort *GetJitFort()
    {
        return jitFort_;
    }

    bool InJitFortRange(uintptr_t address) const
    {
        if (jitFort_) {
            return jitFort_->InRange(address);
        }
        return false;
    }

private:
    JitFort *jitFort_ {nullptr};
    Mutex asyncSweepMutex_;
    friend class Heap;
    friend class ConcurrentSweeper;
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_SPARSE_SPACE_H