/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_GLOBAL_OBJECT_STORAGE_H
#define PANDA_GLOBAL_OBJECT_STORAGE_H

#include <libpandabase/os/mutex.h>

#include "runtime/include/runtime.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/object_header.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/gc/gc_root.h"
#include "runtime/mem/gc/gc_phase.h"
#include "runtime/include/class.h"
#include "runtime/include/panda_vm.h"
#include "reference.h"
#include "utils/logger.h"
#include "utils/dfx.h"

namespace panda::mem::test {
class ReferenceStorageTest;
}  // namespace panda::mem::test

namespace panda::mem {

/**
 * Storage for objects which need to be handled by the GC. The GC keeps the stored references up to date when it
 * moves objects and does not reclaim an object until the user has called the Remove method on its reference.
 * References are released by the Remove method or by the storage's destructor.
 */
class GlobalObjectStorage {
public:
    explicit GlobalObjectStorage(mem::InternalAllocatorPtr allocator, size_t maxSize, bool enableSizeCheck);

    ~GlobalObjectStorage();

    /// Check whether ref is a valid global reference or not.
    bool IsValidGlobalRef(const Reference *ref) const;

    /// Add an object to the storage and return the reference associated with it.
    PANDA_PUBLIC_API Reference *Add(const ObjectHeader *object, Reference::ObjectType type) const;

    /// Get the stored object associated with the given reference. The reference should have been returned by the Add
    /// method earlier.
    ObjectHeader *Get(const Reference *reference) const
    {
        if (reference == nullptr) {
            return nullptr;
        }
        auto type = reference->GetType();
        reference = Reference::GetRefWithoutType(reference);
        AssertType(type);
        ObjectHeader *result = nullptr;
        if (type == Reference::ObjectType::GLOBAL) {
            result = globalStorage_->Get(reference);
        } else if (type == Reference::ObjectType::WEAK) {
            result = weakStorage_->Get(reference);
        } else {
            result = globalFixedStorage_->Get(reference);
        }
        return result;
    }

    uintptr_t GetAddressForRef(const Reference *reference) const
    {
        ASSERT(reference != nullptr);
        auto type = reference->GetType();
        reference = Reference::GetRefWithoutType(reference);
        AssertType(type);
        uintptr_t result = 0;
        if (type == Reference::ObjectType::GLOBAL) {
            result = globalStorage_->GetAddressForRef(reference);
        } else if (type == Reference::ObjectType::WEAK) {
            result = weakStorage_->GetAddressForRef(reference);
        } else {
            result = globalFixedStorage_->GetAddressForRef(reference);
        }
        return result;
    }
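
    // Illustrative usage sketch (not part of this header): the typical lifetime of a global reference.
    // It assumes the owning PandaVM exposes this storage, e.g. via a GetGlobalObjectStorage() accessor;
    // the exact way the storage is obtained may differ.
    //
    //   GlobalObjectStorage *storage = vm->GetGlobalObjectStorage();
    //   Reference *ref = storage->Add(obj, Reference::ObjectType::GLOBAL);  // nullptr if the storage is full
    //   ObjectHeader *current = storage->Get(ref);  // stays valid even if the GC moved the object
    //   storage->Remove(ref);                       // the reference must not be used afterwards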

    /// Remove the object from the storage by the given reference. The reference should have been returned by the Add
    /// method earlier.
    PANDA_PUBLIC_API void Remove(const Reference *reference);

    /// Get all objects from the storage. Used for debugging.
    PandaVector<ObjectHeader *> GetAllObjects();

    void VisitObjects(const GCRootVisitor &gcRootVisitor, mem::RootType rootType);

    /// Update pointers to moved objects in the global storage.
    // NOTE(alovkov): take a closure from gc
    void UpdateMovedRefs();

    void ClearUnmarkedWeakRefs(const GC *gc, const mem::GC::ReferenceClearPredicateT &pred);

    size_t GetSize();

    void Dump();

private:
    NO_COPY_SEMANTIC(GlobalObjectStorage);
    NO_MOVE_SEMANTIC(GlobalObjectStorage);

    class ArrayStorage;

    static constexpr size_t GLOBAL_REF_SIZE_WARNING_LINE = 20;

    mem::InternalAllocatorPtr allocator_;
    ArrayStorage *globalStorage_;
    ArrayStorage *globalFixedStorage_;
    ArrayStorage *weakStorage_;

    static void AssertType([[maybe_unused]] Reference::ObjectType type)
    {
        ASSERT(type == Reference::ObjectType::GLOBAL || type == Reference::ObjectType::GLOBAL_FIXED ||
               type == Reference::ObjectType::WEAK);
    }

    friend class ::panda::mem::test::ReferenceStorageTest;

    class ArrayStorage {
#ifndef NDEBUG
        // for better coverage of EnsureCapacity
        static constexpr size_t INITIAL_SIZE = 2;
#else
        static constexpr size_t INITIAL_SIZE = 128;
#endif  // NDEBUG
        static constexpr size_t FREE_INDEX_BIT = 0;
        static constexpr size_t BITS_FOR_TYPE = 2U;
        static constexpr size_t BITS_FOR_INDEX = 1U;
        static constexpr size_t ENSURE_CAPACITY_MULTIPLIER = 2;

        /**
         * There are 2 cases:
         * 1) When an index is busy, storage_[index] holds the object pointer and its lowest bit is 0 (because of
         *    alignment). The Reference* returned to the user encodes the index shifted by 2 with the reference type
         *    in the lowest bits; it is not stored inside the storage explicitly.
         *
         * 2) When an index is free, storage_[index] holds the next free index (shifted by 1) with the lowest bit
         *    equal to 1.
         */
        /*
        |----------------------|------------------------|------------------|------------------|
        | Case                 | Highest bits           | [1] lowest bit   | [0] lowest bit   |
        |----------------------|------------------------|------------------|------------------|
        | busy index:          |                        |                  |                  |
        |   Reference* (index) | index                  | 0/1 (ref-type)   | 0/1 (ref-type)   |
        |   storage[index]     | object address         | xxx              | 0                |
        |----------------------|------------------------|------------------|------------------|
        | free index:          |                        |                  |                  |
        |   storage[index]     | next free index        | xxx              | 1                |
        |----------------------|------------------------|------------------|------------------|
        */
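
        // Worked example of the encoding above (a sketch; the concrete type-bit values depend on
        // Reference::ObjectType):
        //   free slot:  if slot 3 is free and the next free slot is 5, then
        //               storage_[3] = EncodeNextIndex(5) = (5 << BITS_FOR_INDEX) | 1 = 0b1011
        //   busy slot:  if slot 3 holds an object at address 0x7ff0 (aligned, so the lowest bit is 0), then
        //               storage_[3] = 0x7ff0
        //   Reference*: for slot 3 the user receives (3 + 1) << BITS_FOR_TYPE = 0x10; the two lowest bits carry the
        //               reference type, which is attached outside ArrayStorage (see GlobalObjectStorage::Get above).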

        PandaVector<uintptr_t> storage_ GUARDED_BY(mutex_) {};
        /// Index of the first available block in the free list
        uintptr_t firstAvailableBlock_;
        /// Number of available blocks in the current storage (can grow while the size is less than the max size)
        size_t blocksAvailable_;

        bool enableSizeCheck_;
        bool isFixed_;
        size_t maxSize_;

        mutable os::memory::RWLock mutex_;
        mem::InternalAllocatorPtr allocator_;

    public:
        explicit ArrayStorage(mem::InternalAllocatorPtr allocator, size_t maxSize, bool enableSizeCheck,
                              bool isFixed = false)
            : enableSizeCheck_(enableSizeCheck), isFixed_(isFixed), maxSize_(maxSize), allocator_(allocator)
        {
            ASSERT(maxSize < (std::numeric_limits<uintptr_t>::max() >> (BITS_FOR_TYPE)));

            blocksAvailable_ = isFixed ? maxSize : INITIAL_SIZE;
            firstAvailableBlock_ = 0;

            storage_.resize(blocksAvailable_);
            for (size_t i = 0; i < storage_.size() - 1; i++) {
                storage_[i] = EncodeNextIndex(i + 1);
            }
            storage_[storage_.size() - 1] = 0;
        }

        ~ArrayStorage() = default;

        NO_COPY_SEMANTIC(ArrayStorage);
        NO_MOVE_SEMANTIC(ArrayStorage);

        Reference *Add(const ObjectHeader *object)
        {
            ASSERT(object != nullptr);
            os::memory::WriteLockHolder lk(mutex_);

            if (blocksAvailable_ == 0) {
                if (storage_.size() * ENSURE_CAPACITY_MULTIPLIER <= maxSize_ && !isFixed_) {
                    EnsureCapacity();
                } else {
                    LOG(ERROR, GC) << "Global reference storage is full";
                    Dump();
                    return nullptr;
                }
            }
            ASSERT(blocksAvailable_ != 0);
            auto nextBlock = DecodeIndex(storage_[firstAvailableBlock_]);
            auto currentIndex = firstAvailableBlock_;
            AssertIndex(currentIndex);

            auto addr = reinterpret_cast<uintptr_t>(object);
            [[maybe_unused]] uintptr_t lastBit = BitField<uintptr_t, FREE_INDEX_BIT>::Get(addr);
            ASSERT(lastBit == 0);  // every object should be aligned

            storage_[currentIndex] = addr;
            auto ref = IndexToReference(currentIndex);
            firstAvailableBlock_ = nextBlock;
            blocksAvailable_--;

            CheckAlmostOverflow();
            return ref;
        }

        void EnsureCapacity() REQUIRES(mutex_)
        {
            auto prevLength = storage_.size();
            size_t newLength = storage_.size() * ENSURE_CAPACITY_MULTIPLIER;
            blocksAvailable_ = firstAvailableBlock_ = prevLength;
            storage_.resize(newLength);
            for (size_t i = prevLength; i < newLength - 1; i++) {
                storage_[i] = EncodeNextIndex(i + 1);
            }
            storage_[storage_.size() - 1] = 0;
            LOG(DEBUG, GC) << "Increase global storage from: " << prevLength << " to: " << newLength;
        }
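
        // Free-list sketch (illustrative, for a non-fixed storage built in debug mode, i.e. INITIAL_SIZE == 2):
        //   after the constructor:  storage_ = {EncodeNextIndex(1), 0}, firstAvailableBlock_ = 0, blocksAvailable_ = 2
        //   after two Add calls:    both slots hold object addresses, blocksAvailable_ = 0
        //   on the third Add call:  EnsureCapacity() doubles the vector to 4 slots and links slots 2..3 into a new
        //                           free list starting at firstAvailableBlock_ = 2 (provided 4 <= maxSize_)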

        void CheckAlmostOverflow() REQUIRES_SHARED(mutex_)
        {
            size_t nowSize = GetSize();
            if (enableSizeCheck_ && nowSize >= maxSize_ - GLOBAL_REF_SIZE_WARNING_LINE) {
                LOG(INFO, GC) << "Global reference storage almost overflow. now size: " << nowSize
                              << ", max size: " << maxSize_;
                // NOTE(xucheng): Dump global reference storage info now. May use Thread::Dump() when it can be used.
                Dump();
            }
        }

        ObjectHeader *Get(const Reference *ref) const
        {
            os::memory::ReadLockHolder lk(mutex_);
            auto index = ReferenceToIndex(ref);
            return reinterpret_cast<ObjectHeader *>(storage_[index]);
        }

        uintptr_t GetAddressForRef(const Reference *ref) const
        {
            os::memory::ReadLockHolder lk(mutex_);
            ASSERT(isFixed_);
            auto index = ReferenceToIndex(ref);
            return reinterpret_cast<uintptr_t>(&storage_[index]);
        }

        void Remove(const Reference *ref)
        {
            os::memory::WriteLockHolder lk(mutex_);
            auto index = ReferenceToIndex(ref);
            storage_[index] = EncodeNextIndex(firstAvailableBlock_);
            firstAvailableBlock_ = index;
            blocksAvailable_++;
        }

        void UpdateMovedRefs()
        {
            os::memory::WriteLockHolder lk(mutex_);
            // NOLINTNEXTLINE(modernize-loop-convert)
            for (size_t index = 0; index < storage_.size(); index++) {
                auto ref = storage_[index];
                if (IsBusy(ref)) {
                    auto obj = reinterpret_cast<ObjectHeader *>(ref);
                    if (obj != nullptr && obj->IsForwarded()) {
                        auto newAddr = reinterpret_cast<ObjectHeader *>(GetForwardAddress(obj));
                        LOG(DEBUG, GC) << "Global ref update from: " << obj << " to: " << newAddr;
                        storage_[index] = ToUintPtr(newAddr);
                    }
                }
            }
        }

        void VisitObjects(const GCRootVisitor &gcRootVisitor, mem::RootType rootType)
        {
            os::memory::ReadLockHolder lk(mutex_);

            for (const auto &ref : storage_) {
                if (IsBusy(ref)) {
                    auto obj = reinterpret_cast<ObjectHeader *>(ref);
                    if (obj != nullptr) {
                        LOG(DEBUG, GC) << " Found root from global storage: " << mem::GetDebugInfoAboutObject(obj);
                        gcRootVisitor({rootType, obj});
                    }
                }
            }
        }

        void ClearUnmarkedWeakRefs(const GC *gc, const mem::GC::ReferenceClearPredicateT &pred)
        {
            ASSERT(IsMarking(gc->GetGCPhase()));
            os::memory::WriteLockHolder lk(mutex_);

            for (auto &ref : storage_) {
                if (IsBusy(ref)) {
                    auto obj = reinterpret_cast<ObjectHeader *>(ref);
                    if (obj != nullptr && pred(obj) && !gc->IsMarked(obj)) {
                        LOG(DEBUG, GC) << "Clear not marked weak-reference: " << std::hex << ref << " object: " << obj;
                        ref = reinterpret_cast<uintptr_t>(nullptr);
                    }
                }
            }
        }
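
        // Note on cleared weak references (illustrative): ClearUnmarkedWeakRefs() overwrites the slot with nullptr,
        // which still reads as a busy slot (lowest bit 0), so a later Get() on that reference returns nullptr rather
        // than a stale object, and Remove() is still required to return the slot to the free list. Sketch:
        //
        //   Reference *weak = storage->Add(obj, Reference::ObjectType::WEAK);
        //   // ... obj becomes unreachable and a GC cycle clears the slot ...
        //   ASSERT(storage->Get(weak) == nullptr);
        //   storage->Remove(weak);  // frees the slot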

        PandaVector<ObjectHeader *> GetAllObjects()
        {
            auto objects = PandaVector<ObjectHeader *>(allocator_->Adapter());
            {
                os::memory::ReadLockHolder lk(mutex_);
                for (const auto &ref : storage_) {
                    // we don't return nulls from GetAllObjects
                    if (ref != 0 && IsBusy(ref)) {
                        auto obj = reinterpret_cast<ObjectHeader *>(ref);
                        objects.push_back(obj);
                    }
                }
            }
            return objects;
        }

        bool IsValidGlobalRef(const Reference *ref)
        {
            ASSERT(ref != nullptr);
            os::memory::ReadLockHolder lk(mutex_);
            uintptr_t index = ReferenceToIndex<false>(ref);
            if (index >= storage_.size()) {
                return false;
            }
            if (IsFreeIndex(index)) {
                return false;
            }
            return true;
        }

        void DumpWithLock()
        {
            os::memory::ReadLockHolder lk(mutex_);
            Dump();
        }

        void Dump() REQUIRES_SHARED(mutex_)
        {
            if (DfxController::IsInitialized() &&
                DfxController::GetOptionValue(DfxOptionHandler::REFERENCE_DUMP) != 1) {
                return;
            }
            static constexpr size_t DUMP_NUMS = 20;
            size_t num = 0;
            LOG(INFO, GC) << "Dump the last " << DUMP_NUMS << " global references info:";

            for (auto it = storage_.rbegin(); it != storage_.rend(); it++) {
                uintptr_t ref = *it;
                if (IsBusy(ref)) {
                    auto obj = reinterpret_cast<ObjectHeader *>(ref);
                    LOG(INFO, GC) << "\t Index: " << GetSize() - num << ", Global reference: " << std::hex << ref
                                  << ", Object: " << std::hex << obj
                                  << ", Class: " << obj->ClassAddr<panda::Class>()->GetName();
                    num++;
                    if (num == DUMP_NUMS || num > GetSize()) {
                        break;
                    }
                }
            }
        }

        size_t GetSize() const REQUIRES_SHARED(mutex_)
        {
            return storage_.size() - blocksAvailable_;
        }

        size_t GetSizeWithLock() const
        {
            os::memory::ReadLockHolder globalLock(mutex_);
            return GetSize();
        }

        bool IsFreeIndex(uintptr_t index) REQUIRES_SHARED(mutex_)
        {
            return IsFreeValue(storage_[index]);
        }

        bool IsFreeValue(uintptr_t value)
        {
            uintptr_t lastBit = BitField<uintptr_t, FREE_INDEX_BIT>::Get(value);
            return lastBit == 1;
        }

        bool IsBusy(uintptr_t value)
        {
            return !IsFreeValue(value);
        }

        static uintptr_t EncodeObjectIndex(uintptr_t index)
        {
            ASSERT(index < (std::numeric_limits<uintptr_t>::max() >> BITS_FOR_INDEX));
            return index << BITS_FOR_INDEX;
        }

        static uintptr_t EncodeNextIndex(uintptr_t index)
        {
            uintptr_t shiftedIndex = EncodeObjectIndex(index);
            BitField<uintptr_t, FREE_INDEX_BIT>::Set(1, &shiftedIndex);
            return shiftedIndex;
        }

        static uintptr_t DecodeIndex(uintptr_t index)
        {
            return index >> BITS_FOR_INDEX;
        }

        /**
         * We add 1 so that a valid reference is never nullptr and can be distinguished from the case when a reference
         * could not be created. The shift by 2 is needed because every Reference stores its type in the lowest 2 bits.
         */
        Reference *IndexToReference(uintptr_t encodedIndex) const REQUIRES_SHARED(mutex_)
        {
            AssertIndex(DecodeIndex(encodedIndex));
            return reinterpret_cast<Reference *>((encodedIndex + 1) << BITS_FOR_TYPE);
        }

        template <bool CHECK_ASSERT = true>
        uintptr_t ReferenceToIndex(const Reference *ref) const REQUIRES_SHARED(mutex_)
        {
            if (CHECK_ASSERT) {
                AssertIndex(ref);
            }
            return (reinterpret_cast<uintptr_t>(ref) >> BITS_FOR_TYPE) - 1;
        }
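
        // Round-trip sketch for the two helpers above (assuming the type bits were already stripped by
        // Reference::GetRefWithoutType):
        //   IndexToReference(3)                  -> reinterpret_cast<Reference *>((3 + 1) << 2) == (Reference *)0x10
        //   ReferenceToIndex((Reference *)0x10)  -> (0x10 >> 2) - 1 == 3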

        void AssertIndex(const Reference *ref) const REQUIRES_SHARED(mutex_)
        {
            auto decodedIndex = (reinterpret_cast<uintptr_t>(ref) >> BITS_FOR_TYPE) - 1;
            AssertIndex(DecodeIndex(decodedIndex));
        }

        void AssertIndex([[maybe_unused]] uintptr_t index) const REQUIRES_SHARED(mutex_)
        {
            ASSERT(static_cast<uintptr_t>(index) < storage_.size());
        }

        // test usage only
        size_t GetVectorSize()
        {
            os::memory::ReadLockHolder lk(mutex_);
            return storage_.size();
        }

        friend class ::panda::mem::test::ReferenceStorageTest;
    };
};
}  // namespace panda::mem
#endif  // PANDA_GLOBAL_OBJECT_STORAGE_H