/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef PANDA_GLOBAL_OBJECT_STORAGE_H
#define PANDA_GLOBAL_OBJECT_STORAGE_H

#include <libpandabase/os/mutex.h>

#include "runtime/include/runtime.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/object_header.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/gc/gc_root.h"
#include "runtime/mem/gc/gc_phase.h"
#include "runtime/include/class.h"
#include "runtime/include/panda_vm.h"
#include "reference.h"
#include "utils/logger.h"
#include "utils/dfx.h"

namespace ark::mem::test {
class ReferenceStorageTest;
}  // namespace ark::mem::test

namespace ark::mem {

/**
 * Storage for objects that need to be handled by the GC. The GC handles moving these objects and will not reclaim
 * them until the user calls the Remove method for the corresponding reference.
 * References are released by the Remove method or by the storage's destructor.
 */
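// A minimal usage sketch (illustrative only; `storage` and `obj` are assumed to be provided by the caller):
//   Reference *ref = storage->Add(obj, Reference::ObjectType::GLOBAL);  // may return nullptr if the storage is full
//   ObjectHeader *current = storage->Get(ref);                          // follows any moves performed by the GC
//   storage->Remove(ref);                                               // ref must not be used after this call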
class GlobalObjectStorage {
public:
    explicit GlobalObjectStorage(mem::InternalAllocatorPtr allocator, size_t maxSize, bool enableSizeCheck);

    ~GlobalObjectStorage();

    /// Check whether ref is a valid global reference or not.
    bool IsValidGlobalRef(const Reference *ref) const;

    /// Add an object to the storage and return the reference associated with it.
    PANDA_PUBLIC_API Reference *Add(const ObjectHeader *object, Reference::ObjectType type) const;

    /// Get the stored object associated with the given reference. The reference must have been returned by Add.
    ObjectHeader *Get(const Reference *reference) const
    {
        if (reference == nullptr) {
            return nullptr;
        }
        auto type = reference->GetType();
        reference = Reference::GetRefWithoutType(reference);
        AssertType(type);
        ObjectHeader *result = nullptr;
        if (type == Reference::ObjectType::GLOBAL) {
            result = globalStorage_->Get(reference);
        } else if (type == Reference::ObjectType::WEAK) {
            result = weakStorage_->Get(reference);
        } else {
            result = globalFixedStorage_->Get(reference);
        }
        return result;
    }

    uintptr_t GetAddressForRef(const Reference *reference) const
    {
        ASSERT(reference != nullptr);
        auto type = reference->GetType();
        reference = Reference::GetRefWithoutType(reference);
        AssertType(type);
        uintptr_t result = 0;
        if (type == Reference::ObjectType::GLOBAL) {
            result = globalStorage_->GetAddressForRef(reference);
        } else if (type == Reference::ObjectType::WEAK) {
            result = weakStorage_->GetAddressForRef(reference);
        } else {
            result = globalFixedStorage_->GetAddressForRef(reference);
        }
        return result;
    }

    /// Remove the object from the storage by the given reference. The reference must have been returned by Add.
    PANDA_PUBLIC_API void Remove(const Reference *reference);

    /// Get all objects from the storage. Used for debugging.
    PandaVector<ObjectHeader *> GetAllObjects();

    void VisitObjects(const GCRootVisitor &gcRootVisitor, mem::RootType rootType);

    /// Update pointers to moved objects in the global storage.
    // NOTE(alovkov): take a closure from gc
    void UpdateMovedRefs();

    void ClearUnmarkedWeakRefs(const GC *gc, const mem::GC::ReferenceClearPredicateT &pred);

    void ClearWeakRefs(const mem::GC::ReferenceClearPredicateT &pred);

    size_t GetSize();

    void Dump();

private:
    NO_COPY_SEMANTIC(GlobalObjectStorage);
    NO_MOVE_SEMANTIC(GlobalObjectStorage);

    class ArrayStorage;

    static constexpr size_t GLOBAL_REF_SIZE_WARNING_LINE = 20;

    mem::InternalAllocatorPtr allocator_;
    ArrayStorage *globalStorage_;
    ArrayStorage *globalFixedStorage_;
    ArrayStorage *weakStorage_;

    static void AssertType([[maybe_unused]] Reference::ObjectType type)
    {
        ASSERT(type == Reference::ObjectType::GLOBAL || type == Reference::ObjectType::GLOBAL_FIXED ||
               type == Reference::ObjectType::WEAK);
    }

    friend class ::ark::mem::test::ReferenceStorageTest;

    class ArrayStorage {
#ifndef NDEBUG
        // for better coverage of EnsureCapacity
        static constexpr size_t INITIAL_SIZE = 2;
#else
        static constexpr size_t INITIAL_SIZE = 128;
#endif  // NDEBUG
        static constexpr size_t FREE_INDEX_BIT = 0;
        static constexpr size_t BITS_FOR_TYPE = 2U;
        static constexpr size_t BITS_FOR_INDEX = 1U;
        static constexpr size_t ENSURE_CAPACITY_MULTIPLIER = 2;

        /**
         * There are 2 cases:
         * 1) When the index is busy - the object's address is stored in storage_ and its lowest bit is 0 (because of
         * alignment). The Reference* returned to the user holds the index shifted by 2 with the reference type in the
         * lowest bits; the Reference* itself is not stored inside the storage explicitly.
         *
         * 2) When the index is free - storage_[index] stores the next free index (shifted by 1) with the lowest bit
         * equal to 1.
         */
        /*
        |---------------------|---------------------------------|------------------|------------------|
        |        Case         |          Highest bits           |  [1] lowest bit  |  [0] lowest bit  |
        |---------------------|---------------------------------|------------------|------------------|
        | busy index          |                                 |                  |                  |
        |  Reference* (index) |              index              |  0/1 (ref-type)  |  0/1 (ref-type)  |
        |  storage[index]     |              object address                        |        0         |
        |---------------------|----------------------------------------------------|------------------|
        | free index          |                                                    |                  |
        |  storage[index]     |              next free index                       |        1         |
        |---------------------|----------------------------------------------------|------------------|
        */
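        // Worked example (a sketch based on the constants above, BITS_FOR_INDEX = 1 and BITS_FOR_TYPE = 2):
        //   - busy slot 3: storage_[3] holds the object's address directly (lowest bit 0 due to alignment), and
        //     IndexToReference(3) hands the user (3 + 1) << 2 = 0x10 with the reference type in the two lowest bits.
        //   - free slot 3 whose next free slot is 5: storage_[3] = EncodeNextIndex(5) = (5 << 1) | 1 = 0xB.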

        PandaVector<uintptr_t> storage_ GUARDED_BY(mutex_) {};
        /// Index of the first available block in the free list
        uintptr_t firstAvailableBlock_;
        /// Number of blocks currently available in the storage (it can grow while its size is below maxSize_)
        size_t blocksAvailable_;

        bool enableSizeCheck_;
        bool isFixed_;
        size_t maxSize_;

        mutable os::memory::RWLock mutex_;
        mem::InternalAllocatorPtr allocator_;

    public:
        explicit ArrayStorage(mem::InternalAllocatorPtr allocator, size_t maxSize, bool enableSizeCheck,
                              bool isFixed = false)
            : enableSizeCheck_(enableSizeCheck), isFixed_(isFixed), maxSize_(maxSize), allocator_(allocator)
        {
            ASSERT(maxSize < (std::numeric_limits<uintptr_t>::max() >> (BITS_FOR_TYPE)));

            blocksAvailable_ = isFixed ? maxSize : INITIAL_SIZE;
            firstAvailableBlock_ = 0;

            storage_.resize(blocksAvailable_);
            for (size_t i = 0; i < storage_.size() - 1; i++) {
                storage_[i] = EncodeNextIndex(i + 1);
            }
            storage_[storage_.size() - 1] = 0;
        }

        ~ArrayStorage() = default;

        NO_COPY_SEMANTIC(ArrayStorage);
        NO_MOVE_SEMANTIC(ArrayStorage);

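        // Pops the head of the free list (firstAvailableBlock_), stores the object's address in that slot and
        // returns the encoded Reference*. Non-fixed storages grow on demand; nullptr is returned when the
        // storage is full and cannot grow.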
        Reference *Add(const ObjectHeader *object)
        {
            ASSERT(object != nullptr);
            os::memory::WriteLockHolder lk(mutex_);

            if (blocksAvailable_ == 0) {
                if (storage_.size() * ENSURE_CAPACITY_MULTIPLIER <= maxSize_ && !isFixed_) {
                    EnsureCapacity();
                } else {
                    LOG(ERROR, GC) << "Global reference storage is full";
                    Dump();
                    return nullptr;
                }
            }
            ASSERT(blocksAvailable_ != 0);
            auto nextBlock = DecodeIndex(storage_[firstAvailableBlock_]);
            auto currentIndex = firstAvailableBlock_;
            AssertIndex(currentIndex);

            auto addr = reinterpret_cast<uintptr_t>(object);
            [[maybe_unused]] uintptr_t lastBit = BitField<uintptr_t, FREE_INDEX_BIT>::Get(addr);
            ASSERT(lastBit == 0);  // every object should be aligned

            storage_[currentIndex] = addr;
            auto ref = IndexToReference(currentIndex);
            firstAvailableBlock_ = nextBlock;
            blocksAvailable_--;

            CheckAlmostOverflow();
            return ref;
        }

        void EnsureCapacity() REQUIRES(mutex_)
        {
            auto prevLength = storage_.size();
            size_t newLength = storage_.size() * ENSURE_CAPACITY_MULTIPLIER;
            blocksAvailable_ = firstAvailableBlock_ = prevLength;
            storage_.resize(newLength);
            for (size_t i = prevLength; i < newLength - 1; i++) {
                storage_[i] = EncodeNextIndex(i + 1);
            }
            storage_[storage_.size() - 1] = 0;
            LOG(DEBUG, GC) << "Increase global storage from: " << prevLength << " to: " << newLength;
        }

        void CheckAlmostOverflow() REQUIRES_SHARED(mutex_)
        {
            size_t nowSize = GetSize();
            if (enableSizeCheck_ && nowSize >= maxSize_ - GLOBAL_REF_SIZE_WARNING_LINE) {
                LOG(INFO, GC) << "Global reference storage almost overflow. now size: " << nowSize
                              << ", max size: " << maxSize_;
                // NOTE(xucheng): Dump global reference storage info now. May use Thread::Dump() when it can be used.
                Dump();
            }
        }

        ObjectHeader *Get(const Reference *ref) const
        {
            os::memory::ReadLockHolder lk(mutex_);
            auto index = ReferenceToIndex(ref);
            return reinterpret_cast<ObjectHeader *>(storage_[index]);
        }

        uintptr_t GetAddressForRef(const Reference *ref) const
        {
            os::memory::ReadLockHolder lk(mutex_);
            ASSERT(isFixed_);
            auto index = ReferenceToIndex(ref);
            return reinterpret_cast<uintptr_t>(&storage_[index]);
        }

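        // Returns the slot referenced by `ref` to the free list; the reference must not be used after this call.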
        void Remove(const Reference *ref)
        {
            os::memory::WriteLockHolder lk(mutex_);
            auto index = ReferenceToIndex(ref);
            storage_[index] = EncodeNextIndex(firstAvailableBlock_);
            firstAvailableBlock_ = index;
            blocksAvailable_++;
        }

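        // Rewrites every busy slot whose object has been forwarded (moved by the GC) so it holds the new address.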
        void UpdateMovedRefs()
        {
            os::memory::WriteLockHolder lk(mutex_);
            // NOLINTNEXTLINE(modernize-loop-convert)
            for (size_t index = 0; index < storage_.size(); index++) {
                auto ref = storage_[index];
                if (IsBusy(ref)) {
                    auto obj = reinterpret_cast<ObjectHeader *>(ref);
                    if (obj != nullptr && obj->IsForwarded()) {
                        auto newAddr = reinterpret_cast<ObjectHeader *>(GetForwardAddress(obj));
                        LOG(DEBUG, GC) << "Global ref update from: " << obj << " to: " << newAddr;
                        storage_[index] = ToUintPtr(newAddr);
                    }
                }
            }
        }

        void VisitObjects(const GCRootVisitor &gcRootVisitor, mem::RootType rootType)
        {
            os::memory::ReadLockHolder lk(mutex_);

            for (const auto &ref : storage_) {
                if (IsBusy(ref)) {
                    auto obj = reinterpret_cast<ObjectHeader *>(ref);
                    if (obj != nullptr) {
                        LOG(DEBUG, GC) << " Found root from global storage: " << mem::GetDebugInfoAboutObject(obj);
                        gcRootVisitor({rootType, obj});
                    }
                }
            }
        }

        void ClearWeakRefs(const mem::GC::ReferenceClearPredicateT &pred)
        {
            os::memory::WriteLockHolder lk(mutex_);

            for (auto &ref : storage_) {
                if (IsBusy(ref)) {
                    auto obj = reinterpret_cast<ObjectHeader *>(ref);
                    if (obj != nullptr && pred(obj)) {
                        LOG(DEBUG, GC) << "Clear weak-reference: " << obj;
                        ref = reinterpret_cast<uintptr_t>(nullptr);
                    }
                }
            }
        }

        PandaVector<ObjectHeader *> GetAllObjects()
        {
            auto objects = PandaVector<ObjectHeader *>(allocator_->Adapter());
            {
                os::memory::ReadLockHolder lk(mutex_);
                for (const auto &ref : storage_) {
                    // we don't return nulls from GetAllObjects
                    if (ref != 0 && IsBusy(ref)) {
                        auto obj = reinterpret_cast<ObjectHeader *>(ref);
                        objects.push_back(obj);
                    }
                }
            }
            return objects;
        }

        bool IsValidGlobalRef(const Reference *ref)
        {
            ASSERT(ref != nullptr);
            os::memory::ReadLockHolder lk(mutex_);
            uintptr_t index = ReferenceToIndex<false>(ref);
            if (index >= storage_.size()) {
                return false;
            }
            if (IsFreeIndex(index)) {
                return false;
            }
            return true;
        }

        void DumpWithLock()
        {
            os::memory::ReadLockHolder lk(mutex_);
            Dump();
        }

        void Dump() REQUIRES_SHARED(mutex_)
        {
            if (DfxController::IsInitialized() &&
                DfxController::GetOptionValue(DfxOptionHandler::REFERENCE_DUMP) != 1) {
                return;
            }
            static constexpr size_t DUMP_NUMS = 20;
            size_t num = 0;
            LOG(INFO, GC) << "Dump the last " << DUMP_NUMS << " global references info:";

            for (auto it = storage_.rbegin(); it != storage_.rend(); it++) {
                uintptr_t ref = *it;
                if (IsBusy(ref)) {
                    auto obj = reinterpret_cast<ObjectHeader *>(ref);
                    LOG(INFO, GC) << "\t Index: " << GetSize() - num << ", Global reference: " << std::hex << ref
                                  << ", Object: " << std::hex << obj
                                  << ", Class: " << obj->ClassAddr<ark::Class>()->GetName();
                    num++;
                    if (num == DUMP_NUMS || num > GetSize()) {
                        break;
                    }
                }
            }
        }

        size_t GetSize() const REQUIRES_SHARED(mutex_)
        {
            return storage_.size() - blocksAvailable_;
        }

        size_t GetSizeWithLock() const
        {
            os::memory::ReadLockHolder globalLock(mutex_);
            return GetSize();
        }

        bool IsFreeIndex(uintptr_t index) REQUIRES_SHARED(mutex_)
        {
            return IsFreeValue(storage_[index]);
        }

        bool IsFreeValue(uintptr_t value)
        {
            uintptr_t lastBit = BitField<uintptr_t, FREE_INDEX_BIT>::Get(value);
            return lastBit == 1;
        }

        bool IsBusy(uintptr_t value)
        {
            return !IsFreeValue(value);
        }

        static uintptr_t EncodeObjectIndex(uintptr_t index)
        {
            ASSERT(index < (std::numeric_limits<uintptr_t>::max() >> BITS_FOR_INDEX));
            return index << BITS_FOR_INDEX;
        }

        static uintptr_t EncodeNextIndex(uintptr_t index)
        {
            uintptr_t shiftedIndex = EncodeObjectIndex(index);
            BitField<uintptr_t, FREE_INDEX_BIT>::Set(1, &shiftedIndex);
            return shiftedIndex;
        }

        static uintptr_t DecodeIndex(uintptr_t index)
        {
            return index >> BITS_FOR_INDEX;
        }

        /**
         * We add 1 so that index 0 does not map to nullptr, which would be indistinguishable from a failed
         * reference creation. The shift by 2 is needed because every Reference stores its type in the lowest 2 bits.
         */
        Reference *IndexToReference(uintptr_t encodedIndex) const REQUIRES_SHARED(mutex_)
        {
            AssertIndex(DecodeIndex(encodedIndex));
            return reinterpret_cast<Reference *>((encodedIndex + 1) << BITS_FOR_TYPE);
        }

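        // Inverse of IndexToReference: recovers the slot index from an encoded Reference*. CHECK_ASSERT = false is
        // used by IsValidGlobalRef, which has to tolerate references whose index may be out of range or freed.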
        template <bool CHECK_ASSERT = true>
        uintptr_t ReferenceToIndex(const Reference *ref) const REQUIRES_SHARED(mutex_)
        {
            if (CHECK_ASSERT) {
                AssertIndex(ref);
            }
            return (reinterpret_cast<uintptr_t>(ref) >> BITS_FOR_TYPE) - 1;
        }

        void AssertIndex(const Reference *ref) const REQUIRES_SHARED(mutex_)
        {
            auto decodedIndex = (reinterpret_cast<uintptr_t>(ref) >> BITS_FOR_TYPE) - 1;
            AssertIndex(DecodeIndex(decodedIndex));
        }

        void AssertIndex([[maybe_unused]] uintptr_t index) const REQUIRES_SHARED(mutex_)
        {
            ASSERT(static_cast<uintptr_t>(index) < storage_.size());
        }

        // test usage only
        size_t GetVectorSize()
        {
            os::memory::ReadLockHolder lk(mutex_);
            return storage_.size();
        }

        friend class ::ark::mem::test::ReferenceStorageTest;
    };
};
}  // namespace ark::mem
#endif  // PANDA_GLOBAL_OBJECT_STORAGE_H