• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #ifndef PANDA_GLOBAL_OBJECT_STORAGE_H
16 #define PANDA_GLOBAL_OBJECT_STORAGE_H
17 
#include <cstdint>
#include <limits>

#include <libpandabase/os/mutex.h>

#include "runtime/include/runtime.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/object_header.h"
#include "runtime/mem/object_helpers.h"
#include "runtime/mem/gc/gc.h"
#include "runtime/mem/gc/gc_root.h"
#include "runtime/mem/gc/gc_phase.h"
#include "runtime/include/class.h"
#include "runtime/include/panda_vm.h"
#include "reference.h"
#include "utils/logger.h"
31 
namespace panda::mem::test {
// Forward declaration only: storage classes below grant this test friendship.
class ReferenceStorageTest;
}  // namespace panda::mem::test
35 
36 namespace panda::mem {
37 
/**
 * Storage for objects that must be handled by the GC. The GC will update references to these objects
 * when they are moved and will not reclaim an object until the user has called the Remove method for it.
 * References are removed automatically by the Remove method or by the storage's destructor.
 */
43 class GlobalObjectStorage {
44 public:
45     explicit GlobalObjectStorage(mem::InternalAllocatorPtr allocator, size_t max_size, bool enable_size_check);
46 
47     ~GlobalObjectStorage();
48 
49     /**
50      * Check whether ref is a valid global reference or not.
51      */
52     bool IsValidGlobalRef(const Reference *ref) const;
53 
54     /**
55      * Add object to the storage and return associated pointer with this object
56      */
57     Reference *Add(const ObjectHeader *object, Reference::ObjectType type) const;
58 
59     /**
60      * Get stored object associated with given reference. Reference should be returned on Add method before.
61      */
62     ObjectHeader *Get(const Reference *reference) const;
63 
64     /**
65      * Remove object from storage by given reference. Reference should be returned on Add method before.
66      */
67     void Remove(const Reference *reference);
68 
69     /**
70      * Get all objects from storage. Used by debugging.
71      */
72     PandaVector<ObjectHeader *> GetAllObjects();
73 
74     void VisitObjects(const GCRootVisitor &gc_root_visitor, mem::RootType rootType);
75 
76     /**
77      * Update pointers to moved Objects in global storage.
78      */
79     // TODO(alovkov): take a closure from gc
80     void UpdateMovedRefs();
81 
82     void ClearUnmarkedWeakRefs(const GC *gc, const mem::GC::ReferenceClearPredicateT &pred);
83 
84     size_t GetSize();
85 
86     void Dump();
87 
88 private:
89     NO_COPY_SEMANTIC(GlobalObjectStorage);
90     NO_MOVE_SEMANTIC(GlobalObjectStorage);
91 
92     class ArrayStorage;
93 
94     static constexpr size_t GLOBAL_REF_SIZE_WARNING_LINE = 20;
95 
96     mem::InternalAllocatorPtr allocator_;
97     ArrayStorage *global_storage_;
98     ArrayStorage *weak_storage_;
99 
AssertType(Reference::ObjectType type)100     static void AssertType([[maybe_unused]] Reference::ObjectType type)
101     {
102         ASSERT(type == Reference::ObjectType::GLOBAL || type == Reference::ObjectType::WEAK);
103     }
104 
105     friend class ::panda::mem::test::ReferenceStorageTest;
106 
107     class ArrayStorage {
108 #ifndef NDEBUG
109         // for better coverage of EnsureCapacity
110         static constexpr size_t INITIAL_SIZE = 2;
111 #else
112         static constexpr size_t INITIAL_SIZE = 128;
113 #endif  // NDEBUG
114         static constexpr size_t FREE_INDEX_BIT = 0;
115         static constexpr size_t BITS_FOR_TYPE = 2U;
116         static constexpr size_t BITS_FOR_INDEX = 1U;
117         static constexpr size_t ENSURE_CAPACITY_MULTIPLIER = 2;
118 
119         /**
120         There are 2 cases:
121         1) When index is busy - then we store jobject in storage_ and 0 in the lowest bit (cause of alignment).
122         Reference* contains it's index shifted by 2 with reference-type in lowest bits which we return to user and
123         doesn't stores inside storage explicity.
124 
125         2) When index if free - storage[index] stores next free index (shifted by 1) with lowest bit equals to 1
126 
127         |-----------------------------------------------------|------------------|------------------|
128         |      Case          |         Highest bits           | [1] lowest bit   | [0] lowest bit   |
129         --------------------------------------------------------------------------------------------|
130         | busy-index         |                                |                  |                  |
131         | Reference* (index) |            index               |   0/1 (ref-type) |   0/1 (ref-type) |
132         | storage[index]     |                           xxx                     |        0         |
133         ---------------------|--------------------------------|------------------|-------------------
134         | free-index         |                                                   |                  |
135         | storage[index]     |                        xxx                        |        1         |
136         ---------------------------------------------------------------------------------------------
137         */
138 
GUARDED_BY(mutex_)139         PandaVector<uintptr_t> storage_ GUARDED_BY(mutex_) {};
140         /**
141          * Index of first available block in list
142          */
143         uintptr_t first_available_block_;
144         /**
145          * How many blocks are available in current storage (can be increased if size less than max size)
146          */
147         size_t blocks_available_;
148 
149         bool enable_size_check_;
150         size_t max_size_;
151 
152         mutable os::memory::RWLock mutex_;
153         mem::InternalAllocatorPtr allocator_;
154 
155     public:
ArrayStorage(mem::InternalAllocatorPtr allocator,size_t max_size,bool enable_size_check)156         explicit ArrayStorage(mem::InternalAllocatorPtr allocator, size_t max_size, bool enable_size_check)
157             : enable_size_check_(enable_size_check), max_size_(max_size), allocator_(allocator)
158         {
159             ASSERT(max_size < (std::numeric_limits<uintptr_t>::max() >> (BITS_FOR_TYPE)));
160 
161             blocks_available_ = INITIAL_SIZE;
162             first_available_block_ = 0;
163 
164             storage_.resize(INITIAL_SIZE);
165             for (size_t i = 0; i < storage_.size() - 1; i++) {
166                 storage_[i] = EncodeNextIndex(i + 1);
167             }
168             storage_[storage_.size() - 1] = 0;
169         }
170 
171         ~ArrayStorage() = default;
172 
173         NO_COPY_SEMANTIC(ArrayStorage);
174         NO_MOVE_SEMANTIC(ArrayStorage);
175 
Add(const ObjectHeader * object)176         Reference *Add(const ObjectHeader *object)
177         {
178             ASSERT(object != nullptr);
179             os::memory::WriteLockHolder lk(mutex_);
180 
181             if (blocks_available_ == 0) {
182                 if (storage_.size() * ENSURE_CAPACITY_MULTIPLIER <= max_size_) {
183                     EnsureCapacity();
184                 } else {
185                     LOG(ERROR, GC) << "Global reference storage is full";
186                     Dump();
187                     return nullptr;
188                 }
189             }
190             ASSERT(blocks_available_ != 0);
191             auto next_block = DecodeIndex(storage_[first_available_block_]);
192             auto current_index = first_available_block_;
193             AssertIndex(current_index);
194 
195             auto addr = reinterpret_cast<uintptr_t>(object);
196             [[maybe_unused]] uintptr_t last_bit = BitField<uintptr_t, FREE_INDEX_BIT>::Get(addr);
197             ASSERT(last_bit == 0);  // every object should be alignmented
198 
199             storage_[current_index] = addr;
200             auto ref = IndexToReference(current_index);
201             first_available_block_ = next_block;
202             blocks_available_--;
203 
204             CheckAlmostOverflow();
205             return ref;
206         }
207 
EnsureCapacity()208         void EnsureCapacity() REQUIRES(mutex_)
209         {
210             auto prev_length = storage_.size();
211             size_t new_length = storage_.size() * ENSURE_CAPACITY_MULTIPLIER;
212             blocks_available_ = first_available_block_ = prev_length;
213             storage_.resize(new_length);
214             for (size_t i = prev_length; i < new_length - 1; i++) {
215                 storage_[i] = EncodeNextIndex(i + 1);
216             }
217             storage_[storage_.size() - 1] = 0;
218             LOG(DEBUG, GC) << "Increase global storage from: " << prev_length << " to: " << new_length;
219         }
220 
CheckAlmostOverflow()221         void CheckAlmostOverflow() REQUIRES_SHARED(mutex_)
222         {
223             size_t now_size = GetSize();
224             if (enable_size_check_ && now_size >= max_size_ - GLOBAL_REF_SIZE_WARNING_LINE) {
225                 LOG(INFO, GC) << "Global reference storage almost overflow. now size: " << now_size
226                               << ", max size: " << max_size_;
227                 // TODO(xucheng): Dump global reference storage info now. May use Thread::Dump() when it can be used.
228                 Dump();
229             }
230         }
231 
Get(const Reference * ref)232         ObjectHeader *Get(const Reference *ref) const
233         {
234             os::memory::ReadLockHolder lk(mutex_);
235             auto index = ReferenceToIndex(ref);
236             return reinterpret_cast<ObjectHeader *>(storage_[index]);
237         }
238 
Remove(const Reference * ref)239         void Remove(const Reference *ref)
240         {
241             os::memory::WriteLockHolder lk(mutex_);
242             auto index = ReferenceToIndex(ref);
243             storage_[index] = EncodeNextIndex(first_available_block_);
244             first_available_block_ = index;
245             blocks_available_++;
246         }
247 
UpdateMovedRefs()248         void UpdateMovedRefs()
249         {
250             os::memory::WriteLockHolder lk(mutex_);
251             // NOLINTNEXTLINE(modernize-loop-convert)
252             for (size_t index = 0; index < storage_.size(); index++) {
253                 auto ref = storage_[index];
254                 if (IsBusy(ref)) {
255                     auto obj = reinterpret_cast<ObjectHeader *>(ref);
256                     if (obj != nullptr && obj->IsForwarded()) {
257                         auto new_addr = reinterpret_cast<ObjectHeader *>(GetForwardAddress(obj));
258                         LOG(DEBUG, GC) << "Global ref update from: " << obj << " to: " << new_addr;
259                         storage_[index] = ToUintPtr(new_addr);
260                     }
261                 }
262             }
263         }
264 
VisitObjects(const GCRootVisitor & gc_root_visitor,mem::RootType rootType)265         void VisitObjects(const GCRootVisitor &gc_root_visitor, mem::RootType rootType)
266         {
267             os::memory::ReadLockHolder lk(mutex_);
268 
269             for (const auto &ref : storage_) {
270                 if (IsBusy(ref)) {
271                     auto obj = reinterpret_cast<ObjectHeader *>(ref);
272                     if (obj != nullptr) {
273                         LOG(DEBUG, GC) << " Found root from global storage: " << mem::GetDebugInfoAboutObject(obj);
274                         gc_root_visitor({rootType, obj});
275                     }
276                 }
277             }
278         }
279 
ClearUnmarkedWeakRefs(const GC * gc,const mem::GC::ReferenceClearPredicateT & pred)280         void ClearUnmarkedWeakRefs(const GC *gc, const mem::GC::ReferenceClearPredicateT &pred)
281         {
282             ASSERT(IsMarking(gc->GetGCPhase()));
283             os::memory::WriteLockHolder lk(mutex_);
284 
285             for (auto &ref : storage_) {
286                 if (IsBusy(ref)) {
287                     auto obj = reinterpret_cast<ObjectHeader *>(ref);
288                     if (obj != nullptr && pred(obj) && !gc->IsMarked(obj)) {
289                         LOG(DEBUG, GC) << "Clear not marked weak-reference: " << std::hex << ref << " object: " << obj;
290                         ref = reinterpret_cast<uintptr_t>(nullptr);
291                     }
292                 }
293             }
294         }
295 
GetAllObjects()296         PandaVector<ObjectHeader *> GetAllObjects()
297         {
298             auto objects = PandaVector<ObjectHeader *>(allocator_->Adapter());
299             {
300                 os::memory::ReadLockHolder lk(mutex_);
301                 for (const auto &ref : storage_) {
302                     // we don't return nulls on GetAllObjects
303                     if (ref != 0 && IsBusy(ref)) {
304                         auto obj = reinterpret_cast<ObjectHeader *>(ref);
305                         objects.push_back(obj);
306                     }
307                 }
308             }
309             return objects;
310         }
311 
IsValidGlobalRef(const Reference * ref)312         bool IsValidGlobalRef(const Reference *ref)
313         {
314             ASSERT(ref != nullptr);
315             os::memory::ReadLockHolder lk(mutex_);
316             uintptr_t index = ReferenceToIndex<false>(ref);
317             if (index >= storage_.size()) {
318                 return false;
319             }
320             if (IsFreeIndex(index)) {
321                 return false;
322             }
323             return index < storage_.size();
324         }
325 
DumpWithLock()326         void DumpWithLock()
327         {
328             os::memory::ReadLockHolder lk(mutex_);
329             Dump();
330         }
331 
Dump()332         void Dump() REQUIRES_SHARED(mutex_)
333         {
334             static constexpr size_t DUMP_NUMS = 20;
335             size_t num = 0;
336             LOG(INFO, GC) << "Dump the last " << DUMP_NUMS << " global references info:";
337 
338             for (auto it = storage_.rbegin(); it != storage_.rend(); it++) {
339                 uintptr_t ref = *it;
340                 if (IsBusy(ref)) {
341                     auto obj = reinterpret_cast<ObjectHeader *>(ref);
342                     LOG(INFO, GC) << "\t Index: " << GetSize() - num << ", Global reference: " << std::hex << ref
343                                   << ", Object: " << std::hex << obj
344                                   << ", Class: " << obj->ClassAddr<panda::Class>()->GetName();
345                     num++;
346                     if (num == DUMP_NUMS || num > GetSize()) {
347                         break;
348                     }
349                 }
350             }
351         }
352 
GetSize()353         size_t GetSize() const REQUIRES_SHARED(mutex_)
354         {
355             return storage_.size() - blocks_available_;
356         }
357 
GetSizeWithLock()358         size_t GetSizeWithLock() const
359         {
360             os::memory::ReadLockHolder global_lock(mutex_);
361             return GetSize();
362         }
363 
IsFreeIndex(uintptr_t index)364         bool IsFreeIndex(uintptr_t index) REQUIRES_SHARED(mutex_)
365         {
366             return IsFreeValue(storage_[index]);
367         }
368 
IsFreeValue(uintptr_t value)369         bool IsFreeValue(uintptr_t value)
370         {
371             uintptr_t last_bit = BitField<uintptr_t, FREE_INDEX_BIT>::Get(value);
372             return last_bit == 1;
373         }
374 
IsBusy(uintptr_t value)375         bool IsBusy(uintptr_t value)
376         {
377             return !IsFreeValue(value);
378         }
379 
EncodeObjectIndex(uintptr_t index)380         static uintptr_t EncodeObjectIndex(uintptr_t index)
381         {
382             ASSERT(index < (std::numeric_limits<uintptr_t>::max() >> BITS_FOR_INDEX));
383             return index << BITS_FOR_INDEX;
384         }
385 
EncodeNextIndex(uintptr_t index)386         static uintptr_t EncodeNextIndex(uintptr_t index)
387         {
388             uintptr_t shifted_index = EncodeObjectIndex(index);
389             BitField<uintptr_t, FREE_INDEX_BIT>::Set(1, &shifted_index);
390             return shifted_index;
391         }
392 
DecodeIndex(uintptr_t index)393         static uintptr_t DecodeIndex(uintptr_t index)
394         {
395             return index >> BITS_FOR_INDEX;
396         }
397 
398         /**
399          * We need to add 1 to not return nullptr to distinct it from situation when we couldn't create a reference.
400          * Shift by 2 is needed because every Reference stores type in lowest 2 bits.
401          */
IndexToReference(uintptr_t encoded_index)402         Reference *IndexToReference(uintptr_t encoded_index) const REQUIRES_SHARED(mutex_)
403         {
404             AssertIndex(DecodeIndex(encoded_index));
405             return reinterpret_cast<Reference *>((encoded_index + 1) << BITS_FOR_TYPE);
406         }
407 
408         template <bool check_assert = true>
ReferenceToIndex(const Reference * ref)409         uintptr_t ReferenceToIndex(const Reference *ref) const REQUIRES_SHARED(mutex_)
410         {
411             if (check_assert) {
412                 AssertIndex(ref);
413             }
414             return (reinterpret_cast<uintptr_t>(ref) >> BITS_FOR_TYPE) - 1;
415         }
416 
AssertIndex(const Reference * ref)417         void AssertIndex(const Reference *ref) const REQUIRES_SHARED(mutex_)
418         {
419             auto decoded_index = (reinterpret_cast<uintptr_t>(ref) >> BITS_FOR_TYPE) - 1;
420             AssertIndex(DecodeIndex(decoded_index));
421         }
422 
AssertIndex(uintptr_t index)423         void AssertIndex([[maybe_unused]] uintptr_t index) const REQUIRES_SHARED(mutex_)
424         {
425             ASSERT(static_cast<uintptr_t>(index) < storage_.size());
426         }
427 
428         // test usage only
GetVectorSize()429         size_t GetVectorSize()
430         {
431             os::memory::ReadLockHolder lk(mutex_);
432             return storage_.size();
433         }
434 
435         friend class ::panda::mem::test::ReferenceStorageTest;
436     };
437 };
438 }  // namespace panda::mem
439 #endif  // PANDA_GLOBAL_OBJECT_STORAGE_H
440