// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_HEAP_PAGE_H_
#define V8_HEAP_CPPGC_HEAP_PAGE_H_

#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/object-start-bitmap.h"

namespace cppgc {
namespace internal {

class BaseSpace;
class NormalPageSpace;
class LargePageSpace;
class HeapBase;
class PageBackend;

class V8_EXPORT_PRIVATE BasePage {
 public:
  static inline BasePage* FromPayload(void*);
  static inline const BasePage* FromPayload(const void*);

  static BasePage* FromInnerAddress(const HeapBase*, void*);
  static const BasePage* FromInnerAddress(const HeapBase*, const void*);

  static void Destroy(BasePage*);

  BasePage(const BasePage&) = delete;
  BasePage& operator=(const BasePage&) = delete;

  HeapBase& heap() const { return heap_; }

  BaseSpace& space() const { return space_; }

  bool is_large() const { return type_ == PageType::kLarge; }

  Address PayloadStart();
  ConstAddress PayloadStart() const;
  Address PayloadEnd();
  ConstAddress PayloadEnd() const;

  // Returns the size of live objects on the page at the last GC.
  // The counter is updated after sweeping.
  size_t AllocatedBytesAtLastGC() const;

  // |address| must refer to a real object.
  template <AccessMode = AccessMode::kNonAtomic>
  HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
  template <AccessMode = AccessMode::kNonAtomic>
  const HeapObjectHeader& ObjectHeaderFromInnerAddress(
      const void* address) const;

  // |address| is guaranteed to point into the page, but not necessarily into
  // a live object's payload. Returns nullptr when |address| points into a
  // free list entry, and the valid header otherwise. The function is not
  // thread-safe and cannot be called while, e.g., sweeping is in progress.
  HeapObjectHeader* TryObjectHeaderFromInnerAddress(void* address) const;
  const HeapObjectHeader* TryObjectHeaderFromInnerAddress(
      const void* address) const;

  // SynchronizedLoad and SynchronizedStore are used to sync pages after they
  // are allocated. std::atomic_thread_fence is sufficient in practice but is
  // not recognized by tsan. Atomic load and store of the |type_| field are
  // added for tsan builds.
  void SynchronizedLoad() const {
#if defined(THREAD_SANITIZER)
    v8::base::AsAtomicPtr(&type_)->load(std::memory_order_acquire);
#endif
  }
  void SynchronizedStore() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
#if defined(THREAD_SANITIZER)
    v8::base::AsAtomicPtr(&type_)->store(type_, std::memory_order_release);
#endif
  }

  void IncrementDiscardedMemory(size_t value) {
    DCHECK_GE(discarded_memory_ + value, discarded_memory_);
    discarded_memory_ += value;
  }
  void ResetDiscardedMemory() { discarded_memory_ = 0; }
  size_t discarded_memory() const { return discarded_memory_; }

 protected:
  enum class PageType : uint8_t { kNormal, kLarge };
  BasePage(HeapBase&, BaseSpace&, PageType);

 private:
  HeapBase& heap_;
  BaseSpace& space_;
  PageType type_;
  size_t discarded_memory_ = 0;
};

class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
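  // Forward iterator over the HeapObjectHeaders on the page, advancing by
  // each header's AllocatedSize(). The current linear allocation buffer,
  // described by |lab_start|/|lab_size|, is skipped, as it does not contain
  // iterable object headers.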
  template <typename T>
  class IteratorImpl : v8::base::iterator<std::forward_iterator_tag, T> {
   public:
    explicit IteratorImpl(T* p, ConstAddress lab_start = nullptr,
                          size_t lab_size = 0)
        : p_(p), lab_start_(lab_start), lab_size_(lab_size) {
      DCHECK(p);
      DCHECK_EQ(0, (lab_size & (sizeof(T) - 1)));
      if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
        p_ += (lab_size_ / sizeof(T));
      }
    }

    T& operator*() { return *p_; }
    const T& operator*() const { return *p_; }

    bool operator==(IteratorImpl other) const { return p_ == other.p_; }
    bool operator!=(IteratorImpl other) const { return !(*this == other); }

    IteratorImpl& operator++() {
      const size_t size = p_->AllocatedSize();
      DCHECK_EQ(0, (size & (sizeof(T) - 1)));
      p_ += (size / sizeof(T));
      if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
        p_ += (lab_size_ / sizeof(T));
      }
      return *this;
    }
    IteratorImpl operator++(int) {
      IteratorImpl temp(*this);
      ++(*this);
      return temp;
    }

    T* base() const { return p_; }

   private:
    T* p_;
    ConstAddress lab_start_;
    size_t lab_size_;
  };

 public:
  using iterator = IteratorImpl<HeapObjectHeader>;
  using const_iterator = IteratorImpl<const HeapObjectHeader>;

  // Allocates a new page in the detached state.
  static NormalPage* Create(PageBackend&, NormalPageSpace&);
  // Destroys and frees the page. The page must be detached from the
  // corresponding space (i.e. be swept when called).
  static void Destroy(NormalPage*);

  static NormalPage* From(BasePage* page) {
    DCHECK(!page->is_large());
    return static_cast<NormalPage*>(page);
  }
  static const NormalPage* From(const BasePage* page) {
    return From(const_cast<BasePage*>(page));
  }

  iterator begin();
  const_iterator begin() const;

  iterator end() {
    return iterator(reinterpret_cast<HeapObjectHeader*>(PayloadEnd()));
  }
  const_iterator end() const {
    return const_iterator(
        reinterpret_cast<const HeapObjectHeader*>(PayloadEnd()));
  }

  Address PayloadStart();
  ConstAddress PayloadStart() const;
  Address PayloadEnd();
  ConstAddress PayloadEnd() const;

  static size_t PayloadSize();

  bool PayloadContains(ConstAddress address) const {
    return (PayloadStart() <= address) && (address < PayloadEnd());
  }

  size_t AllocatedBytesAtLastGC() const { return allocated_bytes_at_last_gc_; }

  void SetAllocatedBytesAtLastGC(size_t bytes) {
    allocated_bytes_at_last_gc_ = bytes;
  }

  PlatformAwareObjectStartBitmap& object_start_bitmap() {
    return object_start_bitmap_;
  }
  const PlatformAwareObjectStartBitmap& object_start_bitmap() const {
    return object_start_bitmap_;
  }

 private:
  NormalPage(HeapBase& heap, BaseSpace& space);
  ~NormalPage();

  size_t allocated_bytes_at_last_gc_ = 0;
  PlatformAwareObjectStartBitmap object_start_bitmap_;
};

class V8_EXPORT_PRIVATE LargePage final : public BasePage {
 public:
  static constexpr size_t PageHeaderSize() {
    // The header size is deliberately offset by `sizeof(HeapObjectHeader)`
    // from a `kGuaranteedObjectAlignment` boundary, so that adding a
    // `HeapObjectHeader` gets the user object aligned to
    // `kGuaranteedObjectAlignment`.
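    // Illustrative arithmetic (sizes hypothetical): with sizeof(LargePage) ==
    // 40, sizeof(HeapObjectHeader) == 8, and kGuaranteedObjectAlignment == 16,
    // this evaluates to RoundUp<16>(48) - 8 == 40; the HeapObjectHeader then
    // occupies [40, 48) and the user object starts at offset 48, a multiple
    // of 16.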
    return RoundUp<kGuaranteedObjectAlignment>(sizeof(LargePage) +
                                               sizeof(HeapObjectHeader)) -
           sizeof(HeapObjectHeader);
  }

  // Returns the allocation size required for a payload of size |size|.
  static size_t AllocationSize(size_t size);
  // Allocates a new page in the detached state.
  static LargePage* Create(PageBackend&, LargePageSpace&, size_t);
  // Destroys and frees the page. The page must be detached from the
  // corresponding space (i.e. be swept when called).
  static void Destroy(LargePage*);

  static LargePage* From(BasePage* page) {
    DCHECK(page->is_large());
    return static_cast<LargePage*>(page);
  }
  static const LargePage* From(const BasePage* page) {
    return From(const_cast<BasePage*>(page));
  }

  HeapObjectHeader* ObjectHeader();
  const HeapObjectHeader* ObjectHeader() const;

  Address PayloadStart();
  ConstAddress PayloadStart() const;
  Address PayloadEnd();
  ConstAddress PayloadEnd() const;

  size_t PayloadSize() const { return payload_size_; }
  size_t ObjectSize() const {
    DCHECK_GT(payload_size_, sizeof(HeapObjectHeader));
    return payload_size_ - sizeof(HeapObjectHeader);
  }

  size_t AllocatedBytesAtLastGC() const { return ObjectSize(); }

  bool PayloadContains(ConstAddress address) const {
    return (PayloadStart() <= address) && (address < PayloadEnd());
  }

 private:
  static constexpr size_t kGuaranteedObjectAlignment =
      2 * kAllocationGranularity;

  LargePage(HeapBase& heap, BaseSpace& space, size_t);
  ~LargePage();

  size_t payload_size_;
};

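// Note on the address arithmetic in FromPayload(): page reservations are
// aligned such that masking an interior payload pointer with kPageBaseMask
// yields the reservation base; the BasePage object itself is placed
// kGuardPageSize bytes past that base, behind the leading guard page.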
// static
BasePage* BasePage::FromPayload(void* payload) {
  return reinterpret_cast<BasePage*>(
      (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
}

// static
const BasePage* BasePage::FromPayload(const void* payload) {
  return reinterpret_cast<const BasePage*>(
      (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
       kPageBaseMask) +
      kGuardPageSize);
}

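// Shared lookup helper: a large page holds exactly one object, whose header
// is returned directly; for a normal page, the object-start bitmap is
// consulted to find the closest object start preceding |address|.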
template <AccessMode mode = AccessMode::kNonAtomic>
const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
                                                         const void* address) {
  if (page->is_large()) {
    return LargePage::From(page)->ObjectHeader();
  }
  const PlatformAwareObjectStartBitmap& bitmap =
      NormalPage::From(page)->object_start_bitmap();
  const HeapObjectHeader* header =
      bitmap.FindHeader<mode>(static_cast<ConstAddress>(address));
  DCHECK_LT(address, reinterpret_cast<ConstAddress>(header) +
                         header->AllocatedSize<AccessMode::kAtomic>());
  return header;
}

template <AccessMode mode>
HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
  return const_cast<HeapObjectHeader&>(
      ObjectHeaderFromInnerAddress<mode>(const_cast<const void*>(address)));
}

template <AccessMode mode>
const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
    const void* address) const {
  // This method might be called for |address| found via a Trace method of
  // another object. If |address| is on a newly allocated page, there will
  // be no sync between the page allocation and a concurrent marking thread,
  // resulting in a race with page initialization (specifically with writing
  // the page |type_| field). This can occur when tracing a Member holding a
  // reference to a mixin type.
  SynchronizedLoad();
  const HeapObjectHeader* header =
      ObjectHeaderFromInnerAddressImpl<mode>(this, address);
  DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex<mode>());
  return *header;
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_HEAP_PAGE_H_