// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/object-allocator.h"

#include "include/cppgc/allocation.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"

namespace cppgc {
namespace internal {

namespace {

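// Marks the age-table cards covering [begin, end) as young when
// CPPGC_YOUNG_GENERATION is enabled; otherwise this compiles to a no-op.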
void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
#if defined(CPPGC_YOUNG_GENERATION)
  DCHECK_LT(begin, end);

  static constexpr auto kEntrySize = AgeTable::kCardSizeInBytes;

  const uintptr_t offset_begin = CagedHeap::OffsetFromAddress(begin);
  const uintptr_t offset_end = CagedHeap::OffsetFromAddress(end);

  const uintptr_t young_offset_begin = (begin == page->PayloadStart())
                                           ? RoundDown(offset_begin, kEntrySize)
                                           : RoundUp(offset_begin, kEntrySize);
  const uintptr_t young_offset_end = (end == page->PayloadEnd())
                                         ? RoundUp(offset_end, kEntrySize)
                                         : RoundDown(offset_end, kEntrySize);

  auto& age_table = page->heap().caged_heap().local_data().age_table;
  for (auto offset = young_offset_begin; offset < young_offset_end;
       offset += AgeTable::kCardSizeInBytes) {
    age_table.SetAge(offset, AgeTable::Age::kYoung);
  }

  // Mark the first and the last cards of the newly allocated linear buffer as
  // kMixed if they are only partially covered by the buffer.
  if (begin != page->PayloadStart() && !IsAligned(offset_begin, kEntrySize))
    age_table.SetAge(offset_begin, AgeTable::Age::kMixed);
  if (end != page->PayloadEnd() && !IsAligned(offset_end, kEntrySize))
    age_table.SetAge(offset_end, AgeTable::Age::kMixed);
#endif
}

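// Returns [start, start + size) to the space's free list and records the
// block start in the page's object-start bitmap.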
void AddToFreeList(NormalPageSpace& space, Address start, size_t size) {
  // No need for SetMemoryInaccessible() as LAB memory is retrieved as free
  // inaccessible memory.
  space.free_list().Add({start, size});
  // Concurrent marking may be running while the LAB is set up next to a live
  // object sharing the same cell in the bitmap.
  NormalPage::From(BasePage::FromPayload(start))
      ->object_start_bitmap()
      .SetBit<AccessMode::kAtomic>(start);
}

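// Retires the current linear allocation buffer (LAB) of `space`, returning
// any unused remainder to the free list, and installs
// [new_buffer, new_buffer + new_size) as the new LAB, keeping allocation
// stats, the object-start bitmap, and the age table in sync.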
void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
                                   StatsCollector& stats_collector,
                                   Address new_buffer, size_t new_size) {
  auto& lab = space.linear_allocation_buffer();
  if (lab.size()) {
    AddToFreeList(space, lab.start(), lab.size());
    stats_collector.NotifyExplicitFree(lab.size());
  }

  lab.Set(new_buffer, new_size);
  if (new_size) {
    DCHECK_NOT_NULL(new_buffer);
    stats_collector.NotifyAllocation(new_size);
    auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
    // Concurrent marking may be running while the LAB is set up next to a live
    // object sharing the same cell in the bitmap.
    page->object_start_bitmap().ClearBit<AccessMode::kAtomic>(new_buffer);
    MarkRangeAsYoung(page, new_buffer, new_buffer + new_size);
  }
}

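// Allocates `size` bytes as a single object on a dedicated LargePage and
// constructs its HeapObjectHeader in place; returns the object start.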
void* AllocateLargeObject(PageBackend& page_backend, LargePageSpace& space,
                          StatsCollector& stats_collector, size_t size,
                          GCInfoIndex gcinfo) {
  LargePage* page = LargePage::Create(page_backend, space, size);
  space.AddPage(page);

  auto* header = new (page->ObjectHeader())
      HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);

  stats_collector.NotifyAllocation(size);
  MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());

  return header->ObjectStart();
}

}  // namespace

constexpr size_t ObjectAllocator::kSmallestSpaceSize;

ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
                                 StatsCollector& stats_collector,
                                 PreFinalizerHandler& prefinalizer_handler)
    : raw_heap_(heap),
      page_backend_(page_backend),
      stats_collector_(stats_collector),
      prefinalizer_handler_(prefinalizer_handler) {}

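// Slow path taken when the inline fast path declared in object-allocator.h
// cannot serve the request from the current LAB. For orientation, a minimal
// sketch of the public API path that typically ends up here (example types
// and variables are illustrative, not part of this file):
//
//   class Foo : public cppgc::GarbageCollected<Foo> {
//    public:
//     void Trace(cppgc::Visitor*) const {}
//   };
//   Foo* foo = cppgc::MakeGarbageCollected<Foo>(allocation_handle);
//
// where `allocation_handle` is the cppgc::AllocationHandle of the owning
// heap.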
void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
                                         AlignVal alignment,
                                         GCInfoIndex gcinfo) {
  void* memory = OutOfLineAllocateImpl(space, size, alignment, gcinfo);
  stats_collector_.NotifySafePointForConservativeCollection();
  if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
    // Objects allocated during pre finalizers should be allocated as black
    // since marking is already done. Atomics are not needed because there is
    // no concurrent marking in the background.
    HeapObjectHeader::FromObject(memory).MarkNonAtomic();
    // Resetting the allocation buffer forces all further allocations in pre
    // finalizers to go through this slow path.
    ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
    prefinalizer_handler_.NotifyAllocationInPrefinalizer(size);
  }
  return memory;
}

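// Core of the slow path: allocations of at least kLargeObjectSizeThreshold
// bytes go to the large-object space; everything else refills the LAB and
// allocates from it. For non-default alignment, one extra allocation granule
// is requested so the aligned AllocateObjectOnSpace() overload has room to
// shift the object start to the requested boundary. Illustration (assumed
// example values: 8-byte granularity, 16-byte alignment request):
//
//   request_size = size + kAllocationGranularity;  // e.g. size + 8
//   // The extra granule pays for shifting the allocation by one granule if
//   // the natural object start in the refilled LAB is not 16-byte aligned.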
void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
                                             size_t size, AlignVal alignment,
                                             GCInfoIndex gcinfo) {
  DCHECK_EQ(0, size & kAllocationMask);
  DCHECK_LE(kFreeListEntrySize, size);
  // Out-of-line allocation allows for checking this in all situations.
  CHECK(!in_disallow_gc_scope());

  // If this allocation is big enough, allocate a large object.
  if (size >= kLargeObjectSizeThreshold) {
    auto& large_space = LargePageSpace::From(
        *raw_heap_.Space(RawHeap::RegularSpaceType::kLarge));
    // LargePage has a natural alignment that already satisfies
    // `kMaxSupportedAlignment`.
    return AllocateLargeObject(page_backend_, large_space, stats_collector_,
                               size, gcinfo);
  }

  size_t request_size = size;
  // Adjust size to be able to accommodate alignment.
  const size_t dynamic_alignment = static_cast<size_t>(alignment);
  if (dynamic_alignment != kAllocationGranularity) {
    CHECK_EQ(2 * sizeof(HeapObjectHeader), dynamic_alignment);
    request_size += kAllocationGranularity;
  }

  RefillLinearAllocationBuffer(space, request_size);

  // The allocation must succeed, as we just refilled the LAB.
  void* result = (dynamic_alignment == kAllocationGranularity)
                     ? AllocateObjectOnSpace(space, size, gcinfo)
                     : AllocateObjectOnSpace(space, size, alignment, gcinfo);
  CHECK(result);
  return result;
}

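// Refill strategy, in order: (1) reuse a block from the free list; (2) sweep
// lazily until a large-enough block frees up and retry the free list;
// (3) finish sweeping and hand out a fresh NormalPage whose whole payload
// becomes the new LAB.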
void ObjectAllocator::RefillLinearAllocationBuffer(NormalPageSpace& space,
                                                   size_t size) {
  // Try to allocate from the free list.
  if (RefillLinearAllocationBufferFromFreeList(space, size)) return;

  // Lazily sweep pages of this heap until we find a freed area for this
  // allocation or we finish sweeping all pages of this heap.
  Sweeper& sweeper = raw_heap_.heap()->sweeper();
  // TODO(chromium:1056170): Investigate whether this should be a loop which
  // would result in more aggressive re-use of memory at the expense of
  // potentially larger allocation time.
  if (sweeper.SweepForAllocationIfRunning(&space, size)) {
    // Sweeper found a block of at least `size` bytes. Allocation from the
    // free list may still fail as actual buckets are not exhaustively
    // searched for a suitable block. Instead, buckets are tested from larger
    // sizes that are guaranteed to fit the block to smaller bucket sizes that
    // may only potentially fit the block. For the bucket that may exactly fit
    // the allocation of `size` bytes (no overallocation), only the first
    // entry is checked.
    if (RefillLinearAllocationBufferFromFreeList(space, size)) return;
  }

  sweeper.FinishIfRunning();
  // TODO(chromium:1056170): Make use of the synchronously freed memory.

  auto* new_page = NormalPage::Create(page_backend_, space);
  space.AddPage(new_page);

  // Set linear allocation buffer to new page.
  ReplaceLinearAllocationBuffer(space, stats_collector_,
                                new_page->PayloadStart(),
                                new_page->PayloadSize());
}

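// Attempts to back the LAB with a free-list block of at least `size` bytes;
// returns false if no suitable block is available.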
bool ObjectAllocator::RefillLinearAllocationBufferFromFreeList(
    NormalPageSpace& space, size_t size) {
  const FreeList::Block entry = space.free_list().Allocate(size);
  if (!entry.address) return false;

  // Assume discarded memory on that page is now zero.
  auto& page = *NormalPage::From(BasePage::FromPayload(entry.address));
  if (page.discarded_memory()) {
    stats_collector_.DecrementDiscardedMemory(page.discarded_memory());
    page.ResetDiscardedMemory();
  }

  ReplaceLinearAllocationBuffer(
      space, stats_collector_, static_cast<Address>(entry.address), entry.size);
  return true;
}

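// Returns every normal-page space's LAB to its free list; large-object spaces
// have no LAB and are skipped.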
void ObjectAllocator::ResetLinearAllocationBuffers() {
  class Resetter : public HeapVisitor<Resetter> {
   public:
    explicit Resetter(StatsCollector& stats) : stats_collector_(stats) {}

    bool VisitLargePageSpace(LargePageSpace&) { return true; }

    bool VisitNormalPageSpace(NormalPageSpace& space) {
      ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
      return true;
    }

   private:
    StatsCollector& stats_collector_;
  } visitor(stats_collector_);

  visitor.Traverse(raw_heap_);
}

void ObjectAllocator::Terminate() {
  ResetLinearAllocationBuffers();
}

bool ObjectAllocator::in_disallow_gc_scope() const {
  return raw_heap_.heap()->in_disallow_gc_scope();
}

}  // namespace internal
}  // namespace cppgc