// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "include/cppgc/explicit-management.h"

#include <algorithm>
#include <tuple>

#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/memory.h"

namespace cppgc {
namespace internal {

namespace {

bool InGC(HeapHandle& heap_handle) {
  const auto& heap = HeapBase::From(heap_handle);
  // Whenever the GC is active, avoid modifying the object as it may mess with
  // state that the GC needs.
  return heap.in_atomic_pause() || heap.marker() ||
         heap.sweeper().IsSweepingInProgress();
}

}  // namespace

void ExplicitManagementImpl::FreeUnreferencedObject(HeapHandle& heap_handle,
                                                    void* object) {
  if (InGC(heap_handle)) {
    return;
  }

  auto& header = HeapObjectHeader::FromObject(object);
  header.Finalize();

  size_t object_size = 0;
  USE(object_size);
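  // `object_size` is only consumed by the CPPGC_YOUNG_GENERATION block at the
  // end of this function; USE() keeps it from being flagged as unused in
  // builds without young-generation support.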

  // `object` is guaranteed to be of type GarbageCollected, so getting the
  // BasePage is okay for regular and large objects.
  BasePage* base_page = BasePage::FromPayload(object);
  if (base_page->is_large()) {  // Large object.
    object_size = LargePage::From(base_page)->ObjectSize();
    base_page->space().RemovePage(base_page);
    base_page->heap().stats_collector()->NotifyExplicitFree(
        LargePage::From(base_page)->PayloadSize());
    LargePage::Destroy(LargePage::From(base_page));
  } else {  // Regular object.
    const size_t header_size = header.AllocatedSize();
    object_size = header.ObjectSize();
    auto* normal_page = NormalPage::From(base_page);
    auto& normal_space = *static_cast<NormalPageSpace*>(&base_page->space());
    auto& lab = normal_space.linear_allocation_buffer();
    ConstAddress payload_end = header.ObjectEnd();
    SetMemoryInaccessible(&header, header_size);
    if (payload_end == lab.start()) {  // Returning to LAB.
      lab.Set(reinterpret_cast<Address>(&header), lab.size() + header_size);
      normal_page->object_start_bitmap().ClearBit(lab.start());
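      // The freed block now belongs to the LAB, so its object-start bit was
      // cleared above; LAB memory is not considered a live object.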
    } else {  // Returning to free list.
      base_page->heap().stats_collector()->NotifyExplicitFree(header_size);
      normal_space.free_list().Add({&header, header_size});
      // No need to update the bitmap as the same bit is reused for the free
      // list entry.
    }
  }
#if defined(CPPGC_YOUNG_GENERATION)
  auto& heap_base = HeapBase::From(heap_handle);
  heap_base.remembered_set().InvalidateRememberedSlotsInRange(
      object, reinterpret_cast<uint8_t*>(object) + object_size);
  // If this object was registered as remembered, remove it.
  heap_base.remembered_set().InvalidateRememberedSourceObject(header);
#endif  // defined(CPPGC_YOUNG_GENERATION)
}

namespace {

bool Grow(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
          size_t size_delta) {
  DCHECK_GE(new_size, header.AllocatedSize() + kAllocationGranularity);
  DCHECK_GE(size_delta, kAllocationGranularity);
  DCHECK(!base_page.is_large());

  auto& normal_space = *static_cast<NormalPageSpace*>(&base_page.space());
  auto& lab = normal_space.linear_allocation_buffer();
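  // Grow in place only if the object is immediately followed by the LAB and
  // the LAB is large enough to cover the delta; otherwise report failure so
  // the caller can decide how to proceed.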
  if (lab.start() == header.ObjectEnd() && lab.size() >= size_delta) {
    // LABs are considered used memory which means that no allocated size
    // adjustments are needed.
    Address delta_start = lab.Allocate(size_delta);
    SetMemoryAccessible(delta_start, size_delta);
    header.SetAllocatedSize(new_size);
    return true;
  }
  return false;
}

bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
            size_t size_delta) {
  DCHECK_GE(header.AllocatedSize(), new_size + kAllocationGranularity);
  DCHECK_GE(size_delta, kAllocationGranularity);
  DCHECK(!base_page.is_large());

  auto& normal_space = *static_cast<NormalPageSpace*>(&base_page.space());
  auto& lab = normal_space.linear_allocation_buffer();
  Address free_start = header.ObjectEnd() - size_delta;
  if (lab.start() == header.ObjectEnd()) {
    DCHECK_EQ(free_start, lab.start() - size_delta);
    // LABs are considered used memory which means that no allocated size
    // adjustments are needed.
    lab.Set(free_start, lab.size() + size_delta);
    SetMemoryInaccessible(lab.start(), size_delta);
    header.SetAllocatedSize(new_size);
  } else if (size_delta >= ObjectAllocator::kSmallestSpaceSize) {
    // Heuristic: Only return memory to the free list if the block is larger
    // than the smallest size class.
    SetMemoryInaccessible(free_start, size_delta);
    base_page.heap().stats_collector()->NotifyExplicitFree(size_delta);
    normal_space.free_list().Add({free_start, size_delta});
    NormalPage::From(&base_page)->object_start_bitmap().SetBit(free_start);
    header.SetAllocatedSize(new_size);
  }
#if defined(CPPGC_YOUNG_GENERATION)
  base_page.heap().remembered_set().InvalidateRememberedSlotsInRange(
      free_start, free_start + size_delta);
#endif  // defined(CPPGC_YOUNG_GENERATION)
  // Return success in any case, as we want to avoid embedders copying memory
  // because of small deltas.
  return true;
}

}  // namespace

bool ExplicitManagementImpl::Resize(void* object, size_t new_object_size) {
  // `object` is guaranteed to be of type GarbageCollected, so getting the
  // BasePage is okay for regular and large objects.
  BasePage* base_page = BasePage::FromPayload(object);

  if (InGC(base_page->heap())) {
    return false;
  }

  // TODO(chromium:1056170): Consider supporting large objects within certain
  // restrictions.
  if (base_page->is_large()) {
    return false;
  }

  const size_t new_size = RoundUp<kAllocationGranularity>(
      sizeof(HeapObjectHeader) + new_object_size);
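  // For illustration only (the exact values are platform-dependent and not
  // guaranteed here): with an 8-byte kAllocationGranularity and an 8-byte
  // HeapObjectHeader, a request for 20 object bytes rounds up to an allocated
  // size of RoundUp<8>(8 + 20) == 32 bytes.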
  auto& header = HeapObjectHeader::FromObject(object);
  const size_t old_size = header.AllocatedSize();

  if (new_size > old_size) {
    return Grow(header, *base_page, new_size, new_size - old_size);
  } else if (old_size > new_size) {
    return Shrink(header, *base_page, new_size, old_size - new_size);
  }
  // Same size considering internal restrictions, e.g. alignment.
  return true;
}

}  // namespace internal
}  // namespace cppgc
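
// A minimal usage sketch (not part of this translation unit), assuming the
// templated wrappers cppgc::subtle::FreeUnreferencedObject() and
// cppgc::subtle::Resize() declared in include/cppgc/explicit-management.h
// forward to the ExplicitManagementImpl functions above; `Buffer` and
// `Example` are hypothetical names used only for illustration:
//
//   class Buffer final : public cppgc::GarbageCollected<Buffer> {
//    public:
//     void Trace(cppgc::Visitor*) const {}
//   };
//
//   void Example(cppgc::HeapHandle& heap_handle,
//                cppgc::AllocationHandle& allocation_handle) {
//     Buffer* buffer = cppgc::MakeGarbageCollected<Buffer>(allocation_handle);
//     // Request extra trailing capacity; this may be a no-op if the object
//     // cannot be resized in place.
//     cppgc::subtle::Resize(*buffer, cppgc::AdditionalBytes(16));
//     // Eagerly reclaim the object once it is known to be unreferenced.
//     cppgc::subtle::FreeUnreferencedObject(heap_handle, *buffer);
//   }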