// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/write-barrier.h"

#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/internal/pointer-policies.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/marking-visitor.h"

#if defined(CPPGC_CAGED_HEAP)
#include "include/cppgc/internal/caged-heap-local-data.h"
#endif

namespace cppgc {
namespace internal {

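// Global flag consulted by the inlined fast path of the write barrier. It is
// entered when a heap starts incremental or concurrent marking and exited
// when marking finishes, so that writes only take the slow paths below while
// marking is active.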
// static
AtomicEntryFlag WriteBarrier::incremental_or_concurrent_marking_flag_;

namespace {

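// Shared slow-path tail of the marking barriers below: expects |header| to
// already be marked and forwards it to the marker, special-casing objects
// that are still under construction.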
template <MarkerBase::WriteBarrierType type>
void ProcessMarkValue(HeapObjectHeader& header, MarkerBase* marker,
                      const void* value) {
#if defined(CPPGC_CAGED_HEAP)
  DCHECK(reinterpret_cast<CagedHeapLocalData*>(
             reinterpret_cast<uintptr_t>(value) &
             ~(kCagedHeapReservationAlignment - 1))
             ->is_incremental_marking_in_progress);
#endif
  DCHECK(header.IsMarked<AccessMode::kAtomic>());
  DCHECK(marker);

  if (V8_UNLIKELY(header.IsInConstruction<AccessMode::kNonAtomic>())) {
    // In-construction objects are traced only while they are unmarked, so the
    // object is unmarked here: if marking reaches it again once it is fully
    // constructed, it will be re-marked and traced normally, while tracing it
    // as a not fully constructed object knows to bail out.
    header.Unmark<AccessMode::kAtomic>();
    marker->WriteBarrierForInConstructionObject(header);
    return;
  }

  marker->WriteBarrierForObject<type>(header);
}

}  // namespace

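// Variant of the Dijkstra barrier for slots that may legitimately hold null
// or kSentinelPointer; neither refers to an actual object, so the barrier is
// skipped for them.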
// static
void WriteBarrier::DijkstraMarkingBarrierSlowWithSentinelCheck(
    const void* value) {
  if (!value || value == kSentinelPointer) return;

  DijkstraMarkingBarrierSlow(value);
}

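// Dijkstra-style insertion barrier: marks the object being written so that an
// already-visited (black) object can never end up pointing to an unvisited
// (white) one. Only the thread that wins TryMarkAtomic() pushes the object to
// the marker; all later barriers for it bail out early.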
// static
void WriteBarrier::DijkstraMarkingBarrierSlow(const void* value) {
  const BasePage* page = BasePage::FromPayload(value);
  const auto& heap = page->heap();

  // GetWriteBarrierType() checks marking state.
  DCHECK(heap.marker());
  // No write barriers should be executed from atomic pause marking.
  DCHECK(!heap.in_atomic_pause());

  auto& header =
      const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
  if (!header.TryMarkAtomic()) return;

  ProcessMarkValue<MarkerBase::WriteBarrierType::kDijkstra>(
      header, heap.marker(), value);
}

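// Range version of the Dijkstra barrier: re-traces |number_of_elements|
// elements of |element_size| bytes in place via |trace_callback|, e.g. for
// arrays of inlined values that have no HeapObjectHeader of their own. GC is
// disallowed while the marker's visitor is used directly.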
// static
void WriteBarrier::DijkstraMarkingBarrierRangeSlow(
    HeapHandle& heap_handle, const void* first_element, size_t element_size,
    size_t number_of_elements, TraceCallback trace_callback) {
  auto& heap_base = HeapBase::From(heap_handle);

  // GetWriteBarrierType() checks marking state.
  DCHECK(heap_base.marker());
  // No write barriers should be executed from atomic pause marking.
  DCHECK(!heap_base.in_atomic_pause());

  cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_base);
  const char* array = static_cast<const char*>(first_element);
  while (number_of_elements-- > 0) {
    trace_callback(&heap_base.marker()->Visitor(), array);
    array += element_size;
  }
}

// static
void WriteBarrier::SteeleMarkingBarrierSlowWithSentinelCheck(
    const void* value) {
  if (!value || value == kSentinelPointer) return;

  SteeleMarkingBarrierSlow(value);
}

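// Steele-style barrier: an object that is already marked (and may have been
// traced) is handed back to the marker so that its updated fields are traced
// again. Unmarked objects need no work here, as regular marking will still
// discover them.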
// static
void WriteBarrier::SteeleMarkingBarrierSlow(const void* value) {
  const BasePage* page = BasePage::FromPayload(value);
  const auto& heap = page->heap();

  // GetWriteBarrierType() checks marking state.
  DCHECK(heap.marker());
  // No write barriers should be executed from atomic pause marking.
  DCHECK(!heap.in_atomic_pause());

  auto& header =
      const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
  if (!header.IsMarked<AccessMode::kAtomic>()) return;

  ProcessMarkValue<MarkerBase::WriteBarrierType::kSteele>(header, heap.marker(),
                                                          value);
}

#if defined(CPPGC_YOUNG_GENERATION)
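// Records |slot| in the remembered set unless the value it now points to is
// known to be old. The remembered set acts as an additional set of roots for
// young-generation (minor) collections.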
// static
void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
                                           const AgeTable& age_table,
                                           const void* slot,
                                           uintptr_t value_offset) {
  DCHECK(slot);
  // A write during atomic pause (e.g. pre-finalizer) may trigger the slow path
  // of the barrier. This is a result of the order of bailouts where not marking
  // results in applying the generational barrier.
  if (local_data.heap_base.in_atomic_pause()) return;

  if (value_offset > 0 && age_table.GetAge(value_offset) == AgeTable::Age::kOld)
    return;

  // Record slot.
  local_data.heap_base.remembered_set().AddSlot(const_cast<void*>(slot));
}

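// Records the whole source object instead of an individual slot, for writes
// through an inner pointer where the precise slot is not available; a minor
// GC can then re-trace the recorded object as a whole.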
// static
void WriteBarrier::GenerationalBarrierForSourceObjectSlow(
    const CagedHeapLocalData& local_data, const void* inner_pointer) {
  DCHECK(inner_pointer);

  auto& object_header =
      BasePage::FromInnerAddress(&local_data.heap_base, inner_pointer)
          ->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(inner_pointer);

  // Record the source object.
  local_data.heap_base.remembered_set().AddSourceObject(
      const_cast<HeapObjectHeader&>(object_header));
}
#endif  // CPPGC_YOUNG_GENERATION

#if V8_ENABLE_CHECKS
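// Debug-only check that the barrier type computed at the call site matches
// the type recorded in |params| by GetWriteBarrierType().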
// static
void WriteBarrier::CheckParams(Type expected_type, const Params& params) {
  CHECK_EQ(expected_type, params.type);
}
#endif  // V8_ENABLE_CHECKS

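// Marking check for builds without a caged heap: derives the heap from the
// object's page, returns it through |handle| for the caller to reuse, and
// reports whether that heap is currently marking.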
// static
bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(const void* object,
                                                      HeapHandle** handle) {
  // Large objects cannot have mixins, so we are guaranteed to always have
  // a pointer on the same page.
  const auto* page = BasePage::FromPayload(object);
  *handle = &page->heap();
  const MarkerBase* marker = page->heap().marker();
  return marker && marker->IsMarking();
}

// static
bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(HeapHandle& heap_handle) {
  const auto& heap_base = internal::HeapBase::From(heap_handle);
  const MarkerBase* marker = heap_base.marker();
  return marker && marker->IsMarking();
}

#if defined(CPPGC_CAGED_HEAP)

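// Caged-heap counterpart that resolves the heap from an explicit handle. On
// the non-marking path it additionally caches the caged-heap local data
// address in |params| (young-generation builds only) to save the caller a
// second lookup.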
// static
bool WriteBarrierTypeForCagedHeapPolicy::IsMarking(
    const HeapHandle& heap_handle, WriteBarrier::Params& params) {
  const auto& heap_base = internal::HeapBase::From(heap_handle);
  if (const MarkerBase* marker = heap_base.marker()) {
    return marker->IsMarking();
  }
  // Also set caged heap start here to avoid another call immediately after
  // checking IsMarking().
#if defined(CPPGC_YOUNG_GENERATION)
  params.start =
      reinterpret_cast<uintptr_t>(&heap_base.caged_heap().local_data());
#endif  // CPPGC_YOUNG_GENERATION
  return false;
}

#endif  // CPPGC_CAGED_HEAP

}  // namespace internal
}  // namespace cppgc