// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/marking-barrier.h"

#include "src/base/logging.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-barrier-inl.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/safepoint.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer.h"

namespace v8 {
namespace internal {

MarkingBarrier::MarkingBarrier(Heap* heap)
    : heap_(heap),
      collector_(heap_->mark_compact_collector()),
      incremental_marking_(heap_->incremental_marking()),
      worklist_(collector_->marking_worklists()->shared()),
      marking_state_(heap_->isolate()),
      is_main_thread_barrier_(true),
      is_shared_heap_(heap_->IsShared()) {}

MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
    : heap_(local_heap->heap()),
      collector_(heap_->mark_compact_collector()),
      incremental_marking_(nullptr),
      worklist_(collector_->marking_worklists()->shared()),
      marking_state_(heap_->isolate()),
      is_main_thread_barrier_(false),
      is_shared_heap_(heap_->IsShared()) {}

MarkingBarrier::~MarkingBarrier() { DCHECK(worklist_.IsLocalEmpty()); }

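// Write barrier for regular object fields: marks |value| if it has not been
// marked yet and, when compacting and a slot address is available, records
// the slot so the pointer can be updated after evacuation.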
void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
                           HeapObject value) {
  DCHECK(IsCurrentMarkingBarrier());
  if (MarkValue(host, value)) {
    if (is_compacting_ && slot.address()) {
      collector_->RecordSlot(host, slot, value);
    }
  }
}

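// Main-thread-only variant for writes where no host object is available:
// the value is marked, but no slot can be recorded.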
void MarkingBarrier::WriteWithoutHost(HeapObject value) {
  DCHECK(is_main_thread_barrier_);
  if (WhiteToGreyAndPush(value)) {
    incremental_marking_->RestartIfNotMarking();

    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
      heap_->AddRetainingRoot(Root::kWriteBarrier, value);
    }
  }
}

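// Write barrier for pointers embedded in Code objects. When compacting, the
// relocation slot is recorded as a typed slot: the main thread records it via
// the collector directly, while background threads buffer it locally in
// RecordRelocSlot() below and merge it during Publish().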
void MarkingBarrier::Write(Code host, RelocInfo* reloc_info, HeapObject value) {
  DCHECK(IsCurrentMarkingBarrier());
  if (MarkValue(host, value)) {
    if (is_compacting_) {
      if (is_main_thread_barrier_) {
        // An optimization to avoid allocating additional typed slots for the
        // main thread.
        collector_->RecordRelocSlot(host, reloc_info, value);
      } else {
        RecordRelocSlot(host, reloc_info, value);
      }
    }
  }
}

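// Write barrier for attaching an ArrayBufferExtension to a JSArrayBuffer.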
void MarkingBarrier::Write(JSArrayBuffer host,
                           ArrayBufferExtension* extension) {
  DCHECK(IsCurrentMarkingBarrier());
  if (!V8_CONCURRENT_MARKING_BOOL && !marking_state_.IsBlack(host)) {
    // The extension will be marked when the marker visits the host object.
    return;
  }
  extension->Mark();
}

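// Write barrier for growing the set of marked descriptors in a
// DescriptorArray; see the comments in the body for why the array itself is
// eagerly marked black.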
void MarkingBarrier::Write(DescriptorArray descriptor_array,
                           int number_of_own_descriptors) {
  DCHECK(IsCurrentMarkingBarrier());
  DCHECK(IsReadOnlyHeapObject(descriptor_array.map()));
  // The DescriptorArray needs to be marked black here to ensure that slots
  // are recorded by the Scavenger in case the DescriptorArray is promoted
  // while incremental marking is running. This is needed as the regular
  // marking visitor does not re-process any already marked descriptors. If we
  // don't mark it black here, the Scavenger may promote a DescriptorArray and
  // any already marked descriptors will not have any slots recorded.
  if (!marking_state_.IsBlack(descriptor_array)) {
    marking_state_.WhiteToGrey(descriptor_array);
    marking_state_.GreyToBlack(descriptor_array);
    MarkRange(descriptor_array, descriptor_array.GetFirstPointerSlot(),
              descriptor_array.GetDescriptorSlot(0));
  }
  const int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
      collector_->epoch(), number_of_own_descriptors);
  if (old_marked < number_of_own_descriptors) {
    // This marks the range [old_marked, number_of_own_descriptors) instead of
    // registering weak slots, which may temporarily keep more objects alive
    // for the current GC cycle. Weakness is not needed for actual trimming,
    // see `MarkCompactCollector::TrimDescriptorArray()`.
    MarkRange(descriptor_array,
              MaybeObjectSlot(descriptor_array.GetDescriptorSlot(old_marked)),
              MaybeObjectSlot(descriptor_array.GetDescriptorSlot(
                  number_of_own_descriptors)));
  }
}

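// Buffers a typed slot for |rinfo| in typed_slots_map_; the buffered slots
// are merged into the pages' OLD_TO_OLD remembered sets in Publish().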
void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
                                     HeapObject target) {
  DCHECK(IsCurrentMarkingBarrier());
  if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target)) return;

  MarkCompactCollector::RecordRelocSlotInfo info =
      MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);

  auto& typed_slots = typed_slots_map_[info.memory_chunk];
  if (!typed_slots) {
    typed_slots.reset(new TypedSlots());
  }
  typed_slots->Insert(info.slot_type, info.offset);
}

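// The static *All variants below apply the operation to the main-thread
// barrier as well as to the barrier of every background LocalHeap.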
// static
void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting) {
  heap->marking_barrier()->Activate(is_compacting);
  heap->safepoint()->IterateLocalHeaps([is_compacting](LocalHeap* local_heap) {
    local_heap->marking_barrier()->Activate(is_compacting);
  });
}

// static
void MarkingBarrier::DeactivateAll(Heap* heap) {
  heap->marking_barrier()->Deactivate();
  heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
    local_heap->marking_barrier()->Deactivate();
  });
}

// static
void MarkingBarrier::PublishAll(Heap* heap) {
  heap->marking_barrier()->Publish();
  heap->safepoint()->IterateLocalHeaps(
      [](LocalHeap* local_heap) { local_heap->marking_barrier()->Publish(); });
}

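// Publishes the local marking worklist and merges all locally buffered typed
// slots into the remembered sets of their pages.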
void MarkingBarrier::Publish() {
  if (is_activated_) {
    worklist_.Publish();
    for (auto& it : typed_slots_map_) {
      MemoryChunk* memory_chunk = it.first;
      // Access to TypedSlots needs to be protected, since LocalHeaps might
      // publish code in the background thread.
      base::Optional<base::MutexGuard> opt_guard;
      if (FLAG_concurrent_sparkplug) {
        opt_guard.emplace(memory_chunk->mutex());
      }
      std::unique_ptr<TypedSlots>& typed_slots = it.second;
      RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
                                            std::move(typed_slots));
    }
    typed_slots_map_.clear();
  }
}

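// Deactivation resets the page flags that routed writes in a space through
// the barrier; only the main-thread barrier touches page flags.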
void MarkingBarrier::DeactivateSpace(PagedSpace* space) {
  DCHECK(is_main_thread_barrier_);
  for (Page* p : *space) {
    p->SetOldGenerationPageFlags(false);
  }
}

void MarkingBarrier::DeactivateSpace(NewSpace* space) {
  DCHECK(is_main_thread_barrier_);
  for (Page* p : *space) {
    p->SetYoungGenerationPageFlags(false);
  }
}

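// Deactivates this barrier. For the main-thread barrier this also clears the
// marking page flags on all regular and large-object spaces; buffered typed
// slots and the local worklist must have been published before.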
void MarkingBarrier::Deactivate() {
  is_activated_ = false;
  is_compacting_ = false;
  if (is_main_thread_barrier_) {
    DeactivateSpace(heap_->old_space());
    if (heap_->map_space()) DeactivateSpace(heap_->map_space());
    DeactivateSpace(heap_->code_space());
    DeactivateSpace(heap_->new_space());
    for (LargePage* p : *heap_->new_lo_space()) {
      p->SetYoungGenerationPageFlags(false);
      DCHECK(p->IsLargePage());
    }
    for (LargePage* p : *heap_->lo_space()) {
      p->SetOldGenerationPageFlags(false);
    }
    for (LargePage* p : *heap_->code_lo_space()) {
      p->SetOldGenerationPageFlags(false);
    }
  }
  DCHECK(typed_slots_map_.empty());
  DCHECK(worklist_.IsLocalEmpty());
}

void MarkingBarrier::ActivateSpace(PagedSpace* space) {
  DCHECK(is_main_thread_barrier_);
  for (Page* p : *space) {
    p->SetOldGenerationPageFlags(true);
  }
}

void MarkingBarrier::ActivateSpace(NewSpace* space) {
  DCHECK(is_main_thread_barrier_);
  for (Page* p : *space) {
    p->SetYoungGenerationPageFlags(true);
  }
}

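// Activates this barrier. For the main-thread barrier this also sets the
// marking page flags on all regular and large-object spaces.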
void MarkingBarrier::Activate(bool is_compacting) {
  DCHECK(!is_activated_);
  DCHECK(worklist_.IsLocalEmpty());
  is_compacting_ = is_compacting;
  is_activated_ = true;
  if (is_main_thread_barrier_) {
    ActivateSpace(heap_->old_space());
    if (heap_->map_space()) ActivateSpace(heap_->map_space());
    ActivateSpace(heap_->code_space());
    ActivateSpace(heap_->new_space());

    for (LargePage* p : *heap_->new_lo_space()) {
      p->SetYoungGenerationPageFlags(true);
      DCHECK(p->IsLargePage());
    }

    for (LargePage* p : *heap_->lo_space()) {
      p->SetOldGenerationPageFlags(true);
    }

    for (LargePage* p : *heap_->code_lo_space()) {
      p->SetOldGenerationPageFlags(true);
    }
  }
}

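// Checks that this barrier is the one the write-barrier machinery currently
// dispatches to for heap_.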
bool MarkingBarrier::IsCurrentMarkingBarrier() {
  return WriteBarrier::CurrentMarkingBarrier(heap_) == this;
}

}  // namespace internal
}  // namespace v8