// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/marker.h"

#include <memory>

#include "include/cppgc/internal/process-heap.h"
#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/stats-collector.h"

#if defined(CPPGC_CAGED_HEAP)
#include "include/cppgc/internal/caged-heap-local-data.h"
#endif

namespace cppgc {
namespace internal {

namespace {

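// Returns true and records that incremental/concurrent marking is in progress
// if the config requests incremental (and possibly concurrent) marking;
// returns false for atomic-only configurations.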
bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
                                     HeapBase& heap) {
  if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    ProcessHeap::EnterIncrementalOrConcurrentMarking();
#if defined(CPPGC_CAGED_HEAP)
    heap.caged_heap().local_data().is_marking_in_progress = true;
#endif
    return true;
  }
  return false;
}

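// Counterpart of EnterIncrementalMarkingIfNeeded(); clears the
// marking-in-progress state for incremental/concurrent configurations.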
bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
                                    HeapBase& heap) {
  if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    ProcessHeap::ExitIncrementalOrConcurrentMarking();
#if defined(CPPGC_CAGED_HEAP)
    heap.caged_heap().local_data().is_marking_in_progress = false;
#endif
    return true;
  }
  return false;
}

// Visit the remembered set that was recorded by the generational barrier.
void VisitRememberedSlots(HeapBase& heap,
                          MutatorMarkingState& mutator_marking_state) {
#if defined(CPPGC_YOUNG_GENERATION)
  for (void* slot : heap.remembered_slots()) {
    auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
                            ->ObjectHeaderFromInnerAddress(slot);
    if (slot_header.IsYoung()) continue;
    // The design of the young generation requires collections to be executed
    // at the top level (with the guarantee that no objects are currently in
    // construction). This can be ensured by running young GCs from safe points
    // or by reintroducing nested allocation scopes that avoid finalization.
    DCHECK(!slot_header.IsInConstruction<AccessMode::kNonAtomic>());

    void* value = *reinterpret_cast<void**>(slot);
    mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
  }
#endif
}

// Assumes that all spaces have their LABs reset.
void ResetRememberedSet(HeapBase& heap) {
#if defined(CPPGC_YOUNG_GENERATION)
  auto& local_data = heap.caged_heap().local_data();
  local_data.age_table.Reset(&heap.caged_heap().allocator());
  heap.remembered_slots().clear();
#endif
}

static constexpr size_t kDefaultDeadlineCheckInterval = 150u;

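// Drains |worklist_local| with |callback| until either the marked-bytes
// deadline or the time deadline is reached. The deadline predicate is only
// evaluated every kDeadlineCheckInterval items to keep the check cheap.
// Returns true if the worklist was fully drained before hitting a deadline.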
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
          typename WorklistLocal, typename Callback>
bool DrainWorklistWithBytesAndTimeDeadline(MarkingStateBase& marking_state,
                                           size_t marked_bytes_deadline,
                                           v8::base::TimeTicks time_deadline,
                                           WorklistLocal& worklist_local,
                                           Callback callback) {
  return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
      [&marking_state, marked_bytes_deadline, time_deadline]() {
        return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
               (time_deadline <= v8::base::TimeTicks::Now());
      },
      worklist_local, callback);
}

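// Asks the schedule for the size (in bytes) of the next incremental step,
// based on the currently allocated object size reported by the stats
// collector.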
size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
                                      HeapBase& heap) {
  return schedule.GetNextIncrementalStepDuration(
      heap.stats_collector()->allocated_object_size());
}

}  // namespace

constexpr v8::base::TimeDelta MarkerBase::kMaximumIncrementalStepDuration;

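// The task keeps a cancelable handle so that a pending step can be dropped
// once the atomic pause starts (see MarkerBase::EnterAtomicPause()).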
MarkerBase::IncrementalMarkingTask::IncrementalMarkingTask(
    MarkerBase* marker, MarkingConfig::StackState stack_state)
    : marker_(marker),
      stack_state_(stack_state),
      handle_(Handle::NonEmptyTag{}) {}

// static
MarkerBase::IncrementalMarkingTask::Handle
MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
                                         MarkerBase* marker) {
  // Incremental GC is possible only via the GCInvoker, so getting here
  // guarantees that either non-nestable tasks or conservative stack
  // scanning are supported. This is required so that the incremental
  // task can safely finalize GC if needed.
  DCHECK_IMPLIES(marker->heap().stack_support() !=
                     HeapBase::StackSupport::kSupportsConservativeStackScan,
                 runner->NonNestableTasksEnabled());
  MarkingConfig::StackState stack_state_for_task =
      runner->NonNestableTasksEnabled()
          ? MarkingConfig::StackState::kNoHeapPointers
          : MarkingConfig::StackState::kMayContainHeapPointers;
  auto task =
      std::make_unique<IncrementalMarkingTask>(marker, stack_state_for_task);
  auto handle = task->handle_;
  if (runner->NonNestableTasksEnabled()) {
    runner->PostNonNestableTask(std::move(task));
  } else {
    runner->PostTask(std::move(task));
  }
  return handle;
}

void MarkerBase::IncrementalMarkingTask::Run() {
  if (handle_.IsCanceled()) return;

  if (marker_->IncrementalMarkingStep(stack_state_)) {
    // Incremental marking is done, so the GC should be finalized.
    marker_->heap().FinalizeIncrementalGarbageCollectionIfNeeded(stack_state_);
  }
}

MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
                       MarkingConfig config)
    : heap_(heap),
      config_(config),
      platform_(platform),
      foreground_task_runner_(platform_->GetForegroundTaskRunner()),
      mutator_marking_state_(heap, marking_worklists_,
                             heap.compactor().compaction_worklists()) {}

MarkerBase::~MarkerBase() {
  // The fixed-point iteration may have found not-fully-constructed objects.
  // Such objects should have already been found through the stack scan,
  // though, and should thus already be marked.
  if (!marking_worklists_.not_fully_constructed_worklist()->IsEmpty()) {
#if DEBUG
    DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
    std::unordered_set<HeapObjectHeader*> objects =
        mutator_marking_state_.not_fully_constructed_worklist().Extract();
    for (HeapObjectHeader* object : objects) DCHECK(object->IsMarked());
#else
    marking_worklists_.not_fully_constructed_worklist()->Clear();
#endif
  }

  // |discovered_ephemeron_pairs_worklist_| may still hold ephemeron pairs with
  // dead keys.
  if (!marking_worklists_.discovered_ephemeron_pairs_worklist()->IsEmpty()) {
#if DEBUG
    MarkingWorklists::EphemeronPairItem item;
    while (mutator_marking_state_.discovered_ephemeron_pairs_worklist().Pop(
        &item)) {
      DCHECK(!HeapObjectHeader::FromPayload(item.key).IsMarked());
    }
#else
    marking_worklists_.discovered_ephemeron_pairs_worklist()->Clear();
#endif
  }

  marking_worklists_.weak_containers_worklist()->Clear();
}

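// Starts a marking cycle. For incremental/concurrent configurations, roots are
// visited without scanning the stack and further steps are scheduled; the
// expensive stack scan is deferred to the atomic pause.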
void MarkerBase::StartMarking() {
  DCHECK(!is_marking_started_);
  heap().stats_collector()->NotifyMarkingStarted();

  is_marking_started_ = true;
  if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
    // Performing incremental or concurrent marking.
    schedule_.NotifyIncrementalMarkingStart();
    // Scanning the stack is expensive, so we only do it at the atomic pause.
    VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
    ScheduleIncrementalMarkingTask();
    if (config_.marking_type ==
        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
      mutator_marking_state_.Publish();
      concurrent_marker_->Start();
    }
  }
}

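// Transitions into the atomic pause: cancels remaining incremental/concurrent
// work, switches the config to atomic marking, and re-visits roots with the
// requested stack state.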
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
  if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
    // Cancel remaining concurrent/incremental tasks.
    concurrent_marker_->Cancel();
    incremental_marking_handle_.Cancel();
  }
  config_.stack_state = stack_state;
  config_.marking_type = MarkingConfig::MarkingType::kAtomic;

  // The lock guards against changes to {Weak}CrossThreadPersistent handles
  // that may conflict with marking. E.g., a WeakCrossThreadPersistent may be
  // converted into a CrossThreadPersistent, which requires that the handle is
  // either cleared or the object is retained.
  g_process_mutex.Pointer()->Lock();

  // VisitRoots also resets the LABs.
  VisitRoots(config_.stack_state);
  if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
    mutator_marking_state_.FlushNotFullyConstructedObjects();
    DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
  } else {
    MarkNotFullyConstructedObjects();
  }
}

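// Finishes the atomic pause: resets the remembered set, reports the overall
// marked bytes to the stats collector, processes weakness, and releases the
// process lock taken in EnterAtomicPause().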
void MarkerBase::LeaveAtomicPause() {
  DCHECK(!incremental_marking_handle_);
  ResetRememberedSet(heap());
  heap().stats_collector()->NotifyMarkingCompleted(
      // GetOverallMarkedBytes also includes concurrently marked bytes.
      schedule_.GetOverallMarkedBytes());
  is_marking_started_ = false;
  ProcessWeakness();
  g_process_mutex.Pointer()->Unlock();
}

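// Drives marking to completion by entering the atomic pause and draining all
// worklists without any byte or time budget.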
void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
  DCHECK(is_marking_started_);
  EnterAtomicPause(stack_state);
  CHECK(ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
                                     v8::base::TimeTicks::Max()));
  mutator_marking_state_.Publish();
  LeaveAtomicPause();
}

void MarkerBase::ProcessWeakness() {
  DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
  heap().GetWeakPersistentRegion().Trace(&visitor());
  // Processing cross-thread handles requires taking the process lock.
  g_process_mutex.Get().AssertHeld();
  heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());

  // Call weak callbacks on objects that may now be pointing to dead objects.
  MarkingWorklists::WeakCallbackItem item;
  LivenessBroker broker = LivenessBrokerFactory::Create();
  MarkingWorklists::WeakCallbackWorklist::Local& local =
      mutator_marking_state_.weak_callback_worklist();
  while (local.Pop(&item)) {
    item.callback(broker, item.parameter);
  }
  // Weak callbacks should not add any new objects for marking.
  DCHECK(marking_worklists_.marking_worklist()->IsEmpty());
}

void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
  // Reset LABs before scanning roots. LABs are cleared to allow
  // ObjectStartBitmap handling without considering LABs.
  heap().object_allocator().ResetLinearAllocationBuffers();

  heap().GetStrongPersistentRegion().Trace(&visitor());
  if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
    g_process_mutex.Get().AssertHeld();
    heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
  }
  if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
    heap().stack()->IteratePointers(&stack_visitor());
  }
  if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
    VisitRememberedSlots(heap(), mutator_marking_state_);
  }
}

void MarkerBase::ScheduleIncrementalMarkingTask() {
  DCHECK(platform_);
  if (!foreground_task_runner_ || incremental_marking_handle_) return;
  incremental_marking_handle_ =
      IncrementalMarkingTask::Post(foreground_task_runner_.get(), this);
}

bool MarkerBase::IncrementalMarkingStepForTesting(
    MarkingConfig::StackState stack_state) {
  return IncrementalMarkingStep(stack_state);
}

bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
  if (stack_state == MarkingConfig::StackState::kNoHeapPointers) {
    mutator_marking_state_.FlushNotFullyConstructedObjects();
  }
  config_.stack_state = stack_state;

  return AdvanceMarkingWithDeadline();
}

void MarkerBase::AdvanceMarkingOnAllocation() {
  if (AdvanceMarkingWithDeadline()) {
    // Schedule another incremental task for finalizing without a stack.
    ScheduleIncrementalMarkingTask();
  }
}

bool MarkerBase::AdvanceMarkingWithMaxDuration(
    v8::base::TimeDelta max_duration) {
  return AdvanceMarkingWithDeadline(max_duration);
}

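// Performs one marking step bounded by both a byte budget provided by the
// schedule and |max_duration|. Returns true if all worklists were drained,
// i.e. marking is done; otherwise schedules a follow-up incremental task.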
bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
  bool is_done = false;
  if (!incremental_marking_disabled_for_testing_) {
    size_t step_size_in_bytes =
        GetNextIncrementalStepDuration(schedule_, heap_);
    is_done = ProcessWorklistsWithDeadline(
        mutator_marking_state_.marked_bytes() + step_size_in_bytes,
        v8::base::TimeTicks::Now() + max_duration);
    schedule_.UpdateIncrementalMarkedBytes(
        mutator_marking_state_.marked_bytes());
  }
  mutator_marking_state_.Publish();
  if (!is_done) {
    // If marking is atomic, |is_done| should always be true.
    DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
    ScheduleIncrementalMarkingTask();
    if (config_.marking_type ==
        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
      concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
    }
  }
  return is_done;
}

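// Drains the marker's worklists in order: concurrent-marking bailout objects,
// previously-not-fully-constructed objects, the regular marking worklist, the
// write-barrier worklist, and ephemeron pairs that are ready for processing.
// Returns false as soon as either deadline is exceeded.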
bool MarkerBase::ProcessWorklistsWithDeadline(
    size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
  do {
    if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
        schedule_.ShouldFlushEphemeronPairs()) {
      mutator_marking_state_.FlushDiscoveredEphemeronPairs();
    }

    // Bailout objects may be complicated to trace and thus might take longer
    // than other objects. Therefore we reduce the interval between deadline
    // checks to guarantee the deadline is not exceeded.
    if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
                                               5>(
            mutator_marking_state_, marked_bytes_deadline, time_deadline,
            mutator_marking_state_.concurrent_marking_bailout_worklist(),
            [this](const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
              mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
              item.callback(&visitor(), item.parameter);
            })) {
      return false;
    }

    if (!DrainWorklistWithBytesAndTimeDeadline(
            mutator_marking_state_, marked_bytes_deadline, time_deadline,
            mutator_marking_state_.previously_not_fully_constructed_worklist(),
            [this](HeapObjectHeader* header) {
              mutator_marking_state_.AccountMarkedBytes(*header);
              DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                   *header);
            })) {
      return false;
    }

    if (!DrainWorklistWithBytesAndTimeDeadline(
            mutator_marking_state_, marked_bytes_deadline, time_deadline,
            mutator_marking_state_.marking_worklist(),
            [this](const MarkingWorklists::MarkingItem& item) {
              const HeapObjectHeader& header =
                  HeapObjectHeader::FromPayload(item.base_object_payload);
              DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
              DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
              mutator_marking_state_.AccountMarkedBytes(header);
              item.callback(&visitor(), item.base_object_payload);
            })) {
      return false;
    }

    if (!DrainWorklistWithBytesAndTimeDeadline(
            mutator_marking_state_, marked_bytes_deadline, time_deadline,
            mutator_marking_state_.write_barrier_worklist(),
            [this](HeapObjectHeader* header) {
              mutator_marking_state_.AccountMarkedBytes(*header);
              DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                   *header);
            })) {
      return false;
    }

    if (!DrainWorklistWithBytesAndTimeDeadline(
            mutator_marking_state_, marked_bytes_deadline, time_deadline,
            mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
            [this](const MarkingWorklists::EphemeronPairItem& item) {
              mutator_marking_state_.ProcessEphemeron(item.key,
                                                      item.value_desc);
            })) {
      return false;
    }
  } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty());
  return true;
}

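// Conservatively traces objects that were still under construction when they
// were discovered. This is used when the atomic pause runs with a stack that
// may contain heap pointers.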
void MarkerBase::MarkNotFullyConstructedObjects() {
  std::unordered_set<HeapObjectHeader*> objects =
      mutator_marking_state_.not_fully_constructed_worklist().Extract();
  for (HeapObjectHeader* object : objects) {
    DCHECK(object);
    if (!mutator_marking_state_.MarkNoPush(*object)) continue;
    // TraceConservativelyIfNeeded will either push to a worklist
    // or trace conservatively and call AccountMarkedBytes.
    conservative_visitor().TraceConservativelyIfNeeded(*object);
  }
}

void MarkerBase::ClearAllWorklistsForTesting() {
  marking_worklists_.ClearForTesting();
  auto* compaction_worklists = heap_.compactor().compaction_worklists();
  if (compaction_worklists) compaction_worklists->ClearForTesting();
}

void MarkerBase::DisableIncrementalMarkingForTesting() {
  incremental_marking_disabled_for_testing_ = true;
}

void MarkerBase::WaitForConcurrentMarkingForTesting() {
  concurrent_marker_->JoinForTesting();
}

void MarkerBase::NotifyCompactionCancelled() {
  // Compaction cannot be cancelled while concurrent marking is active.
  DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
  DCHECK_IMPLIES(concurrent_marker_, !concurrent_marker_->IsActive());
  mutator_marking_state_.NotifyCompactionCancelled();
}

Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
               MarkingConfig config)
    : MarkerBase(key, heap, platform, config),
      marking_visitor_(heap, mutator_marking_state_),
      conservative_marking_visitor_(heap, mutator_marking_state_,
                                    marking_visitor_) {
  concurrent_marker_ = std::make_unique<ConcurrentMarker>(
      heap_, marking_worklists_, schedule_, platform_);
}

}  // namespace internal
}  // namespace cppgc