// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/marker.h"

#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_set>

#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/write-barrier.h"

#if defined(CPPGC_CAGED_HEAP)
#include "include/cppgc/internal/caged-heap-local-data.h"
#endif

namespace cppgc {
namespace internal {

namespace {

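// Enter/ExitIncrementalMarkingIfNeeded toggle the global write-barrier flag
// (and, with CPPGC_CAGED_HEAP, the per-heap flag in the caged heap's local
// data) for incremental and concurrent marking configurations. They return
// true iff the flag was toggled.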
bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
                                     HeapBase& heap) {
  if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
#if defined(CPPGC_CAGED_HEAP)
    heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
#endif  // defined(CPPGC_CAGED_HEAP)
    return true;
  }
  return false;
}

bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
                                    HeapBase& heap) {
  if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
#if defined(CPPGC_CAGED_HEAP)
    heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
#endif  // defined(CPPGC_CAGED_HEAP)
    return true;
  }
  return false;
}

static constexpr size_t kDefaultDeadlineCheckInterval = 150u;

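// Drains |worklist_local| by invoking |callback| on each item until the
// worklist is empty or either the marked-bytes budget or the wall-clock
// deadline is exceeded. kDeadlineCheckInterval bounds how often the deadline
// predicate is re-evaluated while draining.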
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
          typename WorklistLocal, typename Callback>
bool DrainWorklistWithBytesAndTimeDeadline(BasicMarkingState& marking_state,
                                           size_t marked_bytes_deadline,
                                           v8::base::TimeTicks time_deadline,
                                           WorklistLocal& worklist_local,
                                           Callback callback) {
  return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
      [&marking_state, marked_bytes_deadline, time_deadline]() {
        return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
               (time_deadline <= v8::base::TimeTicks::Now());
      },
      worklist_local, callback);
}

size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
                                      HeapBase& heap) {
  return schedule.GetNextIncrementalStepDuration(
      heap.stats_collector()->allocated_object_size());
}

}  // namespace

constexpr v8::base::TimeDelta MarkerBase::kMaximumIncrementalStepDuration;

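// Task posted on the foreground task runner that drives incremental marking.
// Each run performs a single IncrementalMarkingStep() and, if marking
// completed, requests finalization of the garbage collection. The task is
// cancelable via its SingleThreadedHandle.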
class MarkerBase::IncrementalMarkingTask final : public cppgc::Task {
 public:
  using Handle = SingleThreadedHandle;

  IncrementalMarkingTask(MarkerBase*, MarkingConfig::StackState);

  static Handle Post(cppgc::TaskRunner*, MarkerBase*);

 private:
  void Run() final;

  MarkerBase* const marker_;
  MarkingConfig::StackState stack_state_;
  // TODO(chromium:1056170): Change to CancelableTask.
  Handle handle_;
};

MarkerBase::IncrementalMarkingTask::IncrementalMarkingTask(
    MarkerBase* marker, MarkingConfig::StackState stack_state)
    : marker_(marker),
      stack_state_(stack_state),
      handle_(Handle::NonEmptyTag{}) {}

// static
MarkerBase::IncrementalMarkingTask::Handle
MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
                                         MarkerBase* marker) {
  // Incremental GC is possible only via the GCInvoker, so getting here
  // guarantees that either non-nestable tasks or conservative stack
  // scanning are supported. This is required so that the incremental
  // task can safely finalize GC if needed.
  DCHECK_IMPLIES(marker->heap().stack_support() !=
                     HeapBase::StackSupport::kSupportsConservativeStackScan,
                 runner->NonNestableTasksEnabled());
  MarkingConfig::StackState stack_state_for_task =
      runner->NonNestableTasksEnabled()
          ? MarkingConfig::StackState::kNoHeapPointers
          : MarkingConfig::StackState::kMayContainHeapPointers;
  auto task =
      std::make_unique<IncrementalMarkingTask>(marker, stack_state_for_task);
  auto handle = task->handle_;
  if (runner->NonNestableTasksEnabled()) {
    runner->PostNonNestableTask(std::move(task));
  } else {
    runner->PostTask(std::move(task));
  }
  return handle;
}

void MarkerBase::IncrementalMarkingTask::Run() {
  if (handle_.IsCanceled()) return;

  StatsCollector::EnabledScope stats_scope(marker_->heap().stats_collector(),
                                           StatsCollector::kIncrementalMark);

  if (marker_->IncrementalMarkingStep(stack_state_)) {
    // Incremental marking is done, so the GC should be finalized.
    marker_->heap().FinalizeIncrementalGarbageCollectionIfNeeded(stack_state_);
  }
}

MarkerBase::MarkerBase(HeapBase& heap, cppgc::Platform* platform,
                       MarkingConfig config)
    : heap_(heap),
      config_(config),
      platform_(platform),
      foreground_task_runner_(platform_->GetForegroundTaskRunner()),
      mutator_marking_state_(heap, marking_worklists_,
                             heap.compactor().compaction_worklists()) {}

MarkerBase::~MarkerBase() {
  // The fixed point iteration may have found not-fully-constructed objects.
  // Such objects should already have been found through the stack scan,
  // though, and should thus already be marked.
  if (!marking_worklists_.not_fully_constructed_worklist()->IsEmpty()) {
#if DEBUG
    DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
    std::unordered_set<HeapObjectHeader*> objects =
        mutator_marking_state_.not_fully_constructed_worklist().Extract();
    for (HeapObjectHeader* object : objects) DCHECK(object->IsMarked());
#else
    marking_worklists_.not_fully_constructed_worklist()->Clear();
#endif
  }

  // |discovered_ephemeron_pairs_worklist_| may still hold ephemeron pairs with
  // dead keys.
  if (!marking_worklists_.discovered_ephemeron_pairs_worklist()->IsEmpty()) {
#if DEBUG
    MarkingWorklists::EphemeronPairItem item;
    while (mutator_marking_state_.discovered_ephemeron_pairs_worklist().Pop(
        &item)) {
      DCHECK(!HeapObjectHeader::FromObject(item.key).IsMarked());
    }
#else
    marking_worklists_.discovered_ephemeron_pairs_worklist()->Clear();
#endif
  }

  marking_worklists_.weak_containers_worklist()->Clear();
}

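// Observer that advances marking as the mutator allocates: once at least
// kMinAllocatedBytesPerStep bytes have been allocated since the last step,
// another marking step is performed on allocation.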
class MarkerBase::IncrementalMarkingAllocationObserver final
    : public StatsCollector::AllocationObserver {
 public:
  static constexpr size_t kMinAllocatedBytesPerStep = 256 * kKB;

  explicit IncrementalMarkingAllocationObserver(MarkerBase& marker)
      : marker_(marker) {}

  void AllocatedObjectSizeIncreased(size_t delta) final {
    current_allocated_size_ += delta;
    if (current_allocated_size_ > kMinAllocatedBytesPerStep) {
      marker_.AdvanceMarkingOnAllocation();
      current_allocated_size_ = 0;
    }
  }

 private:
  MarkerBase& marker_;
  size_t current_allocated_size_ = 0;
};

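// Starts a marking cycle. For incremental/concurrent configurations this
// visits roots without scanning the stack, schedules the incremental marking
// task, optionally starts concurrent markers, and registers the allocation
// observer that drives further marking steps.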
void MarkerBase::StartMarking() {
  DCHECK(!is_marking_);
  StatsCollector::EnabledScope stats_scope(
      heap().stats_collector(),
      config_.marking_type == MarkingConfig::MarkingType::kAtomic
          ? StatsCollector::kAtomicMark
          : StatsCollector::kIncrementalMark);

  heap().stats_collector()->NotifyMarkingStarted(config_.collection_type,
                                                 config_.is_forced_gc);

  is_marking_ = true;
  if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
    StatsCollector::EnabledScope inner_stats_scope(
        heap().stats_collector(), StatsCollector::kMarkIncrementalStart);

    // Performing incremental or concurrent marking.
    schedule_.NotifyIncrementalMarkingStart();
    // Scanning the stack is expensive so we only do it at the atomic pause.
    VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
    ScheduleIncrementalMarkingTask();
    if (config_.marking_type ==
        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
      mutator_marking_state_.Publish();
      concurrent_marker_->Start();
    }
    incremental_marking_allocation_observer_ =
        std::make_unique<IncrementalMarkingAllocationObserver>(*this);
    heap().stats_collector()->RegisterObserver(
        incremental_marking_allocation_observer_.get());
  }
}

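// Objects discovered while still in construction cannot be traced via their
// Trace() method yet. If the stack is known to hold no heap pointers, they
// are flushed to be traced later; otherwise they are conservatively traced
// right away (see MarkNotFullyConstructedObjects).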
void MarkerBase::HandleNotFullyConstructedObjects() {
  if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
    mutator_marking_state_.FlushNotFullyConstructedObjects();
  } else {
    MarkNotFullyConstructedObjects();
  }
}

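// Transitions marking into the atomic pause: cancels pending incremental
// tasks and the allocation observer, switches the configuration to atomic
// marking, re-visits roots with the final stack state, and hands remaining
// work to (re)started concurrent markers where supported.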
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
  StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
                                               StatsCollector::kAtomicMark);
  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kMarkAtomicPrologue);

  if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
    // Cancel remaining incremental tasks. Concurrent marking jobs are left to
    // run in parallel with the atomic pause until the mutator thread runs out
    // of work.
    incremental_marking_handle_.Cancel();
    heap().stats_collector()->UnregisterObserver(
        incremental_marking_allocation_observer_.get());
    incremental_marking_allocation_observer_.reset();
  }
  config_.stack_state = stack_state;
  config_.marking_type = MarkingConfig::MarkingType::kAtomic;
  mutator_marking_state_.set_in_atomic_pause();

  {
    // VisitRoots also resets the LABs.
    VisitRoots(config_.stack_state);
    HandleNotFullyConstructedObjects();
  }
  if (heap().marking_support() ==
      MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    // Start parallel marking.
    mutator_marking_state_.Publish();
    if (concurrent_marker_->IsActive()) {
      concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
    } else {
      concurrent_marker_->Start();
    }
  }
}

void MarkerBase::LeaveAtomicPause() {
  {
    StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
                                                 StatsCollector::kAtomicMark);
    StatsCollector::EnabledScope stats_scope(
        heap().stats_collector(), StatsCollector::kMarkAtomicEpilogue);
    DCHECK(!incremental_marking_handle_);
    heap().stats_collector()->NotifyMarkingCompleted(
        // GetOverallMarkedBytes also includes concurrently marked bytes.
        schedule_.GetOverallMarkedBytes());
    is_marking_ = false;
  }
  {
    // Weakness callbacks are forbidden from allocating objects.
    cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_);
    ProcessWeakness();
  }
  // TODO(chromium:1056170): It would be better if the call to Unlock was
  // covered by some cppgc scope.
  g_process_mutex.Pointer()->Unlock();
  heap().SetStackStateOfPrevGC(config_.stack_state);
}

void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
  DCHECK(is_marking_);
  EnterAtomicPause(stack_state);
  {
    StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                             StatsCollector::kAtomicMark);
    CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
    if (JoinConcurrentMarkingIfNeeded()) {
      CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
    }
    mutator_marking_state_.Publish();
  }
  LeaveAtomicPause();
}

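// Weakness processing runs after marking has completed: it traces the weak
// persistent regions and invokes the collected weak callbacks with a
// LivenessBroker. Allocation is disallowed while this runs (see
// LeaveAtomicPause).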
void MarkerBase::ProcessWeakness() {
  DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);

  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kAtomicWeak);

  heap().GetWeakPersistentRegion().Trace(&visitor());
  // Processing cross-thread handles requires taking the process lock.
  g_process_mutex.Get().AssertHeld();
  CHECK(visited_cross_thread_persistents_in_atomic_pause_);
  heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());

  // Call weak callbacks on objects that may now be pointing to dead objects.
  LivenessBroker broker = LivenessBrokerFactory::Create();
#if defined(CPPGC_YOUNG_GENERATION)
  auto& remembered_set = heap().remembered_set();
  if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
    // Custom callbacks assume that untraced pointers point to not yet freed
    // objects. They must make sure that upon callback completion no
    // UntracedMember points to a freed object. This may not hold true if a
    // custom callback for an old object operates with a reference to a young
    // object that was freed on a minor collection cycle. To maintain the
    // invariant that UntracedMembers always point to valid objects, execute
    // custom callbacks for old objects on each minor collection cycle.
    remembered_set.ExecuteCustomCallbacks(broker);
  } else {
    // For major GCs, just release all the remembered weak callbacks.
    remembered_set.ReleaseCustomCallbacks();
  }
#endif  // defined(CPPGC_YOUNG_GENERATION)

  MarkingWorklists::WeakCallbackItem item;
  MarkingWorklists::WeakCallbackWorklist::Local& local =
      mutator_marking_state_.weak_callback_worklist();
  while (local.Pop(&item)) {
    item.callback(broker, item.parameter);
#if defined(CPPGC_YOUNG_GENERATION)
    heap().remembered_set().AddWeakCallback(item);
#endif  // defined(CPPGC_YOUNG_GENERATION)
  }

  // Weak callbacks should not add any new objects for marking.
  DCHECK(marking_worklists_.marking_worklist()->IsEmpty());
}

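// Marks roots on the mutator thread: strong persistent handles, optionally
// conservative pointers found on the stack, and, for minor collections, the
// remembered set.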
void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kMarkVisitRoots);

  // Reset LABs before scanning roots. LABs are cleared to allow
  // ObjectStartBitmap handling without considering LABs.
  heap().object_allocator().ResetLinearAllocationBuffers();

  {
    {
      StatsCollector::DisabledScope inner_stats_scope(
          heap().stats_collector(), StatsCollector::kMarkVisitPersistents);
      heap().GetStrongPersistentRegion().Trace(&visitor());
    }
  }

  if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
    StatsCollector::DisabledScope stack_stats_scope(
        heap().stats_collector(), StatsCollector::kMarkVisitStack);
    heap().stack()->IteratePointers(&stack_visitor());
  }
#if defined(CPPGC_YOUNG_GENERATION)
  if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
    StatsCollector::EnabledScope stats_scope(
        heap().stats_collector(), StatsCollector::kMarkVisitRememberedSets);
    heap().remembered_set().Visit(visitor(), mutator_marking_state_);
  }
#endif  // defined(CPPGC_YOUNG_GENERATION)
}

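// In the atomic pause, takes the process lock and traces strong cross-thread
// persistent handles exactly once. Returns true if any such handles were in
// use, i.e. if tracing may have produced additional marking work.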
bool MarkerBase::VisitCrossThreadPersistentsIfNeeded() {
  if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
      visited_cross_thread_persistents_in_atomic_pause_)
    return false;

  StatsCollector::DisabledScope inner_stats_scope(
      heap().stats_collector(),
      StatsCollector::kMarkVisitCrossThreadPersistents);
  // Lock guards against changes to {Weak}CrossThreadPersistent handles that
  // may conflict with marking. E.g., a WeakCrossThreadPersistent may be
  // converted into a CrossThreadPersistent, which requires that the handle
  // is either cleared or the object is retained.
  g_process_mutex.Pointer()->Lock();
  heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
  visited_cross_thread_persistents_in_atomic_pause_ = true;
  return (heap().GetStrongCrossThreadPersistentRegion().NodesInUse() > 0);
}

void MarkerBase::ScheduleIncrementalMarkingTask() {
  DCHECK(platform_);
  if (!foreground_task_runner_ || incremental_marking_handle_) return;
  incremental_marking_handle_ =
      IncrementalMarkingTask::Post(foreground_task_runner_.get(), this);
}

bool MarkerBase::IncrementalMarkingStepForTesting(
    MarkingConfig::StackState stack_state) {
  return IncrementalMarkingStep(stack_state);
}

bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
  if (stack_state == MarkingConfig::StackState::kNoHeapPointers) {
    mutator_marking_state_.FlushNotFullyConstructedObjects();
  }
  config_.stack_state = stack_state;

  return AdvanceMarkingWithLimits();
}

void MarkerBase::AdvanceMarkingOnAllocation() {
  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kIncrementalMark);
  StatsCollector::EnabledScope nested_scope(heap().stats_collector(),
                                            StatsCollector::kMarkOnAllocation);
  if (AdvanceMarkingWithLimits()) {
    // Schedule another incremental task for finalizing without a stack.
    ScheduleIncrementalMarkingTask();
  }
}

bool MarkerBase::JoinConcurrentMarkingIfNeeded() {
  if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
      !concurrent_marker_->Join())
    return false;

  // Concurrent markers may have pushed some "leftover" in-construction objects
  // after flushing in EnterAtomicPause.
  HandleNotFullyConstructedObjects();
  DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
  return true;
}

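// Performs one bounded marking step on the mutator thread. A zero
// |marked_bytes_limit| means the incremental marking schedule supplies the
// byte budget. Returns true if all worklists were drained within the limits;
// otherwise another incremental task is scheduled.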
bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
                                          size_t marked_bytes_limit) {
  bool is_done = false;
  if (!main_marking_disabled_for_testing_) {
    if (marked_bytes_limit == 0) {
      marked_bytes_limit = mutator_marking_state_.marked_bytes() +
                           GetNextIncrementalStepDuration(schedule_, heap_);
    }
    StatsCollector::EnabledScope deadline_scope(
        heap().stats_collector(),
        StatsCollector::kMarkTransitiveClosureWithDeadline, "deadline_ms",
        max_duration.InMillisecondsF());
    const auto deadline = v8::base::TimeTicks::Now() + max_duration;
    is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
    if (is_done && VisitCrossThreadPersistentsIfNeeded()) {
      // Both limits are absolute and hence can be passed along without further
      // adjustment.
      is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
    }
    schedule_.UpdateMutatorThreadMarkedBytes(
        mutator_marking_state_.marked_bytes());
  }
  mutator_marking_state_.Publish();
  if (!is_done) {
    // If marking is atomic, |is_done| should always be true.
    DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
    ScheduleIncrementalMarkingTask();
    if (config_.marking_type ==
        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
      concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
    }
  }
  return is_done;
}

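// Drains the mutator-side worklists (concurrent-marking bailouts, previously
// not-fully-constructed objects, the main marking worklist, write-barrier and
// retrace worklists, and ephemeron pairs) until they are empty or a deadline
// is hit, iterating until no new ephemeron pairs are discovered.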
bool MarkerBase::ProcessWorklistsWithDeadline(
    size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
  StatsCollector::EnabledScope stats_scope(
      heap().stats_collector(), StatsCollector::kMarkTransitiveClosure);
  bool saved_did_discover_new_ephemeron_pairs;
  do {
    mutator_marking_state_.ResetDidDiscoverNewEphemeronPairs();
    if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
        schedule_.ShouldFlushEphemeronPairs()) {
      mutator_marking_state_.FlushDiscoveredEphemeronPairs();
    }

    // Bailout objects may be complicated to trace and thus might take longer
    // than other objects. Therefore we reduce the interval between deadline
    // checks to guarantee the deadline is not exceeded.
    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(), StatsCollector::kMarkProcessBailOutObjects);
      if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
                                                 5>(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.concurrent_marking_bailout_worklist(),
              [this](
                  const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
                mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
                item.callback(&visitor(), item.parameter);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(),
          StatsCollector::kMarkProcessNotFullyconstructedWorklist);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_
                  .previously_not_fully_constructed_worklist(),
              [this](HeapObjectHeader* header) {
                mutator_marking_state_.AccountMarkedBytes(*header);
                DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                     *header);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(),
          StatsCollector::kMarkProcessMarkingWorklist);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.marking_worklist(),
              [this](const MarkingWorklists::MarkingItem& item) {
                const HeapObjectHeader& header =
                    HeapObjectHeader::FromObject(item.base_object_payload);
                DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
                DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
                mutator_marking_state_.AccountMarkedBytes(header);
                item.callback(&visitor(), item.base_object_payload);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(),
          StatsCollector::kMarkProcessWriteBarrierWorklist);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.write_barrier_worklist(),
              [this](HeapObjectHeader* header) {
                mutator_marking_state_.AccountMarkedBytes(*header);
                DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                     *header);
              })) {
        return false;
      }
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.retrace_marked_objects_worklist(),
              [this](HeapObjectHeader* header) {
                // Retracing does not increment marked bytes as the object has
                // already been processed before.
                DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                     *header);
              })) {
        return false;
      }
    }

    saved_did_discover_new_ephemeron_pairs =
        mutator_marking_state_.DidDiscoverNewEphemeronPairs();
    {
      StatsCollector::EnabledScope inner_stats_scope(
          heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
              [this](const MarkingWorklists::EphemeronPairItem& item) {
                mutator_marking_state_.ProcessEphemeron(
                    item.key, item.value, item.value_desc, visitor());
              })) {
        return false;
      }
    }
  } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty() ||
           saved_did_discover_new_ephemeron_pairs);
  return true;
}

void MarkerBase::MarkNotFullyConstructedObjects() {
  StatsCollector::DisabledScope stats_scope(
      heap().stats_collector(),
      StatsCollector::kMarkVisitNotFullyConstructedObjects);
  std::unordered_set<HeapObjectHeader*> objects =
      mutator_marking_state_.not_fully_constructed_worklist().Extract();
  for (HeapObjectHeader* object : objects) {
    DCHECK(object);
    // TraceConservativelyIfNeeded delegates to either in-construction or
    // fully constructed handling. Both handlers have their own marked bytes
    // accounting and markbit handling (bailout).
    conservative_visitor().TraceConservativelyIfNeeded(*object);
  }
}

void MarkerBase::ClearAllWorklistsForTesting() {
  marking_worklists_.ClearForTesting();
  auto* compaction_worklists = heap_.compactor().compaction_worklists();
  if (compaction_worklists) compaction_worklists->ClearForTesting();
}

void MarkerBase::SetMainThreadMarkingDisabledForTesting(bool value) {
  main_marking_disabled_for_testing_ = value;
}

void MarkerBase::WaitForConcurrentMarkingForTesting() {
  concurrent_marker_->Join();
}

MarkerBase::PauseConcurrentMarkingScope::PauseConcurrentMarkingScope(
    MarkerBase& marker)
    : marker_(marker), resume_on_exit_(marker_.concurrent_marker_->Cancel()) {}

MarkerBase::PauseConcurrentMarkingScope::~PauseConcurrentMarkingScope() {
  if (resume_on_exit_) {
    marker_.concurrent_marker_->Start();
  }
}

Marker::Marker(HeapBase& heap, cppgc::Platform* platform, MarkingConfig config)
    : MarkerBase(heap, platform, config),
      marking_visitor_(heap, mutator_marking_state_),
      conservative_marking_visitor_(heap, mutator_marking_state_,
                                    marking_visitor_) {
  concurrent_marker_ = std::make_unique<ConcurrentMarker>(
      heap_, marking_worklists_, schedule_, platform_);
}

}  // namespace internal
}  // namespace cppgc