// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/concurrent-marker.h"

#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/stats-collector.h"

namespace cppgc {
namespace internal {

namespace {

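// Fraction of the overall estimated marking duration after which the
// priority of concurrent marking tasks is increased if they have not
// reported progress; see IncreaseMarkingPriorityIfNeeded().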
static constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease =
    0.5;

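// Number of worklist items processed between two deadline checks in
// DrainWorklistWithYielding().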
static constexpr size_t kDefaultDeadlineCheckInterval = 750u;

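// Drains |worklist_local| by invoking |callback| on each item. Flushes the
// recently marked bytes to the incremental marking schedule on each deadline
// check and returns false if the job delegate requested a yield before the
// worklist was fully drained.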
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
          typename WorklistLocal, typename Callback>
bool DrainWorklistWithYielding(
    JobDelegate* job_delegate, ConcurrentMarkingState& marking_state,
    IncrementalMarkingSchedule& incremental_marking_schedule,
    WorklistLocal& worklist_local, Callback callback) {
  return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
      [&incremental_marking_schedule, &marking_state, job_delegate]() {
        incremental_marking_schedule.AddConcurrentlyMarkedBytes(
            marking_state.RecentlyMarkedBytes());
        return job_delegate->ShouldYield();
      },
      worklist_local, callback);
}

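// Returns the combined size of the worklists processed by the concurrent
// marker, which serves as an estimate of the maximum useful concurrency.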
size_t WorkSizeForConcurrentMarking(MarkingWorklists& marking_worklists) {
  return marking_worklists.marking_worklist()->Size() +
         marking_worklists.write_barrier_worklist()->Size() +
         marking_worklists.previously_not_fully_constructed_worklist()->Size();
}

// Checks whether worklists' global pools hold any segment a concurrent marker
// can steal. This is called before the concurrent marker holds any Locals, so
// there is no need to check local segments.
bool HasWorkForConcurrentMarking(MarkingWorklists& marking_worklists) {
  return !marking_worklists.marking_worklist()->IsEmpty() ||
         !marking_worklists.write_barrier_worklist()->IsEmpty() ||
         !marking_worklists.previously_not_fully_constructed_worklist()
              ->IsEmpty();
}

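// Job task that marks heap objects concurrently to the mutator. Each worker
// running the task operates on its own ConcurrentMarkingState.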
class ConcurrentMarkingTask final : public v8::JobTask {
 public:
  explicit ConcurrentMarkingTask(ConcurrentMarkerBase&);

  void Run(JobDelegate* delegate) final;

  size_t GetMaxConcurrency(size_t) const final;

 private:
  void ProcessWorklists(JobDelegate*, ConcurrentMarkingState&, Visitor&);

  const ConcurrentMarkerBase& concurrent_marker_;
};

ConcurrentMarkingTask::ConcurrentMarkingTask(
    ConcurrentMarkerBase& concurrent_marker)
    : concurrent_marker_(concurrent_marker) {}

void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
  StatsCollector::EnabledConcurrentScope stats_scope(
      concurrent_marker_.heap().stats_collector(),
      StatsCollector::kConcurrentMark);

  if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
    return;
  ConcurrentMarkingState concurrent_marking_state(
      concurrent_marker_.heap(), concurrent_marker_.marking_worklists(),
      concurrent_marker_.heap().compactor().compaction_worklists());
  std::unique_ptr<Visitor> concurrent_marking_visitor =
      concurrent_marker_.CreateConcurrentMarkingVisitor(
          concurrent_marking_state);
  ProcessWorklists(job_delegate, concurrent_marking_state,
                   *concurrent_marking_visitor);
  concurrent_marker_.incremental_marking_schedule().AddConcurrentlyMarkedBytes(
      concurrent_marking_state.RecentlyMarkedBytes());
  concurrent_marking_state.Publish();
}

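// Reports the desired concurrency to the job scheduler: one worker per
// outstanding work item, in addition to the workers already running.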
size_t ConcurrentMarkingTask::GetMaxConcurrency(
    size_t current_worker_count) const {
  return WorkSizeForConcurrentMarking(concurrent_marker_.marking_worklists()) +
         current_worker_count;
}

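// Drains the worklists in order: previously-not-fully-constructed objects,
// the regular marking worklist, the write-barrier worklist, objects to be
// re-traced, and finally ephemeron pairs. Bails out as soon as any drain is
// interrupted by a yield request.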
void ConcurrentMarkingTask::ProcessWorklists(
    JobDelegate* job_delegate, ConcurrentMarkingState& concurrent_marking_state,
    Visitor& concurrent_marking_visitor) {
  do {
    if (!DrainWorklistWithYielding(
            job_delegate, concurrent_marking_state,
            concurrent_marker_.incremental_marking_schedule(),
            concurrent_marking_state
                .previously_not_fully_constructed_worklist(),
            [&concurrent_marking_state,
             &concurrent_marking_visitor](HeapObjectHeader* header) {
              BasePage::FromPayload(header)->SynchronizedLoad();
              concurrent_marking_state.AccountMarkedBytes(*header);
              DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
                  concurrent_marking_visitor, *header);
            })) {
      return;
    }

    if (!DrainWorklistWithYielding(
            job_delegate, concurrent_marking_state,
            concurrent_marker_.incremental_marking_schedule(),
            concurrent_marking_state.marking_worklist(),
            [&concurrent_marking_state, &concurrent_marking_visitor](
                const MarkingWorklists::MarkingItem& item) {
              BasePage::FromPayload(item.base_object_payload)
                  ->SynchronizedLoad();
              const HeapObjectHeader& header =
                  HeapObjectHeader::FromObject(item.base_object_payload);
              DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
              DCHECK(header.IsMarked<AccessMode::kAtomic>());
              concurrent_marking_state.AccountMarkedBytes(header);
              item.callback(&concurrent_marking_visitor,
                            item.base_object_payload);
            })) {
      return;
    }

    if (!DrainWorklistWithYielding(
            job_delegate, concurrent_marking_state,
            concurrent_marker_.incremental_marking_schedule(),
            concurrent_marking_state.write_barrier_worklist(),
            [&concurrent_marking_state,
             &concurrent_marking_visitor](HeapObjectHeader* header) {
              BasePage::FromPayload(header)->SynchronizedLoad();
              concurrent_marking_state.AccountMarkedBytes(*header);
              DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
                  concurrent_marking_visitor, *header);
            })) {
      return;
    }

    if (!DrainWorklistWithYielding(
            job_delegate, concurrent_marking_state,
            concurrent_marker_.incremental_marking_schedule(),
            concurrent_marking_state.retrace_marked_objects_worklist(),
            [&concurrent_marking_visitor](HeapObjectHeader* header) {
              BasePage::FromPayload(header)->SynchronizedLoad();
              // Retracing does not increment marked bytes as the object has
              // already been processed before.
              DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
                  concurrent_marking_visitor, *header);
            })) {
      return;
    }

    {
      StatsCollector::DisabledConcurrentScope stats_scope(
          concurrent_marker_.heap().stats_collector(),
          StatsCollector::kConcurrentMarkProcessEphemerons);
      if (!DrainWorklistWithYielding(
              job_delegate, concurrent_marking_state,
              concurrent_marker_.incremental_marking_schedule(),
              concurrent_marking_state
                  .ephemeron_pairs_for_processing_worklist(),
              [&concurrent_marking_state, &concurrent_marking_visitor](
                  const MarkingWorklists::EphemeronPairItem& item) {
                concurrent_marking_state.ProcessEphemeron(
                    item.key, item.value, item.value_desc,
                    concurrent_marking_visitor);
              })) {
        return;
      }
    }
  } while (
      !concurrent_marking_state.marking_worklist().IsLocalAndGlobalEmpty());
}

}  // namespace

ConcurrentMarkerBase::ConcurrentMarkerBase(
    HeapBase& heap, MarkingWorklists& marking_worklists,
    IncrementalMarkingSchedule& incremental_marking_schedule,
    cppgc::Platform* platform)
    : heap_(heap),
      marking_worklists_(marking_worklists),
      incremental_marking_schedule_(incremental_marking_schedule),
      platform_(platform) {}

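// Concurrent marking starts at kUserVisible priority; it may later be raised
// by IncreaseMarkingPriorityIfNeeded().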
void ConcurrentMarkerBase::Start() {
  DCHECK(platform_);
  concurrent_marking_handle_ =
      platform_->PostJob(v8::TaskPriority::kUserVisible,
                         std::make_unique<ConcurrentMarkingTask>(*this));
}

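// Blocks until the concurrent marking job has completed. Returns false if
// there is no job to join.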
bool ConcurrentMarkerBase::Join() {
  if (!concurrent_marking_handle_ || !concurrent_marking_handle_->IsValid())
    return false;

  concurrent_marking_handle_->Join();
  return true;
}

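// Aborts the concurrent marking job. Returns false if there is no job to
// cancel.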
bool ConcurrentMarkerBase::Cancel() {
  if (!concurrent_marking_handle_ || !concurrent_marking_handle_->IsValid())
    return false;

  concurrent_marking_handle_->Cancel();
  return true;
}

bool ConcurrentMarkerBase::IsActive() const {
  return concurrent_marking_handle_ && concurrent_marking_handle_->IsValid();
}

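// The job is expected to have been joined or cancelled before the marker is
// destroyed.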
ConcurrentMarkerBase::~ConcurrentMarkerBase() {
  CHECK_IMPLIES(concurrent_marking_handle_,
                !concurrent_marking_handle_->IsValid());
}

void ConcurrentMarkerBase::NotifyIncrementalMutatorStepCompleted() {
  DCHECK(concurrent_marking_handle_);
  if (HasWorkForConcurrentMarking(marking_worklists_)) {
    // Notifies the scheduler that max concurrency might have increased.
    // This will adjust the number of markers if necessary.
    IncreaseMarkingPriorityIfNeeded();
    concurrent_marking_handle_->NotifyConcurrencyIncrease();
  }
}

void ConcurrentMarkerBase::IncreaseMarkingPriorityIfNeeded() {
  if (!concurrent_marking_handle_->UpdatePriorityEnabled()) return;
  if (concurrent_marking_priority_increased_) return;
  // If concurrent tasks aren't being executed, GC finalization may be
  // delayed. As long as GC is active so is the write barrier, which incurs a
  // performance cost. Marking is estimated to take overall
  // |IncrementalMarkingSchedule::kEstimatedMarkingTimeMs|. If concurrent
  // marking tasks have not reported any progress (i.e. the concurrently
  // marked bytes count has not changed) for more than
  // |kMarkingScheduleRatioBeforeConcurrentPriorityIncrease| of that expected
  // duration, we increase the concurrent task priority for the duration of
  // the current GC. This is meant to prevent the GC from exceeding its
  // expected end time.
  const size_t current_concurrently_marked_bytes =
      incremental_marking_schedule_.GetConcurrentlyMarkedBytes();
  if (current_concurrently_marked_bytes > last_concurrently_marked_bytes_) {
    last_concurrently_marked_bytes_ = current_concurrently_marked_bytes;
    last_concurrently_marked_bytes_update_ = v8::base::TimeTicks::Now();
  } else if ((v8::base::TimeTicks::Now() -
              last_concurrently_marked_bytes_update_)
                 .InMilliseconds() >
             kMarkingScheduleRatioBeforeConcurrentPriorityIncrease *
                 IncrementalMarkingSchedule::kEstimatedMarkingTimeMs) {
    concurrent_marking_handle_->UpdatePriority(
        cppgc::TaskPriority::kUserBlocking);
    concurrent_marking_priority_increased_ = true;
  }
}

std::unique_ptr<Visitor> ConcurrentMarker::CreateConcurrentMarkingVisitor(
    ConcurrentMarkingState& marking_state) const {
  return std::make_unique<ConcurrentMarkingVisitor>(heap(), marking_state);
}

}  // namespace internal
}  // namespace cppgc