// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/concurrent-marker.h"

#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"

namespace cppgc {
namespace internal {

namespace {

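// Fraction of the overall estimated marking time after which the priority of
// concurrent marking tasks is increased if they have not reported progress;
// see IncreaseMarkingPriorityIfNeeded() below.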
static constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease =
    0.5;

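// Default number of worklist entries processed between two checks of the
// yield deadline (JobDelegate::ShouldYield()).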
static constexpr size_t kDefaultDeadlineCheckInterval = 750u;

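// Drains |worklist_local| by invoking |callback| on each entry. Periodically
// flushes the bytes marked so far into the incremental marking schedule and
// checks whether the job should yield. Returns true if the worklist was fully
// drained, and false if processing stopped early due to a yield request.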
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
          typename WorklistLocal, typename Callback>
bool DrainWorklistWithYielding(
    JobDelegate* job_delegate, ConcurrentMarkingState& marking_state,
    IncrementalMarkingSchedule& incremental_marking_schedule,
    WorklistLocal& worklist_local, Callback callback) {
  return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
      [&incremental_marking_schedule, &marking_state, job_delegate]() {
        incremental_marking_schedule.AddConcurrentlyMarkedBytes(
            marking_state.RecentlyMarkedBytes());
        return job_delegate->ShouldYield();
      },
      worklist_local, callback);
}

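// Returns the total number of entries in the worklists that concurrent
// markers drain; used as an estimate of the remaining parallelizable work.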
size_t WorkSizeForConcurrentMarking(MarkingWorklists& marking_worklists) {
  return marking_worklists.marking_worklist()->Size() +
         marking_worklists.write_barrier_worklist()->Size() +
         marking_worklists.previously_not_fully_constructed_worklist()->Size();
}

// Checks whether worklists' global pools hold any segment a concurrent marker
// can steal. This is called before the concurrent marker holds any Locals, so
// no need to check local segments.
bool HasWorkForConcurrentMarking(MarkingWorklists& marking_worklists) {
  return !marking_worklists.marking_worklist()->IsEmpty() ||
         !marking_worklists.write_barrier_worklist()->IsEmpty() ||
         !marking_worklists.previously_not_fully_constructed_worklist()
              ->IsEmpty();
}

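// Job task that drains the marking worklists concurrently to the mutator
// thread. The platform may run several instances of this task in parallel,
// bounded by GetMaxConcurrency().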
class ConcurrentMarkingTask final : public v8::JobTask {
 public:
  explicit ConcurrentMarkingTask(ConcurrentMarkerBase&);

  void Run(JobDelegate* delegate) final;

  size_t GetMaxConcurrency(size_t) const final;

 private:
  void ProcessWorklists(JobDelegate*, ConcurrentMarkingState&, Visitor&);

  const ConcurrentMarkerBase& concurrent_marker_;
};

ConcurrentMarkingTask::ConcurrentMarkingTask(
    ConcurrentMarkerBase& concurrent_marker)
    : concurrent_marker_(concurrent_marker) {}

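// Entry point for a concurrent marking worker. Bails out early if there is no
// work that could be stolen from the global worklists; otherwise drains the
// worklists and publishes the marked bytes back to the schedule.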
void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
  if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
    return;
  ConcurrentMarkingState concurrent_marking_state(
      concurrent_marker_.heap(), concurrent_marker_.marking_worklists(),
      concurrent_marker_.heap().compactor().compaction_worklists());
  std::unique_ptr<Visitor> concurrent_marking_visitor =
      concurrent_marker_.CreateConcurrentMarkingVisitor(
          concurrent_marking_state);
  ProcessWorklists(job_delegate, concurrent_marking_state,
                   *concurrent_marking_visitor.get());
  concurrent_marker_.incremental_marking_schedule().AddConcurrentlyMarkedBytes(
      concurrent_marking_state.RecentlyMarkedBytes());
  concurrent_marking_state.Publish();
}

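// The desired number of workers scales with the amount of pending work, so
// large worklists are drained with more parallelism.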
size_t ConcurrentMarkingTask::GetMaxConcurrency(
    size_t current_worker_count) const {
  return WorkSizeForConcurrentMarking(concurrent_marker_.marking_worklists()) +
         current_worker_count;
}

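// Repeatedly drains the individual worklists until either all of them are
// empty or the job is asked to yield. Processing an entry may push new
// entries, hence the outer loop.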
void ConcurrentMarkingTask::ProcessWorklists(
    JobDelegate* job_delegate, ConcurrentMarkingState& concurrent_marking_state,
    Visitor& concurrent_marking_visitor) {
  do {
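    // First, trace objects that were discovered while still in construction
    // and therefore could not be processed at the time of discovery.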
    if (!DrainWorklistWithYielding(
            job_delegate, concurrent_marking_state,
            concurrent_marker_.incremental_marking_schedule(),
            concurrent_marking_state
                .previously_not_fully_constructed_worklist(),
            [&concurrent_marking_state,
             &concurrent_marking_visitor](HeapObjectHeader* header) {
              BasePage::FromPayload(header)->SynchronizedLoad();
              concurrent_marking_state.AccountMarkedBytes(*header);
              DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
                  concurrent_marking_visitor, *header);
            })) {
      return;
    }

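    // Trace objects from the regular marking worklist.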
    if (!DrainWorklistWithYielding(
            job_delegate, concurrent_marking_state,
            concurrent_marker_.incremental_marking_schedule(),
            concurrent_marking_state.marking_worklist(),
            [&concurrent_marking_state, &concurrent_marking_visitor](
                const MarkingWorklists::MarkingItem& item) {
              BasePage::FromPayload(item.base_object_payload)
                  ->SynchronizedLoad();
              const HeapObjectHeader& header =
                  HeapObjectHeader::FromPayload(item.base_object_payload);
              DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
              DCHECK(header.IsMarked<AccessMode::kAtomic>());
              concurrent_marking_state.AccountMarkedBytes(header);
              item.callback(&concurrent_marking_visitor,
                            item.base_object_payload);
            })) {
      return;
    }

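    // Trace objects that were recorded by the write barrier while the
    // mutator was running.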
    if (!DrainWorklistWithYielding(
            job_delegate, concurrent_marking_state,
            concurrent_marker_.incremental_marking_schedule(),
            concurrent_marking_state.write_barrier_worklist(),
            [&concurrent_marking_state,
             &concurrent_marking_visitor](HeapObjectHeader* header) {
              BasePage::FromPayload(header)->SynchronizedLoad();
              concurrent_marking_state.AccountMarkedBytes(*header);
              DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
                  concurrent_marking_visitor, *header);
            })) {
      return;
    }

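    // Process discovered ephemeron pairs: a value is only traced if its key
    // has been marked.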
    if (!DrainWorklistWithYielding(
            job_delegate, concurrent_marking_state,
            concurrent_marker_.incremental_marking_schedule(),
            concurrent_marking_state.ephemeron_pairs_for_processing_worklist(),
            [&concurrent_marking_state](
                const MarkingWorklists::EphemeronPairItem& item) {
              concurrent_marking_state.ProcessEphemeron(item.key,
                                                        item.value_desc);
            })) {
      return;
    }
  } while (
      !concurrent_marking_state.marking_worklist().IsLocalAndGlobalEmpty());
}

}  // namespace

ConcurrentMarkerBase::ConcurrentMarkerBase(
    HeapBase& heap, MarkingWorklists& marking_worklists,
    IncrementalMarkingSchedule& incremental_marking_schedule,
    cppgc::Platform* platform)
    : heap_(heap),
      marking_worklists_(marking_worklists),
      incremental_marking_schedule_(incremental_marking_schedule),
      platform_(platform) {}

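// Posts the concurrent marking job at user-visible priority. The priority may
// later be raised by IncreaseMarkingPriorityIfNeeded().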
void ConcurrentMarkerBase::Start() {
  DCHECK(platform_);
  concurrent_marking_handle_ =
      platform_->PostJob(v8::TaskPriority::kUserVisible,
                         std::make_unique<ConcurrentMarkingTask>(*this));
}

void ConcurrentMarkerBase::Cancel() {
  if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
    concurrent_marking_handle_->Cancel();
}

void ConcurrentMarkerBase::JoinForTesting() {
  if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
    concurrent_marking_handle_->Join();
}

bool ConcurrentMarkerBase::IsActive() const {
  return concurrent_marking_handle_ && concurrent_marking_handle_->IsRunning();
}

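// The job handle, if one exists, must have been cancelled or joined (and thus
// invalidated) before the marker is destroyed.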
ConcurrentMarkerBase::~ConcurrentMarkerBase() {
  CHECK_IMPLIES(concurrent_marking_handle_,
                !concurrent_marking_handle_->IsValid());
}

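// Called on the mutator thread after each incremental marking step. Returns
// true only if concurrent marking is done, i.e. the worklists hold no more
// work for concurrent markers and the job is no longer active.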
bool ConcurrentMarkerBase::NotifyIncrementalMutatorStepCompleted() {
  DCHECK(concurrent_marking_handle_);
  if (HasWorkForConcurrentMarking(marking_worklists_)) {
    // Notifies the scheduler that max concurrency might have increased.
    // This will adjust the number of markers if necessary.
    IncreaseMarkingPriorityIfNeeded();
    concurrent_marking_handle_->NotifyConcurrencyIncrease();
    return false;
  }
  return !concurrent_marking_handle_->IsActive();
}

void ConcurrentMarkerBase::IncreaseMarkingPriorityIfNeeded() {
  if (!concurrent_marking_handle_->UpdatePriorityEnabled()) return;
  if (concurrent_marking_priority_increased_) return;
  // If concurrent tasks aren't executed, it might delay GC finalization.
  // As long as GC is active so is the write barrier, which incurs a
  // performance cost. Marking is estimated to take overall
  // |IncrementalMarkingSchedule::kEstimatedMarkingTimeMs|. If
  // concurrent marking tasks have not reported any progress (i.e. the
  // concurrently marked bytes count has not changed) in over
  // |kMarkingScheduleRatioBeforeConcurrentPriorityIncrease| of
  // that expected duration, we increase the concurrent task priority
  // for the duration of the current GC. This is meant to prevent the
  // GC from exceeding its expected end time.
  size_t current_concurrently_marked_bytes =
      incremental_marking_schedule_.GetConcurrentlyMarkedBytes();
  if (current_concurrently_marked_bytes > last_concurrently_marked_bytes_) {
    last_concurrently_marked_bytes_ = current_concurrently_marked_bytes;
    last_concurrently_marked_bytes_update_ = v8::base::TimeTicks::Now();
  } else if ((v8::base::TimeTicks::Now() -
              last_concurrently_marked_bytes_update_)
                 .InMilliseconds() >
             kMarkingScheduleRatioBeforeConcurrentPriorityIncrease *
                 IncrementalMarkingSchedule::kEstimatedMarkingTimeMs) {
    concurrent_marking_handle_->UpdatePriority(
        cppgc::TaskPriority::kUserBlocking);
    concurrent_marking_priority_increased_ = true;
  }
}

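// The default concurrent marker traces with a ConcurrentMarkingVisitor; other
// ConcurrentMarkerBase implementations may supply their own visitor.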
std::unique_ptr<Visitor> ConcurrentMarker::CreateConcurrentMarkingVisitor(
    ConcurrentMarkingState& marking_state) const {
  return std::make_unique<ConcurrentMarkingVisitor>(heap(), marking_state);
}

}  // namespace internal
}  // namespace cppgc