// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task_scheduler/task_tracker.h"

#include <limits>
#include <string>
#include <vector>

#include "base/base_switches.h"
#include "base/callback.h"
#include "base/command_line.h"
#include "base/json/json_writer.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/sequence_token.h"
#include "base/synchronization/condition_variable.h"
#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
#include "base/threading/sequence_local_storage_map.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "base/values.h"

namespace base {
namespace internal {

namespace {

constexpr char kParallelExecutionMode[] = "parallel";
constexpr char kSequencedExecutionMode[] = "sequenced";
constexpr char kSingleThreadExecutionMode[] = "single thread";

// An immutable copy of a scheduler task's info required by tracing.
class TaskTracingInfo : public trace_event::ConvertableToTraceFormat {
 public:
  TaskTracingInfo(const TaskTraits& task_traits,
                  const char* execution_mode,
                  const SequenceToken& sequence_token)
      : task_traits_(task_traits),
        execution_mode_(execution_mode),
        sequence_token_(sequence_token) {}

  // trace_event::ConvertableToTraceFormat implementation.
  void AppendAsTraceFormat(std::string* out) const override;

 private:
  const TaskTraits task_traits_;
  const char* const execution_mode_;
  const SequenceToken sequence_token_;

  DISALLOW_COPY_AND_ASSIGN(TaskTracingInfo);
};

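// Sample output of AppendAsTraceFormat() for a sequenced task, with
// illustrative values (key order may differ):
//   {"execution_mode":"sequenced","sequence_token":4,
//    "task_priority":"USER_VISIBLE"}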
void TaskTracingInfo::AppendAsTraceFormat(std::string* out) const {
  DictionaryValue dict;

  dict.SetString("task_priority",
                 base::TaskPriorityToString(task_traits_.priority()));
  dict.SetString("execution_mode", execution_mode_);
  if (execution_mode_ != kParallelExecutionMode)
    dict.SetInteger("sequence_token", sequence_token_.ToInternalValue());

  std::string tmp;
  JSONWriter::Write(dict, &tmp);
  out->append(tmp);
}

// These names convey that a Task is posted to/run by the task scheduler
// without revealing its implementation details.
constexpr char kQueueFunctionName[] = "TaskScheduler PostTask";
constexpr char kRunFunctionName[] = "TaskScheduler RunTask";

constexpr char kTaskSchedulerFlowTracingCategory[] =
    TRACE_DISABLED_BY_DEFAULT("task_scheduler.flow");

// Constructs a histogram to track latency which is logged to
// "TaskScheduler.{histogram_name}.{histogram_label}.{task_type_suffix}".
HistogramBase* GetLatencyHistogram(StringPiece histogram_name,
                                   StringPiece histogram_label,
                                   StringPiece task_type_suffix) {
  DCHECK(!histogram_name.empty());
  DCHECK(!histogram_label.empty());
  DCHECK(!task_type_suffix.empty());
  // Mimics the UMA_HISTOGRAM_HIGH_RESOLUTION_CUSTOM_TIMES macro. The minimum
  // and maximum were chosen to place the 1ms mark at around 70% of the bucket
  // range, giving good resolution for tasks with a latency below 1ms (most of
  // them) and enough information to assess how bad the latency is for tasks
  // that exceed this threshold.
  const std::string histogram = JoinString(
      {"TaskScheduler", histogram_name, histogram_label, task_type_suffix},
      ".");
  return Histogram::FactoryMicrosecondsTimeGet(
      histogram, TimeDelta::FromMicroseconds(1),
      TimeDelta::FromMilliseconds(20), 50,
      HistogramBase::kUmaTargetedHistogramFlag);
}

// Upper bound for the
// TaskScheduler.BlockShutdownTasksPostedDuringShutdown histogram.
constexpr HistogramBase::Sample kMaxBlockShutdownTasksPostedDuringShutdown =
    1000;

void RecordNumBlockShutdownTasksPostedDuringShutdown(
    HistogramBase::Sample value) {
  UMA_HISTOGRAM_CUSTOM_COUNTS(
      "TaskScheduler.BlockShutdownTasksPostedDuringShutdown", value, 1,
      kMaxBlockShutdownTasksPostedDuringShutdown, 50);
}

// Returns the maximum number of TaskPriority::BACKGROUND sequences that can be
// scheduled concurrently based on command line flags.
int GetMaxNumScheduledBackgroundSequences() {
  // The CommandLine might not be initialized if TaskScheduler is initialized
  // in a dynamic library which doesn't have access to argc/argv.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kDisableBackgroundTasks)) {
    return 0;
  }
  return std::numeric_limits<int>::max();
}

}  // namespace

// Atomic internal state used by TaskTracker. Sequential consistency shouldn't
// be assumed from these calls (i.e. a thread reading
// |HasShutdownStarted() == true| isn't guaranteed to see all writes made
// before |StartShutdown()| on the thread that invoked it).
class TaskTracker::State {
 public:
  State() = default;

  // Sets a flag indicating that shutdown has started. Returns true if there
  // are tasks blocking shutdown. Can only be called once.
  bool StartShutdown() {
    const auto new_value =
        subtle::NoBarrier_AtomicIncrement(&bits_, kShutdownHasStartedMask);

    // Check that the "shutdown has started" bit isn't zero. This would happen
    // if it was incremented twice.
    DCHECK(new_value & kShutdownHasStartedMask);

    const auto num_tasks_blocking_shutdown =
        new_value >> kNumTasksBlockingShutdownBitOffset;
    return num_tasks_blocking_shutdown != 0;
  }

  // Returns true if shutdown has started.
  bool HasShutdownStarted() const {
    return subtle::NoBarrier_Load(&bits_) & kShutdownHasStartedMask;
  }

  // Returns true if there are tasks blocking shutdown.
  bool AreTasksBlockingShutdown() const {
    const auto num_tasks_blocking_shutdown =
        subtle::NoBarrier_Load(&bits_) >> kNumTasksBlockingShutdownBitOffset;
    DCHECK_GE(num_tasks_blocking_shutdown, 0);
    return num_tasks_blocking_shutdown != 0;
  }

  // Increments the number of tasks blocking shutdown. Returns true if
  // shutdown has started.
  bool IncrementNumTasksBlockingShutdown() {
#if DCHECK_IS_ON()
    // Verify that no overflow will occur.
    const auto num_tasks_blocking_shutdown =
        subtle::NoBarrier_Load(&bits_) >> kNumTasksBlockingShutdownBitOffset;
    DCHECK_LT(num_tasks_blocking_shutdown,
              std::numeric_limits<subtle::Atomic32>::max() -
                  kNumTasksBlockingShutdownIncrement);
#endif

    const auto new_bits = subtle::NoBarrier_AtomicIncrement(
        &bits_, kNumTasksBlockingShutdownIncrement);
    return new_bits & kShutdownHasStartedMask;
  }

  // Decrements the number of tasks blocking shutdown. Returns true if
  // shutdown has started and the number of tasks blocking shutdown becomes
  // zero.
  bool DecrementNumTasksBlockingShutdown() {
    const auto new_bits = subtle::NoBarrier_AtomicIncrement(
        &bits_, -kNumTasksBlockingShutdownIncrement);
    const bool shutdown_has_started = new_bits & kShutdownHasStartedMask;
    const auto num_tasks_blocking_shutdown =
        new_bits >> kNumTasksBlockingShutdownBitOffset;
    DCHECK_GE(num_tasks_blocking_shutdown, 0);
    return shutdown_has_started && num_tasks_blocking_shutdown == 0;
  }

 private:
  static constexpr subtle::Atomic32 kShutdownHasStartedMask = 1;
  static constexpr subtle::Atomic32 kNumTasksBlockingShutdownBitOffset = 1;
  static constexpr subtle::Atomic32 kNumTasksBlockingShutdownIncrement =
      1 << kNumTasksBlockingShutdownBitOffset;

  // The LSB indicates whether shutdown has started. The other bits count the
  // number of tasks blocking shutdown.
  // No barriers are required to read/write |bits_| as this class is only used
  // as an atomic state checker; it doesn't provide sequential consistency
  // guarantees w.r.t. external state. Sequencing of the TaskTracker::State
  // operations themselves is guaranteed by the AtomicIncrement RMW
  // (read-modify-write) semantics, however. For example, if two threads are
  // racing to call IncrementNumTasksBlockingShutdown() and StartShutdown()
  // respectively, either the first thread will win and the StartShutdown()
  // call will see the blocking task or the second thread will win and
  // IncrementNumTasksBlockingShutdown() will know that shutdown has started.
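  // Worked example (illustrative): with shutdown started and 3 tasks
  // blocking shutdown, |bits_| == (3 << kNumTasksBlockingShutdownBitOffset) |
  // kShutdownHasStartedMask == 0b111.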
  subtle::Atomic32 bits_ = 0;

  DISALLOW_COPY_AND_ASSIGN(State);
};

struct TaskTracker::PreemptedBackgroundSequence {
  PreemptedBackgroundSequence() = default;
  PreemptedBackgroundSequence(scoped_refptr<Sequence> sequence_in,
                              TimeTicks next_task_sequenced_time_in,
                              CanScheduleSequenceObserver* observer_in)
      : sequence(std::move(sequence_in)),
        next_task_sequenced_time(next_task_sequenced_time_in),
        observer(observer_in) {}
224 PreemptedBackgroundSequence(PreemptedBackgroundSequence&& other) = default;
225 ~PreemptedBackgroundSequence() = default;
226 PreemptedBackgroundSequence& operator=(PreemptedBackgroundSequence&& other) =
227 default;
operator <base::internal::TaskTracker::PreemptedBackgroundSequence228 bool operator<(const PreemptedBackgroundSequence& other) const {
229 return next_task_sequenced_time < other.next_task_sequenced_time;
230 }
operator >base::internal::TaskTracker::PreemptedBackgroundSequence231 bool operator>(const PreemptedBackgroundSequence& other) const {
232 return next_task_sequenced_time > other.next_task_sequenced_time;
233 }
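  // Both comparison operators are provided so that instances can be ordered
  // by |next_task_sequenced_time| in a std::priority_queue, whichever
  // comparator the queue is declared with.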

  // A background sequence waiting to be scheduled.
  scoped_refptr<Sequence> sequence;

  // The sequenced time of the next task in |sequence|.
  TimeTicks next_task_sequenced_time;

  // An observer to notify when |sequence| can be scheduled.
  CanScheduleSequenceObserver* observer = nullptr;

 private:
  DISALLOW_COPY_AND_ASSIGN(PreemptedBackgroundSequence);
};

TaskTracker::TaskTracker(StringPiece histogram_label)
    : TaskTracker(histogram_label, GetMaxNumScheduledBackgroundSequences()) {}

TaskTracker::TaskTracker(StringPiece histogram_label,
                         int max_num_scheduled_background_sequences)
    : state_(new State),
      flush_cv_(flush_lock_.CreateConditionVariable()),
      shutdown_lock_(&flush_lock_),
      max_num_scheduled_background_sequences_(
          max_num_scheduled_background_sequences),
      task_latency_histograms_{
          {GetLatencyHistogram("TaskLatencyMicroseconds",
                               histogram_label,
                               "BackgroundTaskPriority"),
           GetLatencyHistogram("TaskLatencyMicroseconds",
                               histogram_label,
                               "BackgroundTaskPriority_MayBlock")},
          {GetLatencyHistogram("TaskLatencyMicroseconds",
                               histogram_label,
                               "UserVisibleTaskPriority"),
           GetLatencyHistogram("TaskLatencyMicroseconds",
                               histogram_label,
                               "UserVisibleTaskPriority_MayBlock")},
          {GetLatencyHistogram("TaskLatencyMicroseconds",
                               histogram_label,
                               "UserBlockingTaskPriority"),
           GetLatencyHistogram("TaskLatencyMicroseconds",
                               histogram_label,
                               "UserBlockingTaskPriority_MayBlock")}},
      heartbeat_latency_histograms_{
          {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
                               histogram_label,
                               "BackgroundTaskPriority"),
           GetLatencyHistogram("HeartbeatLatencyMicroseconds",
                               histogram_label,
                               "BackgroundTaskPriority_MayBlock")},
          {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
                               histogram_label,
                               "UserVisibleTaskPriority"),
           GetLatencyHistogram("HeartbeatLatencyMicroseconds",
                               histogram_label,
                               "UserVisibleTaskPriority_MayBlock")},
          {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
                               histogram_label,
                               "UserBlockingTaskPriority"),
           GetLatencyHistogram("HeartbeatLatencyMicroseconds",
                               histogram_label,
                               "UserBlockingTaskPriority_MayBlock")}},
      tracked_ref_factory_(this) {
  // Confirm that all |task_latency_histograms_| have been initialized above.
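  // (The DCHECK below dereferences the last HistogramBase* in the 2D array:
  // it takes the address one past the end, backs up one element, and checks
  // that the stored pointer is non-null.)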
298 DCHECK(*(&task_latency_histograms_[static_cast<int>(TaskPriority::HIGHEST) +
299 1][0] -
300 1));
301 }
302
303 TaskTracker::~TaskTracker() = default;
304
Shutdown()305 void TaskTracker::Shutdown() {
306 PerformShutdown();
307 DCHECK(IsShutdownComplete());
308
309 // Unblock FlushForTesting() and perform the FlushAsyncForTesting callback
310 // when shutdown completes.
311 {
312 AutoSchedulerLock auto_lock(flush_lock_);
313 flush_cv_->Signal();
314 }
315 CallFlushCallbackForTesting();
316 }
317
FlushForTesting()318 void TaskTracker::FlushForTesting() {
319 AutoSchedulerLock auto_lock(flush_lock_);
320 while (subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) != 0 &&
321 !IsShutdownComplete()) {
322 flush_cv_->Wait();
323 }
324 }
325
FlushAsyncForTesting(OnceClosure flush_callback)326 void TaskTracker::FlushAsyncForTesting(OnceClosure flush_callback) {
327 DCHECK(flush_callback);
328 {
329 AutoSchedulerLock auto_lock(flush_lock_);
330 DCHECK(!flush_callback_for_testing_)
331 << "Only one FlushAsyncForTesting() may be pending at any time.";
332 flush_callback_for_testing_ = std::move(flush_callback);
333 }
334
335 if (subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) == 0 ||
336 IsShutdownComplete()) {
337 CallFlushCallbackForTesting();
338 }
339 }
340
WillPostTask(Task * task)341 bool TaskTracker::WillPostTask(Task* task) {
342 DCHECK(task->task);
343
344 if (!BeforePostTask(task->traits.shutdown_behavior()))
345 return false;
346
347 if (task->delayed_run_time.is_null())
348 subtle::NoBarrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, 1);
349
350 {
351 TRACE_EVENT_WITH_FLOW0(
352 kTaskSchedulerFlowTracingCategory, kQueueFunctionName,
353 TRACE_ID_MANGLE(task_annotator_.GetTaskTraceID(*task)),
354 TRACE_EVENT_FLAG_FLOW_OUT);
355 }
356
357 task_annotator_.WillQueueTask(nullptr, task);
358
359 return true;
360 }
361
WillScheduleSequence(scoped_refptr<Sequence> sequence,CanScheduleSequenceObserver * observer)362 scoped_refptr<Sequence> TaskTracker::WillScheduleSequence(
363 scoped_refptr<Sequence> sequence,
364 CanScheduleSequenceObserver* observer) {
365 const SequenceSortKey sort_key = sequence->GetSortKey();
366
367 // A foreground sequence can always be scheduled.
368 if (sort_key.priority() != TaskPriority::BACKGROUND)
369 return sequence;
370
371 // It is convenient not to have to specify an observer when scheduling
372 // foreground sequences in tests.
373 DCHECK(observer);
374
375 AutoSchedulerLock auto_lock(background_lock_);
376
377 if (num_scheduled_background_sequences_ <
378 max_num_scheduled_background_sequences_) {
379 ++num_scheduled_background_sequences_;
380 return sequence;
381 }
382
383 preempted_background_sequences_.emplace(
384 std::move(sequence), sort_key.next_task_sequenced_time(), observer);
385 return nullptr;
386 }
387
RunAndPopNextTask(scoped_refptr<Sequence> sequence,CanScheduleSequenceObserver * observer)388 scoped_refptr<Sequence> TaskTracker::RunAndPopNextTask(
389 scoped_refptr<Sequence> sequence,
390 CanScheduleSequenceObserver* observer) {
391 DCHECK(sequence);
392
393 // Run the next task in |sequence|.
394 Optional<Task> task = sequence->TakeTask();
395 // TODO(fdoray): Support TakeTask() returning null. https://crbug.com/783309
396 DCHECK(task);
397
398 const TaskShutdownBehavior shutdown_behavior =
399 task->traits.shutdown_behavior();
400 const TaskPriority task_priority = task->traits.priority();
401 const bool can_run_task = BeforeRunTask(shutdown_behavior);
402 const bool is_delayed = !task->delayed_run_time.is_null();
403
404 RunOrSkipTask(std::move(task.value()), sequence.get(), can_run_task);
405 if (can_run_task)
406 AfterRunTask(shutdown_behavior);
407
408 if (!is_delayed)
409 DecrementNumIncompleteUndelayedTasks();
410
411 const bool sequence_is_empty_after_pop = sequence->Pop();

  // Never reschedule a Sequence emptied by Pop(). The contract is that the
  // next poster to make it non-empty is responsible for scheduling it.
  if (sequence_is_empty_after_pop)
    sequence = nullptr;

  if (task_priority == TaskPriority::BACKGROUND) {
    // Allow |sequence| to be rescheduled only if its next task is set to run
    // earlier than the earliest currently preempted sequence.
    return ManageBackgroundSequencesAfterRunningTask(std::move(sequence),
                                                     observer);
  }

  return sequence;
}

bool TaskTracker::HasShutdownStarted() const {
  return state_->HasShutdownStarted();
}

bool TaskTracker::IsShutdownComplete() const {
  AutoSchedulerLock auto_lock(shutdown_lock_);
  return shutdown_event_ && shutdown_event_->IsSignaled();
}

void TaskTracker::SetHasShutdownStartedForTesting() {
  AutoSchedulerLock auto_lock(shutdown_lock_);

  // Create a dummy |shutdown_event_| to satisfy TaskTracker's expectation of
  // its existence during shutdown (e.g. in OnBlockingShutdownTasksComplete()).
  shutdown_event_ = std::make_unique<WaitableEvent>();

  state_->StartShutdown();
}

void TaskTracker::RecordLatencyHistogram(
    LatencyHistogramType latency_histogram_type,
    TaskTraits task_traits,
    TimeTicks posted_time) const {
  const TimeDelta task_latency = TimeTicks::Now() - posted_time;

  DCHECK(latency_histogram_type == LatencyHistogramType::TASK_LATENCY ||
         latency_histogram_type == LatencyHistogramType::HEARTBEAT_LATENCY);
  const auto& histograms =
      latency_histogram_type == LatencyHistogramType::TASK_LATENCY
          ? task_latency_histograms_
          : heartbeat_latency_histograms_;
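  // First index: task priority. Second index: 1 for tasks that may block or
  // use base sync primitives, 0 otherwise (matching the "_MayBlock"-suffixed
  // histograms registered in the constructor).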
459 histograms[static_cast<int>(task_traits.priority())]
460 [task_traits.may_block() || task_traits.with_base_sync_primitives()
461 ? 1
462 : 0]
463 ->AddTimeMicrosecondsGranularity(task_latency);
464 }
465
RunOrSkipTask(Task task,Sequence * sequence,bool can_run_task)466 void TaskTracker::RunOrSkipTask(Task task,
467 Sequence* sequence,
468 bool can_run_task) {
469 RecordLatencyHistogram(LatencyHistogramType::TASK_LATENCY, task.traits,
470 task.sequenced_time);
471
472 const bool previous_singleton_allowed =
473 ThreadRestrictions::SetSingletonAllowed(
474 task.traits.shutdown_behavior() !=
475 TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
476 const bool previous_io_allowed =
477 ThreadRestrictions::SetIOAllowed(task.traits.may_block());
478 const bool previous_wait_allowed = ThreadRestrictions::SetWaitAllowed(
479 task.traits.with_base_sync_primitives());
480
481 {
482 const SequenceToken& sequence_token = sequence->token();
483 DCHECK(sequence_token.IsValid());
484 ScopedSetSequenceTokenForCurrentThread
485 scoped_set_sequence_token_for_current_thread(sequence_token);
486 ScopedSetTaskPriorityForCurrentThread
487 scoped_set_task_priority_for_current_thread(task.traits.priority());
488 ScopedSetSequenceLocalStorageMapForCurrentThread
489 scoped_set_sequence_local_storage_map_for_current_thread(
490 sequence->sequence_local_storage());
491
492 // Set up TaskRunnerHandle as expected for the scope of the task.
493 std::unique_ptr<SequencedTaskRunnerHandle> sequenced_task_runner_handle;
494 std::unique_ptr<ThreadTaskRunnerHandle> single_thread_task_runner_handle;
495 DCHECK(!task.sequenced_task_runner_ref ||
496 !task.single_thread_task_runner_ref);
497 if (task.sequenced_task_runner_ref) {
498 sequenced_task_runner_handle.reset(
499 new SequencedTaskRunnerHandle(task.sequenced_task_runner_ref));
500 } else if (task.single_thread_task_runner_ref) {
501 single_thread_task_runner_handle.reset(
502 new ThreadTaskRunnerHandle(task.single_thread_task_runner_ref));
503 }
504
505 if (can_run_task) {
506 TRACE_TASK_EXECUTION(kRunFunctionName, task);
507
508 const char* const execution_mode =
509 task.single_thread_task_runner_ref
510 ? kSingleThreadExecutionMode
511 : (task.sequenced_task_runner_ref ? kSequencedExecutionMode
512 : kParallelExecutionMode);
513 // TODO(gab): In a better world this would be tacked on as an extra arg
514 // to the trace event generated above. This is not possible however until
515 // http://crbug.com/652692 is resolved.
516 TRACE_EVENT1("task_scheduler", "TaskTracker::RunTask", "task_info",
517 std::make_unique<TaskTracingInfo>(
518 task.traits, execution_mode, sequence_token));

      {
        // Put this in its own scope so it precedes rather than overlaps with
        // RunTask() in the trace view.
        TRACE_EVENT_WITH_FLOW0(
            kTaskSchedulerFlowTracingCategory, kQueueFunctionName,
            TRACE_ID_MANGLE(task_annotator_.GetTaskTraceID(task)),
            TRACE_EVENT_FLAG_FLOW_IN);
      }

      task_annotator_.RunTask(nullptr, &task);
    }

    // Make sure the arguments bound to the callback are deleted within the
    // scope in which the callback runs.
    task.task = OnceClosure();
  }

  ThreadRestrictions::SetWaitAllowed(previous_wait_allowed);
  ThreadRestrictions::SetIOAllowed(previous_io_allowed);
  ThreadRestrictions::SetSingletonAllowed(previous_singleton_allowed);
}

void TaskTracker::PerformShutdown() {
  {
    AutoSchedulerLock auto_lock(shutdown_lock_);

    // This method can only be called once.
    DCHECK(!shutdown_event_);
    DCHECK(!num_block_shutdown_tasks_posted_during_shutdown_);
    DCHECK(!state_->HasShutdownStarted());

    shutdown_event_ = std::make_unique<WaitableEvent>();

    const bool tasks_are_blocking_shutdown = state_->StartShutdown();

    // From now, if a thread causes the number of tasks blocking shutdown to
    // become zero, it will call OnBlockingShutdownTasksComplete().

    if (!tasks_are_blocking_shutdown) {
      // If another thread posts a BLOCK_SHUTDOWN task at this moment, it will
      // block until this method releases |shutdown_lock_|. Then, it will fail
      // DCHECK(!shutdown_event_->IsSignaled()). This is the desired behavior
      // because posting a BLOCK_SHUTDOWN task when TaskTracker::Shutdown()
      // has started and no tasks are blocking shutdown isn't allowed.
      shutdown_event_->Signal();
      return;
    }
  }

  // Remove the cap on the maximum number of background sequences that can be
  // scheduled concurrently. Done after starting shutdown to ensure that
  // non-BLOCK_SHUTDOWN sequences don't get a chance to run and that
  // BLOCK_SHUTDOWN sequences run on threads with a normal priority.
  SetMaxNumScheduledBackgroundSequences(std::numeric_limits<int>::max());

  // It is safe to access |shutdown_event_| without holding |shutdown_lock_|
  // because the pointer never changes after being set above.
  {
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    shutdown_event_->Wait();
  }

  {
    AutoSchedulerLock auto_lock(shutdown_lock_);

    // Record TaskScheduler.BlockShutdownTasksPostedDuringShutdown if less
    // than |kMaxBlockShutdownTasksPostedDuringShutdown| BLOCK_SHUTDOWN tasks
    // were posted during shutdown. Otherwise, the histogram has already been
    // recorded in BeforePostTask().
    if (num_block_shutdown_tasks_posted_during_shutdown_ <
        kMaxBlockShutdownTasksPostedDuringShutdown) {
      RecordNumBlockShutdownTasksPostedDuringShutdown(
          num_block_shutdown_tasks_posted_during_shutdown_);
    }
  }
}

void TaskTracker::SetMaxNumScheduledBackgroundSequences(
    int max_num_scheduled_background_sequences) {
  std::vector<PreemptedBackgroundSequence> sequences_to_schedule;

  {
    AutoSchedulerLock auto_lock(background_lock_);
    max_num_scheduled_background_sequences_ =
        max_num_scheduled_background_sequences;

    while (num_scheduled_background_sequences_ <
               max_num_scheduled_background_sequences &&
           !preempted_background_sequences_.empty()) {
      sequences_to_schedule.push_back(
          GetPreemptedBackgroundSequenceToScheduleLockRequired());
    }
  }

  for (auto& sequence_to_schedule : sequences_to_schedule)
    SchedulePreemptedBackgroundSequence(std::move(sequence_to_schedule));
}

TaskTracker::PreemptedBackgroundSequence
TaskTracker::GetPreemptedBackgroundSequenceToScheduleLockRequired() {
  background_lock_.AssertAcquired();
  DCHECK(!preempted_background_sequences_.empty());

  ++num_scheduled_background_sequences_;
  DCHECK_LE(num_scheduled_background_sequences_,
            max_num_scheduled_background_sequences_);

  // The const_cast on top() is okay since the PreemptedBackgroundSequence is
  // transactionally being popped from |preempted_background_sequences_| right
  // after and the move doesn't alter the sort order (a requirement for the
  // Windows STL's consistency debug-checks for std::priority_queue::top()).
  PreemptedBackgroundSequence popped_sequence =
      std::move(const_cast<PreemptedBackgroundSequence&>(
          preempted_background_sequences_.top()));
  preempted_background_sequences_.pop();
  return popped_sequence;
}

void TaskTracker::SchedulePreemptedBackgroundSequence(
    PreemptedBackgroundSequence sequence_to_schedule) {
  DCHECK(sequence_to_schedule.observer);
  sequence_to_schedule.observer->OnCanScheduleSequence(
      std::move(sequence_to_schedule.sequence));
}

#if DCHECK_IS_ON()
bool TaskTracker::IsPostingBlockShutdownTaskAfterShutdownAllowed() {
  return false;
}
#endif

bool TaskTracker::HasIncompleteUndelayedTasksForTesting() const {
  return subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) != 0;
}

bool TaskTracker::BeforePostTask(TaskShutdownBehavior shutdown_behavior) {
  if (shutdown_behavior == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
    // BLOCK_SHUTDOWN tasks block shutdown between the moment they are posted
    // and the moment they complete their execution.
    const bool shutdown_started = state_->IncrementNumTasksBlockingShutdown();

    if (shutdown_started) {
      AutoSchedulerLock auto_lock(shutdown_lock_);

      // A BLOCK_SHUTDOWN task posted after shutdown has completed is an
      // ordering bug. This aims to catch those early.
      DCHECK(shutdown_event_);
      if (shutdown_event_->IsSignaled()) {
#if DCHECK_IS_ON()
        // clang-format off
        // TODO(robliao): http://crbug.com/698140. Since the service thread
        // doesn't stop processing its own tasks at shutdown, we may still
        // attempt to post a BLOCK_SHUTDOWN task in response to a
        // FileDescriptorWatcher. Same is true for FilePathWatcher
        // (http://crbug.com/728235). Until it's possible for such services to
        // post to non-BLOCK_SHUTDOWN sequences which are themselves funneled
        // to the main execution sequence (a future plan for the post_task.h
        // API), this DCHECK will be flaky and must be disabled.
        // DCHECK(IsPostingBlockShutdownTaskAfterShutdownAllowed());
        // clang-format on
#endif
        state_->DecrementNumTasksBlockingShutdown();
        return false;
      }

      ++num_block_shutdown_tasks_posted_during_shutdown_;

      if (num_block_shutdown_tasks_posted_during_shutdown_ ==
          kMaxBlockShutdownTasksPostedDuringShutdown) {
        // Record the TaskScheduler.BlockShutdownTasksPostedDuringShutdown
        // histogram as soon as its upper bound is hit. That way, a value will
        // be recorded even if an infinite number of BLOCK_SHUTDOWN tasks are
        // posted, preventing shutdown from completing.
        RecordNumBlockShutdownTasksPostedDuringShutdown(
            num_block_shutdown_tasks_posted_during_shutdown_);
      }
    }

    return true;
  }

  // A non-BLOCK_SHUTDOWN task is allowed to be posted iff shutdown hasn't
  // started.
  return !state_->HasShutdownStarted();
}

bool TaskTracker::BeforeRunTask(TaskShutdownBehavior shutdown_behavior) {
  switch (shutdown_behavior) {
    case TaskShutdownBehavior::BLOCK_SHUTDOWN: {
      // The number of tasks blocking shutdown has been incremented when the
      // task was posted.
      DCHECK(state_->AreTasksBlockingShutdown());

      // Trying to run a BLOCK_SHUTDOWN task after shutdown has completed is
      // unexpected as it either shouldn't have been posted if shutdown
      // completed or should be blocking shutdown if it was posted before it
      // did.
      DCHECK(!state_->HasShutdownStarted() || !IsShutdownComplete());

      return true;
    }

    case TaskShutdownBehavior::SKIP_ON_SHUTDOWN: {
      // SKIP_ON_SHUTDOWN tasks block shutdown while they are running.
      const bool shutdown_started =
          state_->IncrementNumTasksBlockingShutdown();

      if (shutdown_started) {
        // The SKIP_ON_SHUTDOWN task isn't allowed to run during shutdown.
        // Decrement the number of tasks blocking shutdown that was wrongly
        // incremented.
        const bool shutdown_started_and_no_tasks_block_shutdown =
            state_->DecrementNumTasksBlockingShutdown();
        if (shutdown_started_and_no_tasks_block_shutdown)
          OnBlockingShutdownTasksComplete();

        return false;
      }

      return true;
    }

    case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN: {
      return !state_->HasShutdownStarted();
    }
  }

  NOTREACHED();
  return false;
}

void TaskTracker::AfterRunTask(TaskShutdownBehavior shutdown_behavior) {
  if (shutdown_behavior == TaskShutdownBehavior::BLOCK_SHUTDOWN ||
      shutdown_behavior == TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {
    const bool shutdown_started_and_no_tasks_block_shutdown =
        state_->DecrementNumTasksBlockingShutdown();
    if (shutdown_started_and_no_tasks_block_shutdown)
      OnBlockingShutdownTasksComplete();
  }
}

void TaskTracker::OnBlockingShutdownTasksComplete() {
  AutoSchedulerLock auto_lock(shutdown_lock_);

  // This method can only be called after shutdown has started.
  DCHECK(state_->HasShutdownStarted());
  DCHECK(shutdown_event_);

  shutdown_event_->Signal();
}

void TaskTracker::DecrementNumIncompleteUndelayedTasks() {
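  // Note: unlike the NoBarrier increment in WillPostTask(), this decrement
  // uses a full barrier; FlushForTesting() and FlushAsyncForTesting() read
  // the counter with Acquire_Load().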
  const auto new_num_incomplete_undelayed_tasks =
      subtle::Barrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, -1);
  DCHECK_GE(new_num_incomplete_undelayed_tasks, 0);
  if (new_num_incomplete_undelayed_tasks == 0) {
    {
      AutoSchedulerLock auto_lock(flush_lock_);
      flush_cv_->Signal();
    }
    CallFlushCallbackForTesting();
  }
}

scoped_refptr<Sequence> TaskTracker::ManageBackgroundSequencesAfterRunningTask(
    scoped_refptr<Sequence> just_ran_sequence,
    CanScheduleSequenceObserver* observer) {
  const TimeTicks next_task_sequenced_time =
      just_ran_sequence
          ? just_ran_sequence->GetSortKey().next_task_sequenced_time()
          : TimeTicks();
  PreemptedBackgroundSequence sequence_to_schedule;

  {
    AutoSchedulerLock auto_lock(background_lock_);

    DCHECK(preempted_background_sequences_.empty() ||
           num_scheduled_background_sequences_ ==
               max_num_scheduled_background_sequences_);
    --num_scheduled_background_sequences_;

    if (just_ran_sequence) {
      if (preempted_background_sequences_.empty() ||
          preempted_background_sequences_.top().next_task_sequenced_time >
              next_task_sequenced_time) {
        ++num_scheduled_background_sequences_;
        return just_ran_sequence;
      }

      preempted_background_sequences_.emplace(
          std::move(just_ran_sequence), next_task_sequenced_time, observer);
    }

    if (!preempted_background_sequences_.empty()) {
      sequence_to_schedule =
          GetPreemptedBackgroundSequenceToScheduleLockRequired();
    }
  }

  // |sequence_to_schedule.sequence| may be null if there was no preempted
  // background sequence.
  if (sequence_to_schedule.sequence)
    SchedulePreemptedBackgroundSequence(std::move(sequence_to_schedule));

  return nullptr;
}

void TaskTracker::CallFlushCallbackForTesting() {
  OnceClosure flush_callback;
  {
    AutoSchedulerLock auto_lock(flush_lock_);
    flush_callback = std::move(flush_callback_for_testing_);
  }
  if (flush_callback)
    std::move(flush_callback).Run();
}

}  // namespace internal
}  // namespace base