// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"

#include <algorithm>
#include <atomic>
#include <utility>

#include "base/auto_reset.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ref.h"
#include "base/message_loop/message_pump.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_macros.h"
#include "base/task/sequence_manager/tasks.h"
#include "base/task/task_features.h"
#include "base/threading/hang_watcher.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

#if BUILDFLAG(IS_IOS)
#include "base/message_loop/message_pump_apple.h"
#elif BUILDFLAG(IS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif

namespace base {
namespace sequence_manager {
namespace internal {
namespace {

// Returns |next_run_time| capped at 1 day from |lazy_now|. This is used to
// mitigate https://crbug.com/850450 where some platforms are unhappy with
// delays > 100,000,000 seconds. In practice, a diagnosis metric showed that no
// sleep > 1 hour ever completes (always interrupted by an earlier MessageLoop
// event) and 99% of completed sleeps are the ones scheduled for <= 1 second.
// Details @ https://crrev.com/c/1142589.
TimeTicks CapAtOneDay(TimeTicks next_run_time, LazyNow* lazy_now) {
  return std::min(next_run_time, lazy_now->Now() + Days(1));
}

// Feature to run tasks by batches before pumping out messages.
BASE_FEATURE(kRunTasksByBatches,
             "RunTasksByBatches",
             base::FEATURE_DISABLED_BY_DEFAULT);

BASE_FEATURE(kAvoidScheduleWorkDuringNativeEventProcessing,
             "AvoidScheduleWorkDuringNativeEventProcessing",
             base::FEATURE_DISABLED_BY_DEFAULT);

#if BUILDFLAG(IS_WIN)
// If enabled, deactivate the high resolution timer immediately in DoWork(),
// instead of waiting for next DoIdleWork.
BASE_FEATURE(kUseLessHighResTimers,
             "UseLessHighResTimers",
             base::FEATURE_ENABLED_BY_DEFAULT);
std::atomic_bool g_use_less_high_res_timers = true;
#endif

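// Cached feature state, written by InitializeFeatures() / ResetFeatures() and
// read with relaxed memory ordering on hot paths.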
std::atomic_bool g_run_tasks_by_batches = false;
std::atomic_bool g_avoid_schedule_calls_during_native_event_processing = false;

base::TimeDelta GetLeewayForWakeUp(absl::optional<WakeUp> wake_up) {
  if (!wake_up || wake_up->delay_policy == subtle::DelayPolicy::kPrecise) {
    return TimeDelta();
  }
  return wake_up->leeway;
}

}  // namespace

// static
void ThreadControllerWithMessagePumpImpl::InitializeFeatures() {
  g_run_tasks_by_batches.store(FeatureList::IsEnabled(kRunTasksByBatches),
                               std::memory_order_relaxed);
  g_avoid_schedule_calls_during_native_event_processing.store(
      FeatureList::IsEnabled(kAvoidScheduleWorkDuringNativeEventProcessing),
      std::memory_order_relaxed);
#if BUILDFLAG(IS_WIN)
  g_use_less_high_res_timers.store(
      FeatureList::IsEnabled(kUseLessHighResTimers), std::memory_order_relaxed);
#endif
}

// static
void ThreadControllerWithMessagePumpImpl::ResetFeatures() {
  g_run_tasks_by_batches.store(
      kRunTasksByBatches.default_state == FEATURE_ENABLED_BY_DEFAULT,
      std::memory_order_relaxed);
}

ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
    const SequenceManager::Settings& settings)
    : ThreadController(settings.clock),
      work_deduplicator_(associated_thread_),
      can_run_tasks_by_batches_(settings.can_run_tasks_by_batches) {}

ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
    std::unique_ptr<MessagePump> message_pump,
    const SequenceManager::Settings& settings)
    : ThreadControllerWithMessagePumpImpl(settings) {
  BindToCurrentThread(std::move(message_pump));
}

ThreadControllerWithMessagePumpImpl::~ThreadControllerWithMessagePumpImpl() {
  // Destructors of MessagePump::Delegate and
  // SingleThreadTaskRunner::CurrentDefaultHandle will do all the clean-up.
  // ScopedSetSequenceLocalStorageMapForCurrentThread destructor will
  // de-register the current thread as a sequence.

#if BUILDFLAG(IS_WIN)
  if (main_thread_only().in_high_res_mode) {
    main_thread_only().in_high_res_mode = false;
    Time::ActivateHighResolutionTimer(false);
  }
#endif
}

// static
std::unique_ptr<ThreadControllerWithMessagePumpImpl>
ThreadControllerWithMessagePumpImpl::CreateUnbound(
    const SequenceManager::Settings& settings) {
  return base::WrapUnique(new ThreadControllerWithMessagePumpImpl(settings));
}

ThreadControllerWithMessagePumpImpl::MainThreadOnly::MainThreadOnly() = default;

ThreadControllerWithMessagePumpImpl::MainThreadOnly::~MainThreadOnly() =
    default;

void ThreadControllerWithMessagePumpImpl::SetSequencedTaskSource(
    SequencedTaskSource* task_source) {
  DCHECK(task_source);
  DCHECK(!main_thread_only().task_source);
  main_thread_only().task_source = task_source;
}

void ThreadControllerWithMessagePumpImpl::BindToCurrentThread(
    std::unique_ptr<MessagePump> message_pump) {
  associated_thread_->BindToCurrentThread();
  pump_ = std::move(message_pump);
  work_id_provider_ = WorkIdProvider::GetForCurrentThread();
  RunLoop::RegisterDelegateForCurrentThread(this);
  scoped_set_sequence_local_storage_map_for_current_thread_ = std::make_unique<
      base::internal::ScopedSetSequenceLocalStorageMapForCurrentThread>(
      &sequence_local_storage_map_);
  {
    base::internal::CheckedAutoLock task_runner_lock(task_runner_lock_);
    if (task_runner_)
      InitializeSingleThreadTaskRunnerCurrentDefaultHandle();
  }
  if (work_deduplicator_.BindToCurrentThread() ==
      ShouldScheduleWork::kScheduleImmediate) {
    pump_->ScheduleWork();
  }
}

void ThreadControllerWithMessagePumpImpl::SetWorkBatchSize(
    int work_batch_size) {
  DCHECK_GE(work_batch_size, 1);
  CHECK(main_thread_only().can_change_batch_size);
  main_thread_only().work_batch_size = work_batch_size;
}

void ThreadControllerWithMessagePumpImpl::WillQueueTask(
    PendingTask* pending_task) {
  task_annotator_.WillQueueTask("SequenceManager PostTask", pending_task);
}

void ThreadControllerWithMessagePumpImpl::ScheduleWork() {
  base::internal::CheckedLock::AssertNoLockHeldOnCurrentThread();
  if (work_deduplicator_.OnWorkRequested() ==
      ShouldScheduleWork::kScheduleImmediate) {
    if (!associated_thread_->IsBoundToCurrentThread()) {
      run_level_tracker_.RecordScheduleWork();
    } else {
      TRACE_EVENT_INSTANT("wakeup.flow", "ScheduleWorkToSelf");
    }
    pump_->ScheduleWork();
  }
}

void ThreadControllerWithMessagePumpImpl::BeginNativeWorkBeforeDoWork() {
  if (!g_avoid_schedule_calls_during_native_event_processing.load(
          std::memory_order_relaxed)) {
    return;
  }
  in_native_work_batch_ = true;

  // Reuse the deduplicator facility to indicate that there is no need for
  // ScheduleWork() until the next time we look for work.
  work_deduplicator_.OnWorkStarted();
}

void ThreadControllerWithMessagePumpImpl::SetNextDelayedDoWork(
    LazyNow* lazy_now,
    absl::optional<WakeUp> wake_up) {
  DCHECK(!wake_up || !wake_up->is_immediate());
  // It's very rare for PostDelayedTask to be called outside of a DoWork in
  // production, so most of the time this does nothing.
  if (work_deduplicator_.OnDelayedWorkRequested() !=
      ShouldScheduleWork::kScheduleImmediate) {
    return;
  }
  TimeTicks run_time =
      wake_up.has_value()
          ? pump_->AdjustDelayedRunTime(wake_up->earliest_time(), wake_up->time,
                                        wake_up->latest_time())
          : TimeTicks::Max();
  DCHECK_LT(lazy_now->Now(), run_time);

  if (!run_time.is_max()) {
    run_time = CapAtOneDay(run_time, lazy_now);
  }
  // |pump_| can't be null as all postTasks are cross-thread before binding,
  // and delayed cross-thread postTasks do the thread hop through an immediate
  // task.
  pump_->ScheduleDelayedWork(
      {run_time, GetLeewayForWakeUp(wake_up), lazy_now->Now()});
}

bool ThreadControllerWithMessagePumpImpl::RunsTasksInCurrentSequence() {
  return associated_thread_->IsBoundToCurrentThread();
}

void ThreadControllerWithMessagePumpImpl::SetDefaultTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  base::internal::CheckedAutoLock lock(task_runner_lock_);
  task_runner_ = task_runner;
  if (associated_thread_->IsBound()) {
    DCHECK(associated_thread_->IsBoundToCurrentThread());
    // Thread task runner handle will be created in BindToCurrentThread().
    InitializeSingleThreadTaskRunnerCurrentDefaultHandle();
  }
}

void ThreadControllerWithMessagePumpImpl::
    InitializeSingleThreadTaskRunnerCurrentDefaultHandle() {
  // Only one SingleThreadTaskRunner::CurrentDefaultHandle can exist at any
  // time, so reset the old one.
  main_thread_only().thread_task_runner_handle.reset();
  main_thread_only().thread_task_runner_handle =
      std::make_unique<SingleThreadTaskRunner::CurrentDefaultHandle>(
          task_runner_);
  // When the task runner is known, bind the power manager. Power notifications
  // are received through that sequence.
  power_monitor_.BindToCurrentThread();
}

scoped_refptr<SingleThreadTaskRunner>
ThreadControllerWithMessagePumpImpl::GetDefaultTaskRunner() {
  base::internal::CheckedAutoLock lock(task_runner_lock_);
  return task_runner_;
}

void ThreadControllerWithMessagePumpImpl::RestoreDefaultTaskRunner() {
  // There is no default task runner (as opposed to ThreadControllerImpl).
}

void ThreadControllerWithMessagePumpImpl::AddNestingObserver(
    RunLoop::NestingObserver* observer) {
  DCHECK(!main_thread_only().nesting_observer);
  DCHECK(observer);
  main_thread_only().nesting_observer = observer;
  RunLoop::AddNestingObserverOnCurrentThread(this);
}

void ThreadControllerWithMessagePumpImpl::RemoveNestingObserver(
    RunLoop::NestingObserver* observer) {
  DCHECK_EQ(main_thread_only().nesting_observer, observer);
  main_thread_only().nesting_observer = nullptr;
  RunLoop::RemoveNestingObserverOnCurrentThread(this);
}

void ThreadControllerWithMessagePumpImpl::OnBeginWorkItem() {
  LazyNow lazy_now(time_source_);
  OnBeginWorkItemImpl(lazy_now);
}

void ThreadControllerWithMessagePumpImpl::OnBeginWorkItemImpl(
    LazyNow& lazy_now) {
  hang_watch_scope_.emplace();
  work_id_provider_->IncrementWorkId();
  run_level_tracker_.OnWorkStarted(lazy_now);
}

void ThreadControllerWithMessagePumpImpl::OnEndWorkItem(int run_level_depth) {
  LazyNow lazy_now(time_source_);
  OnEndWorkItemImpl(lazy_now, run_level_depth);
}

void ThreadControllerWithMessagePumpImpl::OnEndWorkItemImpl(
    LazyNow& lazy_now,
    int run_level_depth) {
  // Work completed, begin a new hang watch until the next task (watching the
  // pump's overhead).
  hang_watch_scope_.emplace();
  work_id_provider_->IncrementWorkId();
  run_level_tracker_.OnWorkEnded(lazy_now, run_level_depth);
}

void ThreadControllerWithMessagePumpImpl::BeforeWait() {
  // DoWork is guaranteed to be called after native work batches and before
  // wait.
  CHECK(!in_native_work_batch_);

  // In most cases, DoIdleWork() will already have cleared the
  // `hang_watch_scope_` but in some cases where the native side of the
  // MessagePump impl is instrumented, it's possible to get a BeforeWait()
  // outside of a DoWork cycle (e.g. message_pump_win.cc :
  // MessagePumpForUI::HandleWorkMessage).
  hang_watch_scope_.reset();

  work_id_provider_->IncrementWorkId();
  LazyNow lazy_now(time_source_);
  run_level_tracker_.OnIdle(lazy_now);
}

MessagePump::Delegate::NextWorkInfo
ThreadControllerWithMessagePumpImpl::DoWork() {
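  // Any native work batch started by BeginNativeWorkBeforeDoWork() ends once
  // DoWork() runs; BeforeWait() checks that this flag has been cleared.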
  in_native_work_batch_ = false;

#if BUILDFLAG(IS_WIN)
  // We are already in a wakeup here, so deactivate the OS high resolution
  // timer immediately instead of waiting for the next DoIdleWork().
  if (g_use_less_high_res_timers.load(std::memory_order_relaxed) &&
      main_thread_only().in_high_res_mode) {
    main_thread_only().in_high_res_mode = false;
    Time::ActivateHighResolutionTimer(false);
  }
#endif
  MessagePump::Delegate::NextWorkInfo next_work_info{};

  work_deduplicator_.OnWorkStarted();
  LazyNow continuation_lazy_now(time_source_);
  absl::optional<WakeUp> next_wake_up = DoWorkImpl(&continuation_lazy_now);

  // If we are yielding after DoWorkImpl (a work batch), set the flag. This
  // informs the MessagePump to schedule a new continuation based on the
  // information below, but even if the continuation is immediate, the native
  // sequence gets a chance to run first. When |g_run_tasks_by_batches| is
  // active we always set the flag to true so that Android behaves like the
  // desktop platforms for this experiment.
  if (RunsTasksByBatches() ||
      (!main_thread_only().yield_to_native_after_batch.is_null() &&
       continuation_lazy_now.Now() <
           main_thread_only().yield_to_native_after_batch)) {
    next_work_info.yield_to_native = true;
  }
  // Schedule a continuation.
  WorkDeduplicator::NextTask next_task =
      (next_wake_up && next_wake_up->is_immediate())
          ? WorkDeduplicator::NextTask::kIsImmediate
          : WorkDeduplicator::NextTask::kIsDelayed;
  if (work_deduplicator_.DidCheckForMoreWork(next_task) ==
      ShouldScheduleWork::kScheduleImmediate) {
    // Need to run new work immediately, but due to the contract of DoWork
    // we only need to return a null TimeTicks to ensure that happens.
    return next_work_info;
  }

  // Special-casing here avoids unnecessarily sampling Now() when out of work.
  if (!next_wake_up) {
    next_work_info.delayed_run_time = TimeTicks::Max();
    return next_work_info;
  }

  // The MessagePump will schedule the wake up on our behalf, so we need to
  // update |next_work_info.delayed_run_time|.
  TimeTicks next_delayed_do_work = pump_->AdjustDelayedRunTime(
      next_wake_up->earliest_time(), next_wake_up->time,
      next_wake_up->latest_time());

  // Don't request a run time past |main_thread_only().quit_runloop_after|.
  if (next_delayed_do_work > main_thread_only().quit_runloop_after) {
    next_delayed_do_work = main_thread_only().quit_runloop_after;
    // If we've passed |quit_runloop_after| there's no more work to do.
    if (continuation_lazy_now.Now() >= main_thread_only().quit_runloop_after) {
      next_work_info.delayed_run_time = TimeTicks::Max();
      return next_work_info;
    }
  }

  next_work_info.delayed_run_time =
      CapAtOneDay(next_delayed_do_work, &continuation_lazy_now);
  next_work_info.leeway = GetLeewayForWakeUp(next_wake_up);
  next_work_info.recent_now = continuation_lazy_now.Now();
  return next_work_info;
}

absl::optional<WakeUp> ThreadControllerWithMessagePumpImpl::DoWorkImpl(
    LazyNow* continuation_lazy_now) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "ThreadControllerImpl::DoWork");

  if (!main_thread_only().task_execution_allowed) {
    // Broadcast in a trace event that application tasks were disallowed. This
    // helps spot nested loops that intentionally starve application tasks.
    TRACE_EVENT0("base", "ThreadController: application tasks disallowed");
    if (main_thread_only().quit_runloop_after == TimeTicks::Max())
      return absl::nullopt;
    return WakeUp{main_thread_only().quit_runloop_after};
  }

  DCHECK(main_thread_only().task_source);

  // Keep running tasks for up to 8ms before yielding to the pump when tasks
  // are run by batches.
  const base::TimeDelta batch_duration =
      RunsTasksByBatches() ? base::Milliseconds(8) : base::Milliseconds(0);

  const absl::optional<base::TimeTicks> start_time =
      batch_duration.is_zero()
          ? absl::nullopt
          : absl::optional<base::TimeTicks>(time_source_->NowTicks());
  absl::optional<base::TimeTicks> recent_time = start_time;

  // Loops for |batch_duration|, or |work_batch_size| times if |batch_duration|
  // is zero.
  for (int num_tasks_executed = 0;
       (!batch_duration.is_zero() &&
        (recent_time.value() - start_time.value()) < batch_duration) ||
       (batch_duration.is_zero() &&
        num_tasks_executed < main_thread_only().work_batch_size);
       ++num_tasks_executed) {
    LazyNow lazy_now_select_task(recent_time, time_source_);
    // Include SelectNextTask() in the scope of the work item. This ensures
    // it's covered in tracing and hang reports. This is particularly
    // important when SelectNextTask() finds no work immediately after a
    // wakeup, otherwise the power-inefficient wakeup is invisible in
    // tracing. OnApplicationTaskSelected() assumes this ordering as well.
    OnBeginWorkItemImpl(lazy_now_select_task);
    int run_depth = static_cast<int>(run_level_tracker_.num_run_levels());

    const SequencedTaskSource::SelectTaskOption select_task_option =
        power_monitor_.IsProcessInPowerSuspendState()
            ? SequencedTaskSource::SelectTaskOption::kSkipDelayedTask
            : SequencedTaskSource::SelectTaskOption::kDefault;
    absl::optional<SequencedTaskSource::SelectedTask> selected_task =
        main_thread_only().task_source->SelectNextTask(lazy_now_select_task,
                                                       select_task_option);
    LazyNow lazy_now_task_selected(time_source_);
    run_level_tracker_.OnApplicationTaskSelected(
        (selected_task && selected_task->task.delayed_run_time.is_null())
            ? selected_task->task.queue_time
            : TimeTicks(),
        lazy_now_task_selected);
    if (!selected_task) {
      OnEndWorkItemImpl(lazy_now_task_selected, run_depth);
      break;
    }

    // Execute the task and assume the worst: it is probably not reentrant.
    AutoReset<bool> ban_nested_application_tasks(
        &main_thread_only().task_execution_allowed, false);

    // Trace-parsing tools (DevTools, Lighthouse, etc) consume this event to
    // determine long tasks.
    // See https://crbug.com/681863 and https://crbug.com/874982
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "RunTask");

    {
      // Always track the start of the task, as this is low-overhead.
      TaskAnnotator::LongTaskTracker long_task_tracker(
          time_source_, selected_task->task, &task_annotator_);

      // Note: all arguments after task are just passed to a TRACE_EVENT for
      // logging so lambda captures are safe as lambda is executed inline.
      SequencedTaskSource* source = main_thread_only().task_source;
      task_annotator_.RunTask(
          "ThreadControllerImpl::RunTask", selected_task->task,
          [&selected_task, &source](perfetto::EventContext& ctx) {
            if (selected_task->task_execution_trace_logger) {
              selected_task->task_execution_trace_logger.Run(
                  ctx, selected_task->task);
            }
            source->MaybeEmitTaskDetails(ctx, selected_task.value());
          });
    }

    // Reset `selected_task` before the call to `DidRunTask()` below makes its
    // `PendingTask` reference dangling.
    selected_task.reset();

    LazyNow lazy_now_after_run_task(time_source_);
    main_thread_only().task_source->DidRunTask(lazy_now_after_run_task);
    // End the work item scope after DidRunTask() as it can process microtasks
    // (which are extensions of the RunTask).
    OnEndWorkItemImpl(lazy_now_after_run_task, run_depth);

    // If DidRunTask() read the clock (lazy_now_after_run_task.has_value()) or
    // if |batch_duration| > 0, store the clock value in `recent_time` so it
    // can be reused by SelectNextTask() at the next loop iteration.
    if (lazy_now_after_run_task.has_value() || !batch_duration.is_zero()) {
      recent_time = lazy_now_after_run_task.Now();
    } else {
      recent_time.reset();
    }

    // When Quit() is called we must stop running the batch because the
    // caller expects per-task granularity.
    if (main_thread_only().quit_pending)
      break;
  }

  if (main_thread_only().quit_pending)
    return absl::nullopt;

  work_deduplicator_.WillCheckForMoreWork();

  // Re-check the state of the power after running tasks. An executed task may
  // have been a power change notification.
  const SequencedTaskSource::SelectTaskOption select_task_option =
      power_monitor_.IsProcessInPowerSuspendState()
          ? SequencedTaskSource::SelectTaskOption::kSkipDelayedTask
          : SequencedTaskSource::SelectTaskOption::kDefault;
  return main_thread_only().task_source->GetPendingWakeUp(continuation_lazy_now,
                                                          select_task_option);
}

bool ThreadControllerWithMessagePumpImpl::RunsTasksByBatches() const {
  return can_run_tasks_by_batches_ &&
         g_run_tasks_by_batches.load(std::memory_order_relaxed);
}

bool ThreadControllerWithMessagePumpImpl::DoIdleWork() {
  struct OnIdle {
    OnIdle(const TickClock* time_source, RunLevelTracker& run_level_tracker_ref)
        : lazy_now(time_source), run_level_tracker(run_level_tracker_ref) {}

    // Very last step before going idle, must be fast as this is hidden from
    // the DoIdleWork trace event below.
    ~OnIdle() { run_level_tracker->OnIdle(lazy_now); }

    LazyNow lazy_now;

   private:
    const raw_ref<RunLevelTracker> run_level_tracker;
  };
  absl::optional<OnIdle> on_idle;

  // Must be after `on_idle` as this trace event's scope must end before the
  // END of the "ThreadController active" trace event emitted from
  // `run_level_tracker_.OnIdle()`.
  TRACE_EVENT0("sequence_manager", "SequenceManager::DoIdleWork");

#if BUILDFLAG(IS_WIN)
  if (!power_monitor_.IsProcessInPowerSuspendState()) {
    // Avoid calling Time::ActivateHighResolutionTimer() between
    // suspend/resume as the system hangs if we do (crbug.com/1074028).
    // OnResume() will generate a task on this thread per the
    // ThreadControllerPowerMonitor observer and DoIdleWork() will thus get
    // another chance to set the right high-resolution-timer-state before
    // going to sleep after resume.

    const bool need_high_res_mode =
        main_thread_only().task_source->HasPendingHighResolutionTasks();
    if (main_thread_only().in_high_res_mode != need_high_res_mode) {
      // On Windows we activate the high resolution timer so that the wait
      // _if_ triggered by the timer happens with good resolution. If we don't
      // do this the default resolution is 15ms which might not be acceptable
      // for some tasks.
      main_thread_only().in_high_res_mode = need_high_res_mode;
      Time::ActivateHighResolutionTimer(need_high_res_mode);
    }
  }
#endif  // BUILDFLAG(IS_WIN)

  if (main_thread_only().task_source->OnSystemIdle()) {
    // The OnSystemIdle() callback resulted in more immediate work, so schedule
    // a DoWork callback. For some message pumps, returning true from here is
    // sufficient to do that, but not on Mac.
    pump_->ScheduleWork();
    return false;
  }

  // This is mostly redundant with the identical call in BeforeWait (upcoming)
  // but some uninstrumented MessagePump impls don't call BeforeWait so it must
  // also be done here.
  hang_watch_scope_.reset();

  // All return paths below are truly idle.
  on_idle.emplace(time_source_, run_level_tracker_);

  // Check if any runloop timeout has expired.
  if (main_thread_only().quit_runloop_after != TimeTicks::Max() &&
      main_thread_only().quit_runloop_after <= on_idle->lazy_now.Now()) {
    Quit();
    return false;
  }

  // RunLoop::Delegate knows whether we called Run() or RunUntilIdle().
  if (ShouldQuitWhenIdle())
    Quit();

  return false;
}

int ThreadControllerWithMessagePumpImpl::RunDepth() {
  return static_cast<int>(run_level_tracker_.num_run_levels());
}

void ThreadControllerWithMessagePumpImpl::Run(bool application_tasks_allowed,
                                              TimeDelta timeout) {
  DCHECK(RunsTasksInCurrentSequence());

  LazyNow lazy_now_run_loop_start(time_source_);

  // RunLoops can be nested so we need to restore the previous value of
  // |quit_runloop_after| upon exit. NB we could use saturated arithmetic here
  // but don't because we have some tests which assert the number of calls to
  // Now.
  AutoReset<TimeTicks> quit_runloop_after(
      &main_thread_only().quit_runloop_after,
      (timeout == TimeDelta::Max()) ? TimeTicks::Max()
                                    : lazy_now_run_loop_start.Now() + timeout);

  run_level_tracker_.OnRunLoopStarted(RunLevelTracker::kInBetweenWorkItems,
                                      lazy_now_run_loop_start);

  // Quit may have been called outside of a Run(), so |quit_pending| might be
  // true here. We can't use InTopLevelDoWork() in Quit() as this call may be
  // outside top-level DoWork but still in Run().
  main_thread_only().quit_pending = false;
  hang_watch_scope_.emplace();
  if (application_tasks_allowed && !main_thread_only().task_execution_allowed) {
    // Allow nested task execution as explicitly requested.
    DCHECK(RunLoop::IsNestedOnCurrentThread());
    main_thread_only().task_execution_allowed = true;
    pump_->Run(this);
    main_thread_only().task_execution_allowed = false;
  } else {
    pump_->Run(this);
  }

  run_level_tracker_.OnRunLoopEnded();
  main_thread_only().quit_pending = false;

  // If this was a nested loop, hang watch the remainder of the task which
  // caused it. Otherwise, stop watching as we're no longer running.
  if (RunLoop::IsNestedOnCurrentThread()) {
    hang_watch_scope_.emplace();
  } else {
    hang_watch_scope_.reset();
  }
  work_id_provider_->IncrementWorkId();
}

void ThreadControllerWithMessagePumpImpl::OnBeginNestedRunLoop() {
  // We don't need to ScheduleWork here! That's because the call to pump_->Run()
  // above, which is always called for RunLoop().Run(), guarantees a call to
  // DoWork on all platforms.
  if (main_thread_only().nesting_observer)
    main_thread_only().nesting_observer->OnBeginNestedRunLoop();
}

void ThreadControllerWithMessagePumpImpl::OnExitNestedRunLoop() {
  if (main_thread_only().nesting_observer)
    main_thread_only().nesting_observer->OnExitNestedRunLoop();
}

void ThreadControllerWithMessagePumpImpl::Quit() {
  DCHECK(RunsTasksInCurrentSequence());
  // Interrupt a batch of work.
  main_thread_only().quit_pending = true;

  // If we're in a nested RunLoop, continuation will be posted if necessary.
  pump_->Quit();
}

void ThreadControllerWithMessagePumpImpl::EnsureWorkScheduled() {
  if (work_deduplicator_.OnWorkRequested() ==
      ShouldScheduleWork::kScheduleImmediate) {
    pump_->ScheduleWork();
  }
}

void ThreadControllerWithMessagePumpImpl::SetTaskExecutionAllowed(
    bool allowed) {
  if (allowed) {
    // We need to schedule work unconditionally because we might be about to
    // enter an OS level nested message loop. Unlike a RunLoop().Run() we don't
    // get a call to DoWork on entering for free.
    work_deduplicator_.OnWorkRequested();  // Set the pending DoWork flag.
    pump_->ScheduleWork();
  } else {
    // We've (probably) just left an OS level nested message loop. Make sure a
    // subsequent PostTask within the same Task doesn't ScheduleWork with the
    // pump (this will be done anyway when the task exits).
    work_deduplicator_.OnWorkStarted();
  }
  main_thread_only().task_execution_allowed = allowed;
}

bool ThreadControllerWithMessagePumpImpl::IsTaskExecutionAllowed() const {
  return main_thread_only().task_execution_allowed;
}

MessagePump* ThreadControllerWithMessagePumpImpl::GetBoundMessagePump() const {
  return pump_.get();
}

void ThreadControllerWithMessagePumpImpl::PrioritizeYieldingToNative(
    base::TimeTicks prioritize_until) {
  main_thread_only().yield_to_native_after_batch = prioritize_until;
}

#if BUILDFLAG(IS_IOS)
void ThreadControllerWithMessagePumpImpl::AttachToMessagePump() {
  static_cast<MessagePumpCFRunLoopBase*>(pump_.get())->Attach(this);
}

void ThreadControllerWithMessagePumpImpl::DetachFromMessagePump() {
  static_cast<MessagePumpCFRunLoopBase*>(pump_.get())->Detach();
}
#elif BUILDFLAG(IS_ANDROID)
void ThreadControllerWithMessagePumpImpl::AttachToMessagePump() {
  CHECK(main_thread_only().work_batch_size == 1);
  // Aborting the message pump currently relies on the batch size being 1.
  main_thread_only().can_change_batch_size = false;
  static_cast<MessagePumpForUI*>(pump_.get())->Attach(this);
}
#endif

bool ThreadControllerWithMessagePumpImpl::ShouldQuitRunLoopWhenIdle() {
  if (run_level_tracker_.num_run_levels() == 0)
    return false;
  // It's only safe to call ShouldQuitWhenIdle() when in a RunLoop.
  return ShouldQuitWhenIdle();
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base