• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2018 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/task/sequence_manager/sequence_manager_impl.h"
6 
7 #include <atomic>
8 #include <queue>
9 #include <vector>
10 
11 #include "base/compiler_specific.h"
12 #include "base/debug/crash_logging.h"
13 #include "base/debug/stack_trace.h"
14 #include "base/functional/bind.h"
15 #include "base/functional/callback.h"
16 #include "base/functional/callback_helpers.h"
17 #include "base/json/json_writer.h"
18 #include "base/logging.h"
19 #include "base/memory/ptr_util.h"
20 #include "base/notreached.h"
21 #include "base/observer_list.h"
22 #include "base/rand_util.h"
23 #include "base/ranges/algorithm.h"
24 #include "base/task/sequence_manager/enqueue_order.h"
25 #include "base/task/sequence_manager/task_queue_impl.h"
26 #include "base/task/sequence_manager/task_time_observer.h"
27 #include "base/task/sequence_manager/thread_controller_impl.h"
28 #include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
29 #include "base/task/sequence_manager/time_domain.h"
30 #include "base/task/sequence_manager/wake_up_queue.h"
31 #include "base/task/sequence_manager/work_queue.h"
32 #include "base/task/sequence_manager/work_queue_sets.h"
33 #include "base/task/task_features.h"
34 #include "base/threading/thread_id_name_manager.h"
35 #include "base/time/default_tick_clock.h"
36 #include "base/time/tick_clock.h"
37 #include "base/trace_event/base_tracing.h"
38 #include "build/build_config.h"
39 #include "third_party/abseil-cpp/absl/base/attributes.h"
40 #include "third_party/abseil-cpp/absl/types/optional.h"
41 
42 namespace base {
43 namespace sequence_manager {
44 namespace {
45 
// Whether SequenceManagerImpl records crash keys. Enable via Finch when needed
// for an investigation. Disabled by default to avoid unnecessary overhead.
BASE_FEATURE(kRecordSequenceManagerCrashKeys,
             "RecordSequenceManagerCrashKeys",
             base::FEATURE_DISABLED_BY_DEFAULT);

// The SequenceManagerImpl registered on the current thread, if any. Set in
// CompleteInitializationOnBoundThread() and cleared in the destructor (both
// only when a MessagePump is bound).
ABSL_CONST_INIT thread_local internal::SequenceManagerImpl*
    thread_local_sequence_manager = nullptr;
54 
55 class TracedBaseValue : public trace_event::ConvertableToTraceFormat {
56  public:
TracedBaseValue(Value value)57   explicit TracedBaseValue(Value value) : value_(std::move(value)) {}
58   ~TracedBaseValue() override = default;
59 
AppendAsTraceFormat(std::string * out) const60   void AppendAsTraceFormat(std::string* out) const override {
61     if (!value_.is_none()) {
62       std::string tmp;
63       JSONWriter::Write(value_, &tmp);
64       *out += tmp;
65     } else {
66       *out += "{}";
67     }
68   }
69 
70  private:
71   base::Value value_;
72 };
73 
74 }  // namespace
75 
CreateSequenceManagerOnCurrentThread(SequenceManager::Settings settings)76 std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread(
77     SequenceManager::Settings settings) {
78   return internal::SequenceManagerImpl::CreateOnCurrentThread(
79       std::move(settings));
80 }
81 
CreateSequenceManagerOnCurrentThreadWithPump(std::unique_ptr<MessagePump> message_pump,SequenceManager::Settings settings)82 std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThreadWithPump(
83     std::unique_ptr<MessagePump> message_pump,
84     SequenceManager::Settings settings) {
85   std::unique_ptr<SequenceManager> manager =
86       internal::SequenceManagerImpl::CreateUnbound(std::move(settings));
87   manager->BindToMessagePump(std::move(message_pump));
88   return manager;
89 }
90 
CreateUnboundSequenceManager(SequenceManager::Settings settings)91 std::unique_ptr<SequenceManager> CreateUnboundSequenceManager(
92     SequenceManager::Settings settings) {
93   return internal::SequenceManagerImpl::CreateUnbound(std::move(settings));
94 }
95 
96 namespace internal {
97 
CreateUnboundSequenceManagerImpl(PassKey<base::internal::SequenceManagerThreadDelegate>,SequenceManager::Settings settings)98 std::unique_ptr<SequenceManagerImpl> CreateUnboundSequenceManagerImpl(
99     PassKey<base::internal::SequenceManagerThreadDelegate>,
100     SequenceManager::Settings settings) {
101   return SequenceManagerImpl::CreateUnbound(std::move(settings));
102 }
103 
using TimeRecordingPolicy =
    base::sequence_manager::TaskQueue::TaskTiming::TimeRecordingPolicy;

namespace {

// Threshold for flagging a task as "long" in traces — NOTE(review): the use
// site is outside this chunk; confirm.
constexpr TimeDelta kLongTaskTraceEventThreshold = Milliseconds(50);
// Proportion of tasks which will record thread time for metrics.
const double kTaskSamplingRateForRecordingCPUTime = 0.01;
// Proportion of SequenceManagers which will record thread time for each task,
// enabling advanced metrics.
const double kThreadSamplingRateForRecordingCPUTime = 0.0001;
115 
ReclaimMemoryFromQueue(internal::TaskQueueImpl * queue,LazyNow * lazy_now)116 void ReclaimMemoryFromQueue(internal::TaskQueueImpl* queue, LazyNow* lazy_now) {
117   queue->ReclaimMemory(lazy_now->Now());
118   // If the queue was shut down as a side-effect of reclaiming memory, |queue|
119   // will still be valid but the work queues will have been removed by
120   // TaskQueueImpl::UnregisterTaskQueue.
121   if (queue->delayed_work_queue()) {
122     queue->delayed_work_queue()->RemoveAllCanceledTasksFromFront();
123     queue->immediate_work_queue()->RemoveAllCanceledTasksFromFront();
124   }
125 }
126 
InitializeMetricRecordingSettings(bool randomised_sampling_enabled)127 SequenceManager::MetricRecordingSettings InitializeMetricRecordingSettings(
128     bool randomised_sampling_enabled) {
129   if (!randomised_sampling_enabled)
130     return SequenceManager::MetricRecordingSettings(0);
131   bool records_cpu_time_for_each_task =
132       base::RandDouble() < kThreadSamplingRateForRecordingCPUTime;
133   return SequenceManager::MetricRecordingSettings(
134       records_cpu_time_for_each_task ? 1
135                                      : kTaskSamplingRateForRecordingCPUTime);
136 }
137 
138 // Writes |address| in hexadecimal ("0x11223344") form starting from |output|
139 // and moving backwards in memory. Returns a pointer to the first digit of the
140 // result. Does *not* NUL-terminate the number.
141 #if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
// Writes |address| as "0x..." hex, starting at |output| and moving backwards
// in memory. Returns a pointer to the leading '0'. Does NOT NUL-terminate.
char* PrependHexAddress(char* output, const void* address) {
  static const char kHexChars[] = "0123456789ABCDEF";
  uintptr_t remaining = reinterpret_cast<uintptr_t>(address);
  // Always emit at least one digit (so address 0 renders as "0x0").
  do {
    *output-- = kHexChars[remaining & 0xF];
    remaining >>= 4;
  } while (remaining != 0);
  *output-- = 'x';
  *output = '0';
  return output;
}
153 #endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
154 
// Cached value of kRecordSequenceManagerCrashKeys, set in
// InitializeFeatures(). Atomic to avoid TSAN flags when a test tries to
// access the value before the feature list is available.
std::atomic_bool g_record_crash_keys = false;

#if BUILDFLAG(IS_WIN)
// Cached value of kExplicitHighResolutionTimerWin, updated in
// InitializeFeatures(). Defaults to enabled.
bool g_explicit_high_resolution_timer_win = true;
#endif  // BUILDFLAG(IS_WIN)
162 
163 }  // namespace
164 
165 // static
GetCurrent()166 SequenceManagerImpl* SequenceManagerImpl::GetCurrent() {
167   // Workaround false-positive MSAN use-of-uninitialized-value on
168   // thread_local storage for loaded libraries:
169   // https://github.com/google/sanitizers/issues/1265
170   MSAN_UNPOISON(&thread_local_sequence_manager, sizeof(SequenceManagerImpl*));
171 
172   return thread_local_sequence_manager;
173 }
174 
SequenceManagerImpl::SequenceManagerImpl(
    std::unique_ptr<internal::ThreadController> controller,
    SequenceManager::Settings settings)
    : associated_thread_(controller->GetAssociatedThread()),
      controller_(std::move(controller)),
      settings_(std::move(settings)),
      // NOTE: the initializers below read |settings_| rather than |settings|,
      // which was consumed one line up. This relies on the members being
      // declared in exactly this order (member init follows declaration
      // order).
      metric_recording_settings_(InitializeMetricRecordingSettings(
          settings_.randomised_sampling_enabled)),
      add_queue_time_to_tasks_(settings_.add_queue_time_to_tasks),

      empty_queues_to_reload_(associated_thread_),
      main_thread_only_(this, associated_thread_, settings_, settings_.clock),
      clock_(settings_.clock) {
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
  main_thread_only().selector.SetTaskQueueSelectorObserver(this);

  // Schedule the first periodic memory reclaim pass.
  main_thread_only().next_time_to_reclaim_memory =
      main_thread_clock()->NowTicks() + kReclaimMemoryInterval;

  controller_->SetSequencedTaskSource(this);
}
197 
SequenceManagerImpl::~SequenceManagerImpl() {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);

#if BUILDFLAG(IS_IOS)
  // NOTE(review): presumably undoes the AttachToMessagePump() performed in
  // BindToMessagePump() for UI pumps — confirm.
  if (settings_.message_loop_type == MessagePumpType::UI &&
      associated_thread_->IsBound()) {
    controller_->DetachFromMessagePump();
  }
#endif

  // Make sure no Task is running as given that RunLoop does not support the
  // Delegate being destroyed from a Task and
  // ThreadControllerWithMessagePumpImpl does not support being destroyed from a
  // Task. If we are using a ThreadControllerImpl (i.e. no pump) destruction is
  // fine
  DCHECK(!controller_->GetBoundMessagePump() ||
         main_thread_only().task_execution_stack.empty());

  // Detach queues from the selector before unregistering them.
  for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
    main_thread_only().selector.RemoveQueue(queue);
    queue->UnregisterTaskQueue();
  }

  // TODO(altimin): restore default task runner automatically when
  // ThreadController is destroyed.
  controller_->RestoreDefaultTaskRunner();

  main_thread_only().active_queues.clear();
  main_thread_only().selector.SetTaskQueueSelectorObserver(nullptr);

  // In the case of an early startup exits or in some tests a NestingObserver
  // may not have been registered.
  if (main_thread_only().nesting_observer_registered_)
    controller_->RemoveNestingObserver(this);

  // Let interested parties have one last shot at accessing this.
  for (auto& observer : main_thread_only().destruction_observers)
    observer.WillDestroyCurrentMessageLoop();

  // OK, now make it so that no one can find us. The TLS slot is only set when
  // a MessagePump is bound (see CompleteInitializationOnBoundThread()).
  if (GetMessagePump()) {
    DCHECK_EQ(this, GetCurrent());
    thread_local_sequence_manager = nullptr;
  }
}
245 
SequenceManagerImpl::MainThreadOnly::MainThreadOnly(
    SequenceManagerImpl* sequence_manager,
    const scoped_refptr<AssociatedThreadId>& associated_thread,
    const SequenceManager::Settings& settings,
    const base::TickClock* clock)
    : selector(associated_thread, settings),
      default_clock(clock),
      time_domain(nullptr),
      // Regular delayed tasks flow through |wake_up_queue|; queues created
      // with Spec::non_waking use |non_waking_wake_up_queue| instead (see
      // CreateTaskQueueImpl()).
      wake_up_queue(std::make_unique<DefaultWakeUpQueue>(associated_thread,
                                                         sequence_manager)),
      non_waking_wake_up_queue(
          std::make_unique<NonWakingWakeUpQueue>(associated_thread)) {
  // The subsampler is only needed when CPU-time sampling can be active.
  if (settings.randomised_sampling_enabled) {
    metrics_subsampler = base::MetricsSubSampler();
  }
}
262 
// NOTE(review): defined out-of-line, presumably so member destructors only
// require complete types in this translation unit — confirm.
SequenceManagerImpl::MainThreadOnly::~MainThreadOnly() = default;
264 
265 // static
266 std::unique_ptr<ThreadControllerImpl>
CreateThreadControllerImplForCurrentThread(const TickClock * clock)267 SequenceManagerImpl::CreateThreadControllerImplForCurrentThread(
268     const TickClock* clock) {
269   return ThreadControllerImpl::Create(GetCurrent(), clock);
270 }
271 
272 // static
CreateOnCurrentThread(SequenceManager::Settings settings)273 std::unique_ptr<SequenceManagerImpl> SequenceManagerImpl::CreateOnCurrentThread(
274     SequenceManager::Settings settings) {
275   auto thread_controller =
276       CreateThreadControllerImplForCurrentThread(settings.clock);
277   std::unique_ptr<SequenceManagerImpl> manager(new SequenceManagerImpl(
278       std::move(thread_controller), std::move(settings)));
279   manager->BindToCurrentThread();
280   return manager;
281 }
282 
283 // static
CreateUnbound(SequenceManager::Settings settings)284 std::unique_ptr<SequenceManagerImpl> SequenceManagerImpl::CreateUnbound(
285     SequenceManager::Settings settings) {
286   auto thread_controller =
287       ThreadControllerWithMessagePumpImpl::CreateUnbound(settings);
288   return WrapUnique(new SequenceManagerImpl(std::move(thread_controller),
289                                             std::move(settings)));
290 }
291 
292 // static
// Propagates feature state into cached globals and dependent components.
// Requires the FeatureList to be available (every branch queries it).
void SequenceManagerImpl::InitializeFeatures() {
  base::InitializeTaskLeeway();
  TaskQueueImpl::InitializeFeatures();
  MessagePump::InitializeFeatures();
  ThreadControllerWithMessagePumpImpl::InitializeFeatures();
#if BUILDFLAG(IS_WIN)
  g_explicit_high_resolution_timer_win =
      FeatureList::IsEnabled(kExplicitHighResolutionTimerWin);
#endif  // BUILDFLAG(IS_WIN)

  // Relaxed order suffices: readers only need the boolean value, no ordering
  // with other memory operations.
  g_record_crash_keys.store(
      FeatureList::IsEnabled(kRecordSequenceManagerCrashKeys),
      std::memory_order_relaxed);
  TaskQueueSelector::InitializeFeatures();
}
308 
// Hands |pump| to the ThreadController, completes thread-affine setup, and on
// Android/iOS attaches to the platform-native run loop where applicable.
void SequenceManagerImpl::BindToMessagePump(std::unique_ptr<MessagePump> pump) {
  controller_->BindToCurrentThread(std::move(pump));
  CompleteInitializationOnBoundThread();

  // On Android attach to the native loop when there is one.
#if BUILDFLAG(IS_ANDROID)
  if (settings_.message_loop_type == MessagePumpType::UI ||
      settings_.message_loop_type == MessagePumpType::JAVA) {
    controller_->AttachToMessagePump();
  }
#endif

  // On iOS attach to the native loop when there is one.
#if BUILDFLAG(IS_IOS)
  if (settings_.message_loop_type == MessagePumpType::UI) {
    controller_->AttachToMessagePump();
  }
#endif
}
328 
// Associates this SequenceManager with the calling thread, then finishes the
// thread-affine part of initialization.
void SequenceManagerImpl::BindToCurrentThread() {
  associated_thread_->BindToCurrentThread();
  CompleteInitializationOnBoundThread();
}
333 
334 scoped_refptr<SequencedTaskRunner>
GetTaskRunnerForCurrentTask()335 SequenceManagerImpl::GetTaskRunnerForCurrentTask() {
336   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
337   if (main_thread_only().task_execution_stack.empty())
338     return nullptr;
339   return main_thread_only()
340       .task_execution_stack.back()
341       .pending_task.task_runner;
342 }
343 
// Thread-affine setup shared by BindToCurrentThread() and BindToMessagePump().
void SequenceManagerImpl::CompleteInitializationOnBoundThread() {
  controller_->AddNestingObserver(this);
  main_thread_only().nesting_observer_registered_ = true;
  // The TLS slot is only populated when a MessagePump is bound; the destructor
  // clears it under the same condition.
  if (GetMessagePump()) {
    DCHECK(!GetCurrent())
        << "Can't register a second SequenceManagerImpl on the same thread.";
    thread_local_sequence_manager = this;
  }
  // Queues created before binding get their own thread-affine setup now.
  for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
    queue->CompleteInitializationOnBoundThread();
  }
}
356 
// Installs |time_domain| as the tick source. Only one TimeDomain may be
// installed at a time; call ResetTimeDomain() before installing another.
void SequenceManagerImpl::SetTimeDomain(TimeDomain* time_domain) {
  DCHECK(!main_thread_only().time_domain);
  DCHECK(time_domain);
  // Notify the domain before it can be observed through |clock_|.
  time_domain->OnAssignedToSequenceManager(this);
  controller_->SetTickClock(time_domain);
  main_thread_only().time_domain = time_domain;
  // Release store so other threads reading |clock_| observe the fully
  // initialized domain — NOTE(review): readers presumably load-acquire;
  // confirm at the load sites.
  clock_.store(time_domain, std::memory_order_release);
}
365 
// Removes any installed TimeDomain and restores the default tick clock.
void SequenceManagerImpl::ResetTimeDomain() {
  controller_->SetTickClock(main_thread_only().default_clock);
  // Release store mirrors the one in SetTimeDomain().
  clock_.store(main_thread_only().default_clock.get(),
               std::memory_order_release);
  main_thread_only().time_domain = nullptr;
}
372 
373 std::unique_ptr<internal::TaskQueueImpl>
CreateTaskQueueImpl(const TaskQueue::Spec & spec)374 SequenceManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) {
375   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
376   std::unique_ptr<internal::TaskQueueImpl> task_queue =
377       std::make_unique<internal::TaskQueueImpl>(
378           this,
379           spec.non_waking ? main_thread_only().non_waking_wake_up_queue.get()
380                           : main_thread_only().wake_up_queue.get(),
381           spec);
382   main_thread_only().active_queues.insert(task_queue.get());
383   main_thread_only().selector.AddQueue(
384       task_queue.get(), settings().priority_settings.default_priority());
385   return task_queue;
386 }
387 
SetAddQueueTimeToTasks(bool enable)388 void SequenceManagerImpl::SetAddQueueTimeToTasks(bool enable) {
389   base::subtle::NoBarrier_Store(&add_queue_time_to_tasks_, enable ? 1 : 0);
390 }
391 
GetAddQueueTimeToTasks()392 bool SequenceManagerImpl::GetAddQueueTimeToTasks() {
393   return base::subtle::NoBarrier_Load(&add_queue_time_to_tasks_);
394 }
395 
// Stores the observer in main-thread-only state; it is notified on nested run
// loop entry/exit (see OnBeginNestedRunLoop / OnExitNestedRunLoop).
void SequenceManagerImpl::SetObserver(Observer* observer) {
  main_thread_only().observer = observer;
}
399 
// Tears down |task_queue| in an order that is safe against concurrent task
// posting, then parks it in |queues_to_delete| until it is safe to free.
void SequenceManagerImpl::UnregisterTaskQueueImpl(
    std::unique_ptr<internal::TaskQueueImpl> task_queue) {
  TRACE_EVENT1("sequence_manager", "SequenceManagerImpl::UnregisterTaskQueue",
               "queue_name", task_queue->GetName());
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);

  main_thread_only().selector.RemoveQueue(task_queue.get());

  // After UnregisterTaskQueue returns no new tasks can be posted.
  // It's important to call it first to avoid race condition between removing
  // the task queue from various lists here and adding it to the same lists
  // when posting a task.
  task_queue->UnregisterTaskQueue();

  // Add |task_queue| to |main_thread_only().queues_to_delete| so we can prevent
  // it from being freed while any of our structures hold a raw pointer to it.
  main_thread_only().active_queues.erase(task_queue.get());
  main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue);
}
420 
421 AtomicFlagSet::AtomicFlag
GetFlagToRequestReloadForEmptyQueue(TaskQueueImpl * task_queue)422 SequenceManagerImpl::GetFlagToRequestReloadForEmptyQueue(
423     TaskQueueImpl* task_queue) {
424   return empty_queues_to_reload_.AddFlag(BindRepeating(
425       &TaskQueueImpl::ReloadEmptyImmediateWorkQueue, Unretained(task_queue)));
426 }
427 
// Called on the main thread before selecting work (SelectNextTaskImpl) and in
// GetPendingWakeUp().
void SequenceManagerImpl::ReloadEmptyWorkQueues() const {
  // There are two cases where a queue needs reloading.  First, it might be
  // completely empty and we've just posted a task (this method handles that
  // case). Secondly if the work queue becomes empty when calling
  // WorkQueue::TakeTaskFromWorkQueue (handled there).
  //
  // Invokes callbacks created by GetFlagToRequestReloadForEmptyQueue above.
  empty_queues_to_reload_.RunActiveCallbacks();
}
437 
MoveReadyDelayedTasksToWorkQueues(LazyNow * lazy_now)438 void SequenceManagerImpl::MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now) {
439   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
440                "SequenceManagerImpl::MoveReadyDelayedTasksToWorkQueues");
441 
442   EnqueueOrder delayed_task_group_enqueue_order = GetNextSequenceNumber();
443   main_thread_only().wake_up_queue->MoveReadyDelayedTasksToWorkQueues(
444       lazy_now, delayed_task_group_enqueue_order);
445   main_thread_only()
446       .non_waking_wake_up_queue->MoveReadyDelayedTasksToWorkQueues(
447           lazy_now, delayed_task_group_enqueue_order);
448 }
449 
OnBeginNestedRunLoop()450 void SequenceManagerImpl::OnBeginNestedRunLoop() {
451   main_thread_only().nesting_depth++;
452   if (main_thread_only().observer)
453     main_thread_only().observer->OnBeginNestedRunLoop();
454 }
455 
// Decrements nesting depth; when the outermost nested loop exits, requeues
// tasks that were deferred for being non-nestable, then notifies the observer.
void SequenceManagerImpl::OnExitNestedRunLoop() {
  main_thread_only().nesting_depth--;
  DCHECK_GE(main_thread_only().nesting_depth, 0);
  if (main_thread_only().nesting_depth == 0) {
    // While we were nested some non-nestable tasks may have been deferred. We
    // push them back onto the *front* of their original work queues, that's why
    // we iterate |non_nestable_task_queue| in LIFO order (we want
    // |non_nestable_task.front()| to be the last task pushed at the front of
    // |task_queue|).
    LazyNow exited_nested_now(main_thread_clock());
    while (!main_thread_only().non_nestable_task_queue.empty()) {
      internal::TaskQueueImpl::DeferredNonNestableTask& non_nestable_task =
          main_thread_only().non_nestable_task_queue.back();
      if (!non_nestable_task.task.queue_time.is_null()) {
        // Adjust the deferred tasks' queue time to now so that intentionally
        // deferred tasks are not unfairly considered as having been stuck in
        // the queue for a while. Note: this does not affect task ordering as
        // |enqueue_order| is untouched and deferred tasks will still be pushed
        // back to the front of the queue.
        non_nestable_task.task.queue_time = exited_nested_now.Now();
      }
      auto* const task_queue = non_nestable_task.task_queue;
      task_queue->RequeueDeferredNonNestableTask(std::move(non_nestable_task));
      main_thread_only().non_nestable_task_queue.pop_back();
    }
  }
  if (main_thread_only().observer)
    main_thread_only().observer->OnExitNestedRunLoop();
}
485 
// Asks the ThreadController to schedule a work pass.
void SequenceManagerImpl::ScheduleWork() {
  controller_->ScheduleWork();
}
489 
SetNextWakeUp(LazyNow * lazy_now,absl::optional<WakeUp> wake_up)490 void SequenceManagerImpl::SetNextWakeUp(LazyNow* lazy_now,
491                                         absl::optional<WakeUp> wake_up) {
492   auto next_wake_up = AdjustWakeUp(wake_up, lazy_now);
493   if (next_wake_up && next_wake_up->is_immediate()) {
494     ScheduleWork();
495   } else {
496     controller_->SetNextDelayedDoWork(lazy_now, next_wake_up);
497   }
498 }
499 
// Attaches priority and queue name to the current trace event, but only when
// base tracing is compiled in and the "scheduler" category is enabled.
void SequenceManagerImpl::MaybeEmitTaskDetails(
    perfetto::EventContext& ctx,
    const SequencedTaskSource::SelectedTask& selected_task) const {
#if BUILDFLAG(ENABLE_BASE_TRACING)
  // Other parameters are included only when "scheduler" category is enabled.
  const uint8_t* scheduler_category_enabled =
      TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("scheduler");

  if (!*scheduler_category_enabled)
    return;
  auto* event = ctx.event<perfetto::protos::pbzero::ChromeTrackEvent>();
  auto* sequence_manager_task = event->set_sequence_manager_task();
  sequence_manager_task->set_priority(
      settings().priority_settings.TaskPriorityToProto(selected_task.priority));
  sequence_manager_task->set_queue_name(selected_task.task_queue_name);

#endif  //  BUILDFLAG(ENABLE_BASE_TRACING)
}
518 
519 absl::optional<SequenceManagerImpl::SelectedTask>
SelectNextTask(LazyNow & lazy_now,SelectTaskOption option)520 SequenceManagerImpl::SelectNextTask(LazyNow& lazy_now,
521                                     SelectTaskOption option) {
522   absl::optional<SelectedTask> selected_task =
523       SelectNextTaskImpl(lazy_now, option);
524 
525   return selected_task;
526 }
527 
528 #if DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)
// Logs details about the task at the front of |selected_work_queue| according
// to the configured Settings::TaskLogging mode. DCHECK-builds only (see the
// surrounding #if).
void SequenceManagerImpl::LogTaskDebugInfo(
    const WorkQueue* selected_work_queue) const {
  const Task* task = selected_work_queue->GetFrontTask();
  switch (settings_.task_execution_logging) {
    case Settings::TaskLogging::kNone:
      break;

    case Settings::TaskLogging::kEnabled:
      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << task->posted_from.ToString();
      break;

    case Settings::TaskLogging::kEnabledWithBacktrace: {
      // Collect the posting PC plus the recorded task backtrace, then trim
      // trailing null entries before symbolizing.
      std::array<const void*, PendingTask::kTaskBacktraceLength + 1> task_trace;
      task_trace[0] = task->posted_from.program_counter();
      ranges::copy(task->task_backtrace, task_trace.begin() + 1);
      size_t length = 0;
      while (length < task_trace.size() && task_trace[length])
        ++length;
      if (length == 0)
        break;
      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << debug::StackTrace(task_trace.data(), length);
      break;
    }

    case Settings::TaskLogging::kReorderedOnly: {
      // Only log when this task ran ahead of lower-priority tasks it skipped.
      std::vector<const Task*> skipped_tasks;
      main_thread_only().selector.CollectSkippedOverLowerPriorityTasks(
          selected_work_queue, &skipped_tasks);

      if (skipped_tasks.empty())
        break;

      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << task->posted_from.ToString();

      for (const Task* skipped_task : skipped_tasks) {
        LOG(INFO) << "# (skipped over) "
                  << static_cast<uint64_t>(skipped_task->enqueue_order()) << " "
                  << skipped_task->posted_from.ToString();
      }
    }
  }
}
580 #endif  // DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)
581 
// Core task-selection loop: reloads/flushes queues, then repeatedly asks the
// selector for a work queue, skipping canceled tasks and deferring
// non-nestable tasks while nested. Returns nullopt when nothing is runnable
// under |option|. On success the task is pushed onto the execution stack and
// callers must balance with DidRunTask().
absl::optional<SequenceManagerImpl::SelectedTask>
SequenceManagerImpl::SelectNextTaskImpl(LazyNow& lazy_now,
                                        SelectTaskOption option) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::SelectNextTask");

  ReloadEmptyWorkQueues();
  MoveReadyDelayedTasksToWorkQueues(&lazy_now);

  // If we sampled now, check if it's time to reclaim memory next time we go
  // idle.
  if (lazy_now.has_value() &&
      lazy_now.Now() >= main_thread_only().next_time_to_reclaim_memory) {
    main_thread_only().memory_reclaim_scheduled = true;
  }

  while (true) {
    internal::WorkQueue* work_queue =
        main_thread_only().selector.SelectWorkQueueToService(option);
    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
        TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"), "SequenceManager",
        this,
        AsValueWithSelectorResultForTracing(work_queue,
                                            /* force_verbose */ false));

    if (!work_queue)
      return absl::nullopt;

    // If the head task was canceled, remove it and run the selector again.
    if (UNLIKELY(work_queue->RemoveAllCanceledTasksFromFront()))
      continue;

    if (UNLIKELY(work_queue->GetFrontTask()->nestable ==
                     Nestable::kNonNestable &&
                 main_thread_only().nesting_depth > 0)) {
      // Defer non-nestable work. NOTE these tasks can be arbitrarily delayed so
      // the additional delay should not be a problem.
      // Note because we don't delete queues while nested, it's perfectly OK to
      // store the raw pointer for |queue| here.
      internal::TaskQueueImpl::DeferredNonNestableTask deferred_task{
          work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
          work_queue->queue_type()};
      main_thread_only().non_nestable_task_queue.push_back(
          std::move(deferred_task));
      continue;
    }

#if DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)
    LogTaskDebugInfo(work_queue);
#endif  // DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)

    main_thread_only().task_execution_stack.emplace_back(
        work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
        InitializeTaskTiming(work_queue->task_queue()));

    ExecutingTask& executing_task =
        *main_thread_only().task_execution_stack.rbegin();
    NotifyWillProcessTask(&executing_task, &lazy_now);

    // Maybe invalidate the delayed task handle. If already invalidated, then
    // don't run this task.
    if (!executing_task.pending_task.WillRunTask()) {
      executing_task.pending_task.task = DoNothing();
    }

    return SelectedTask(
        executing_task.pending_task,
        executing_task.task_queue->task_execution_trace_logger(),
        executing_task.priority, executing_task.task_queue_name);
  }
}
654 
DidRunTask(LazyNow & lazy_now)655 void SequenceManagerImpl::DidRunTask(LazyNow& lazy_now) {
656   ExecutingTask& executing_task =
657       *main_thread_only().task_execution_stack.rbegin();
658 
659   NotifyDidProcessTask(&executing_task, &lazy_now);
660   main_thread_only().task_execution_stack.pop_back();
661 
662   if (main_thread_only().nesting_depth == 0)
663     CleanUpQueues();
664 }
665 
RemoveAllCanceledDelayedTasksFromFront(LazyNow * lazy_now)666 void SequenceManagerImpl::RemoveAllCanceledDelayedTasksFromFront(
667     LazyNow* lazy_now) {
668   main_thread_only().wake_up_queue->RemoveAllCanceledDelayedTasksFromFront(
669       lazy_now);
670   main_thread_only()
671       .non_waking_wake_up_queue->RemoveAllCanceledDelayedTasksFromFront(
672           lazy_now);
673 }
674 
// Returns when the thread next needs to wake up, or nullopt when idle. An
// immediate WakeUp{} signals that ready work already exists.
absl::optional<WakeUp> SequenceManagerImpl::GetPendingWakeUp(
    LazyNow* lazy_now,
    SelectTaskOption option) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);

  RemoveAllCanceledDelayedTasksFromFront(lazy_now);

  if (main_thread_only().selector.GetHighestPendingPriority(option)) {
    // If the selector has non-empty queues we trivially know there is immediate
    // work to be done. However we may want to yield to native work if it is
    // more important.
    return WakeUp{};
  }

  // There may be some incoming immediate work which we haven't accounted for.
  // NB ReloadEmptyWorkQueues involves a memory barrier, so it's fastest to not
  // do this always.
  ReloadEmptyWorkQueues();

  // Second check picks up work made visible by the reload above.
  if (main_thread_only().selector.GetHighestPendingPriority(option)) {
    return WakeUp{};
  }

  // Otherwise we need to find the shortest delay, if any.  NB we don't need to
  // call MoveReadyDelayedTasksToWorkQueues because it's assumed
  // DelayTillNextTask will return TimeDelta>() if the delayed task is due to
  // run now.
  return AdjustWakeUp(GetNextDelayedWakeUpWithOption(option), lazy_now);
}
704 
// Earliest pending delayed wake-up. Note: only |wake_up_queue| is consulted;
// the non-waking queue does not produce wake-ups here.
absl::optional<WakeUp> SequenceManagerImpl::GetNextDelayedWakeUp() const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  return main_thread_only().wake_up_queue->GetNextDelayedWakeUp();
}
709 
GetNextDelayedWakeUpWithOption(SelectTaskOption option) const710 absl::optional<WakeUp> SequenceManagerImpl::GetNextDelayedWakeUpWithOption(
711     SelectTaskOption option) const {
712   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
713 
714   if (option == SelectTaskOption::kSkipDelayedTask)
715     return absl::nullopt;
716   return GetNextDelayedWakeUp();
717 }
718 
AdjustWakeUp(absl::optional<WakeUp> wake_up,LazyNow * lazy_now) const719 absl::optional<WakeUp> SequenceManagerImpl::AdjustWakeUp(
720     absl::optional<WakeUp> wake_up,
721     LazyNow* lazy_now) const {
722   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
723   if (!wake_up)
724     return absl::nullopt;
725   // Overdue work needs to be run immediately.
726   if (lazy_now->Now() >= wake_up->earliest_time())
727     return WakeUp{};
728   // If |time_domain| is present, we don't want an actual OS level delayed wake
729   // up scheduled, so pretend we have no more work. This will result in
730   // appearing idle and |time_domain| will decide what to do in
731   // MaybeFastForwardToWakeUp().
732   if (main_thread_only().time_domain)
733     return absl::nullopt;
734   return *wake_up;
735 }
736 
MaybeAddLeewayToTask(Task & task) const737 void SequenceManagerImpl::MaybeAddLeewayToTask(Task& task) const {
738   if (!main_thread_only().time_domain) {
739     task.leeway = GetTaskLeewayForCurrentThread();
740   }
741 }
742 
// TODO(crbug/1267874): Rename once ExplicitHighResolutionTimerWin experiment is
// shipped.
// Reports whether the next wake-up needs a high-resolution timer. Only
// consults the waking |wake_up_queue|; the |non_waking_wake_up_queue| never
// forces a high-resolution timer.
bool SequenceManagerImpl::HasPendingHighResolutionTasks() {
  // Only consider high-res tasks in the |wake_up_queue| (ignore the
  // |non_waking_wake_up_queue|).
#if BUILDFLAG(IS_WIN)
  if (g_explicit_high_resolution_timer_win) {
    absl::optional<WakeUp> wake_up =
        main_thread_only().wake_up_queue->GetNextDelayedWakeUp();
    if (!wake_up)
      return false;
    // Under the kExplicitHighResolutionTimerWin experiment, rely on leeway
    // being larger than the minimum time of a low resolution timer (16ms). This
    // way, we don't need to activate the high resolution timer for precise
    // tasks that will run in more than 16ms if there are non precise tasks in
    // front of them.
    DCHECK_GE(GetDefaultTaskLeeway(),
              Milliseconds(Time::kMinLowResolutionThresholdMs));
    return wake_up->delay_policy == subtle::DelayPolicy::kPrecise;
  }
#endif  // BUILDFLAG(IS_WIN)
  return main_thread_only().wake_up_queue->has_pending_high_resolution_tasks();
}
766 
OnSystemIdle()767 bool SequenceManagerImpl::OnSystemIdle() {
768   bool have_work_to_do = false;
769   if (main_thread_only().time_domain) {
770     auto wakeup = main_thread_only().wake_up_queue->GetNextDelayedWakeUp();
771     have_work_to_do = main_thread_only().time_domain->MaybeFastForwardToWakeUp(
772         wakeup, controller_->ShouldQuitRunLoopWhenIdle());
773   }
774   if (!have_work_to_do) {
775     MaybeReclaimMemory();
776     if (main_thread_only().on_next_idle_callback)
777       std::move(main_thread_only().on_next_idle_callback).Run();
778   }
779   return have_work_to_do;
780 }
781 
// Lets the thread controller annotate |pending_task| before it is enqueued.
void SequenceManagerImpl::WillQueueTask(Task* pending_task) {
  controller_->WillQueueTask(pending_task);
}
785 
InitializeTaskTiming(internal::TaskQueueImpl * task_queue)786 TaskQueue::TaskTiming SequenceManagerImpl::InitializeTaskTiming(
787     internal::TaskQueueImpl* task_queue) {
788   bool records_wall_time =
789       ShouldRecordTaskTiming(task_queue) == TimeRecordingPolicy::DoRecord;
790   bool records_thread_time = records_wall_time && ShouldRecordCPUTimeForTask();
791   return TaskQueue::TaskTiming(records_wall_time, records_thread_time);
792 }
793 
ShouldRecordTaskTiming(const internal::TaskQueueImpl * task_queue)794 TimeRecordingPolicy SequenceManagerImpl::ShouldRecordTaskTiming(
795     const internal::TaskQueueImpl* task_queue) {
796   if (task_queue->RequiresTaskTiming())
797     return TimeRecordingPolicy::DoRecord;
798   if (main_thread_only().nesting_depth == 0 &&
799       !main_thread_only().task_time_observers.empty()) {
800     return TimeRecordingPolicy::DoRecord;
801   }
802   return TimeRecordingPolicy::DoNotRecord;
803 }
804 
// Pre-task bookkeeping, run just before |executing_task| executes: records
// crash keys, quiescence state and the task start time, then notifies task
// observers, the owning queue, and (outside nested loops) task-time
// observers — in that order. Observer notification order is part of the
// contract; do not reorder.
void SequenceManagerImpl::NotifyWillProcessTask(ExecutingTask* executing_task,
                                                LazyNow* time_before_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyWillProcessTaskObservers");

  if (g_record_crash_keys.load(std::memory_order_relaxed)) {
    RecordCrashKeys(executing_task->pending_task);
  }

  if (executing_task->task_queue->GetQuiescenceMonitored())
    main_thread_only().task_was_run_on_quiescence_monitored_queue = true;

  TimeRecordingPolicy recording_policy =
      ShouldRecordTaskTiming(executing_task->task_queue);
  if (recording_policy == TimeRecordingPolicy::DoRecord)
    executing_task->task_timing.RecordTaskStart(time_before_task);

  // Queues can opt out of observer notifications entirely.
  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  const bool was_blocked_or_low_priority =
      executing_task->task_queue->WasBlockedOrLowPriority(
          executing_task->pending_task.enqueue_order());

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers) {
      observer.WillProcessTask(executing_task->pending_task,
                               was_blocked_or_low_priority);
    }
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyWillProcessTask");
    executing_task->task_queue->NotifyWillProcessTask(
        executing_task->pending_task, was_blocked_or_low_priority);
  }

  // The remaining notifications carry timing data, so they only fire when
  // timing is being recorded.
  if (recording_policy != TimeRecordingPolicy::DoRecord)
    return;

  if (main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers)
      observer.WillProcessTask(executing_task->task_timing.start_time());
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskStarted");
    executing_task->task_queue->OnTaskStarted(executing_task->pending_task,
                                              executing_task->task_timing);
  }
}
862 
// Post-task bookkeeping, run just after |executing_task| finishes: notifies
// the queue of completion, records the end time, then fires task-time
// observers, task observers and the queue's did-process hook, and finally
// emits a "LongTask" trace event for unusually long tasks.
void SequenceManagerImpl::NotifyDidProcessTask(ExecutingTask* executing_task,
                                               LazyNow* time_after_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyDidProcessTaskObservers");
  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  TaskQueue::TaskTiming& task_timing = executing_task->task_timing;

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskCompleted");
    if (task_timing.has_wall_time()) {
      executing_task->task_queue->OnTaskCompleted(
          executing_task->pending_task, &task_timing, time_after_task);
    }
  }

  // Timing may not have been started (see NotifyWillProcessTask); only record
  // an end time when a start exists.
  bool has_valid_start =
      task_timing.state() != TaskQueue::TaskTiming::State::NotStarted;
  TimeRecordingPolicy recording_policy =
      ShouldRecordTaskTiming(executing_task->task_queue);
  // Record end time ASAP to avoid bias due to the overhead of observers.
  if (recording_policy == TimeRecordingPolicy::DoRecord && has_valid_start) {
    task_timing.RecordTaskEnd(time_after_task);
  }

  if (has_valid_start && task_timing.has_wall_time() &&
      main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers) {
      observer.DidProcessTask(task_timing.start_time(), task_timing.end_time());
    }
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers)
      observer.DidProcessTask(executing_task->pending_task);
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyDidProcessTask");
    executing_task->task_queue->NotifyDidProcessTask(
        executing_task->pending_task);
  }

  // TODO(altimin): Move this back to blink.
  if (task_timing.has_wall_time() &&
      recording_policy == TimeRecordingPolicy::DoRecord &&
      task_timing.wall_duration() > kLongTaskTraceEventThreshold &&
      main_thread_only().nesting_depth == 0) {
    TRACE_EVENT_INSTANT1("blink", "LongTask", TRACE_EVENT_SCOPE_THREAD,
                         "duration", task_timing.wall_duration().InSecondsF());
  }
}
922 
// Sets how many tasks the controller may run per work batch (must be >= 1).
void SequenceManagerImpl::SetWorkBatchSize(int work_batch_size) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK_GE(work_batch_size, 1);
  controller_->SetWorkBatchSize(work_batch_size);
}
928 
// Registers a per-task observer. Main thread only; |task_observer| must
// outlive its registration.
void SequenceManagerImpl::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_observers.AddObserver(task_observer);
}
933 
// Unregisters a per-task observer previously added via AddTaskObserver().
void SequenceManagerImpl::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_observers.RemoveObserver(task_observer);
}
938 
// Registers an observer of task start/end times. Main thread only.
void SequenceManagerImpl::AddTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_time_observers.AddObserver(task_time_observer);
}
944 
// Unregisters an observer previously added via AddTaskTimeObserver().
void SequenceManagerImpl::RemoveTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_time_observers.RemoveObserver(task_time_observer);
}
950 
GetAndClearSystemIsQuiescentBit()951 bool SequenceManagerImpl::GetAndClearSystemIsQuiescentBit() {
952   bool task_was_run =
953       main_thread_only().task_was_run_on_quiescence_monitored_queue;
954   main_thread_only().task_was_run_on_quiescence_monitored_queue = false;
955   return !task_was_run;
956 }
957 
// Returns the next monotonically-increasing task enqueue order.
EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
  return enqueue_order_generator_.GenerateNext();
}
961 
// Wraps AsValueWithSelectorResult() in a trace-event-compatible container.
std::unique_ptr<trace_event::ConvertableToTraceFormat>
SequenceManagerImpl::AsValueWithSelectorResultForTracing(
    internal::WorkQueue* selected_work_queue,
    bool force_verbose) const {
  return std::make_unique<TracedBaseValue>(
      Value(AsValueWithSelectorResult(selected_work_queue, force_verbose)));
}
969 
AsValueWithSelectorResult(internal::WorkQueue * selected_work_queue,bool force_verbose) const970 Value::Dict SequenceManagerImpl::AsValueWithSelectorResult(
971     internal::WorkQueue* selected_work_queue,
972     bool force_verbose) const {
973   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
974   TimeTicks now = NowTicks();
975   Value::Dict state;
976   Value::List active_queues;
977   for (auto* const queue : main_thread_only().active_queues)
978     active_queues.Append(queue->AsValue(now, force_verbose));
979   state.Set("active_queues", std::move(active_queues));
980   Value::List shutdown_queues;
981   Value::List queues_to_delete;
982   for (const auto& pair : main_thread_only().queues_to_delete)
983     queues_to_delete.Append(pair.first->AsValue(now, force_verbose));
984   state.Set("queues_to_delete", std::move(queues_to_delete));
985   state.Set("selector", main_thread_only().selector.AsValue());
986   if (selected_work_queue) {
987     state.Set("selected_queue", selected_work_queue->task_queue()->GetName());
988     state.Set("work_queue_name", selected_work_queue->name());
989   }
990   state.Set("time_domain", main_thread_only().time_domain
991                                ? main_thread_only().time_domain->AsValue()
992                                : Value::Dict());
993   state.Set("wake_up_queue", main_thread_only().wake_up_queue->AsValue(now));
994   state.Set("non_waking_wake_up_queue",
995             main_thread_only().non_waking_wake_up_queue->AsValue(now));
996   return state;
997 }
998 
// Called when |queue| transitions to enabled; schedules a DoWork only if the
// queue actually has runnable work and is not blocked by a fence.
void SequenceManagerImpl::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK(queue->IsQueueEnabled());
  // Only schedule DoWork if there's something to do.
  if (queue->HasTaskToRunImmediatelyOrReadyDelayedTask() &&
      !queue->BlockedByFence())
    ScheduleWork();
}
1007 
MaybeReclaimMemory()1008 void SequenceManagerImpl::MaybeReclaimMemory() {
1009   if (!main_thread_only().memory_reclaim_scheduled)
1010     return;
1011 
1012   TRACE_EVENT0("sequence_manager", "SequenceManagerImpl::MaybeReclaimMemory");
1013   ReclaimMemory();
1014 
1015   // To avoid performance regressions we only want to do this every so often.
1016   main_thread_only().next_time_to_reclaim_memory =
1017       NowTicks() + kReclaimMemoryInterval;
1018   main_thread_only().memory_reclaim_scheduled = false;
1019 }
1020 
// Asks every active queue to release unneeded memory.
void SequenceManagerImpl::ReclaimMemory() {
  LazyNow lazy_now(main_thread_clock());
  for (auto it = main_thread_only().active_queues.begin();
       it != main_thread_only().active_queues.end();) {
    // Advance |it| before the call — presumably ReclaimMemoryFromQueue() can
    // remove |queue| from active_queues, which would invalidate an iterator
    // still pointing at it. TODO(review): confirm against its implementation.
    auto* const queue = *it++;
    ReclaimMemoryFromQueue(queue, &lazy_now);
  }
}
1029 
// Destroys queues whose deletion was deferred until it became safe (the
// caller only invokes this outside of nested run loops).
void SequenceManagerImpl::CleanUpQueues() {
  main_thread_only().queues_to_delete.clear();
}
1033 
// Returns a weak pointer to this manager, invalidated on destruction.
WeakPtr<SequenceManagerImpl> SequenceManagerImpl::GetWeakPtr() {
  return weak_factory_.GetWeakPtr();
}
1037 
// Forwards the default task runner to the thread controller. (SetTaskRunner()
// delegates to the same controller method.)
void SequenceManagerImpl::SetDefaultTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  controller_->SetDefaultTaskRunner(task_runner);
}
1042 
// Returns the clock used for all time queries (safe from any thread).
const TickClock* SequenceManagerImpl::GetTickClock() const {
  return any_thread_clock();
}
1046 
// Returns the current time per the manager's tick clock.
TimeTicks SequenceManagerImpl::NowTicks() const {
  return any_thread_clock()->NowTicks();
}
1050 
// Decides (with subsampling) whether to record thread/CPU time for the next
// task. Note: each call consumes a sample from the subsampler, so the result
// is intentionally not deterministic across calls.
bool SequenceManagerImpl::ShouldRecordCPUTimeForTask() {
  DCHECK(ThreadTicks::IsSupported() ||
         !metric_recording_settings_.records_cpu_time_for_some_tasks());
  return metric_recording_settings_.records_cpu_time_for_some_tasks() &&
         main_thread_only().metrics_subsampler->ShouldSample(
             metric_recording_settings_
                 .task_sampling_rate_for_recording_cpu_time);
}
1059 
// Accessor for the immutable metric-recording configuration.
const SequenceManager::MetricRecordingSettings&
SequenceManagerImpl::GetMetricRecordingSettings() const {
  return metric_recording_settings_;
}
1064 
// Forwards the task-execution-allowed flag to the thread controller.
void SequenceManagerImpl::SetTaskExecutionAllowed(bool allowed) {
  controller_->SetTaskExecutionAllowed(allowed);
}
1068 
// Queries the thread controller's task-execution-allowed flag.
bool SequenceManagerImpl::IsTaskExecutionAllowed() const {
  return controller_->IsTaskExecutionAllowed();
}
1072 
1073 #if BUILDFLAG(IS_IOS)
AttachToMessagePump()1074 void SequenceManagerImpl::AttachToMessagePump() {
1075   return controller_->AttachToMessagePump();
1076 }
1077 #endif
1078 
IsIdleForTesting()1079 bool SequenceManagerImpl::IsIdleForTesting() {
1080   ReloadEmptyWorkQueues();
1081 
1082   // Make sure that canceled tasks don't affect the return value.
1083   for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
1084     queue->delayed_work_queue()->RemoveAllCanceledTasksFromFront();
1085     queue->immediate_work_queue()->RemoveAllCanceledTasksFromFront();
1086   }
1087 
1088   return !main_thread_only().selector.GetHighestPendingPriority().has_value();
1089 }
1090 
// Enables the controller's message-pump time-keeper metrics, tagged with
// |thread_name|.
void SequenceManagerImpl::EnableMessagePumpTimeKeeperMetrics(
    const char* thread_name) {
  controller_->EnableMessagePumpTimeKeeperMetrics(thread_name);
}
1095 
GetPendingTaskCountForTesting() const1096 size_t SequenceManagerImpl::GetPendingTaskCountForTesting() const {
1097   size_t total = 0;
1098   for (internal::TaskQueueImpl* task_queue : main_thread_only().active_queues) {
1099     total += task_queue->GetNumberOfPendingTasks();
1100   }
1101   return total;
1102 }
1103 
// Creates a new task queue per |spec| and returns an owning handle.
TaskQueue::Handle SequenceManagerImpl::CreateTaskQueue(
    const TaskQueue::Spec& spec) {
  return TaskQueue::Handle(CreateTaskQueueImpl(spec));
}
1108 
DescribeAllPendingTasks() const1109 std::string SequenceManagerImpl::DescribeAllPendingTasks() const {
1110   Value::Dict value =
1111       AsValueWithSelectorResult(nullptr, /* force_verbose */ true);
1112   std::string result;
1113   JSONWriter::Write(value, &result);
1114   return result;
1115 }
1116 
// Asks the controller to favor yielding to native work until
// |prioritize_until|.
void SequenceManagerImpl::PrioritizeYieldingToNative(
    base::TimeTicks prioritize_until) {
  controller_->PrioritizeYieldingToNative(prioritize_until);
}
1121 
// Registers an observer to be notified when this manager is destroyed.
void SequenceManagerImpl::AddDestructionObserver(
    CurrentThread::DestructionObserver* destruction_observer) {
  main_thread_only().destruction_observers.AddObserver(destruction_observer);
}
1126 
// Unregisters an observer added via AddDestructionObserver().
void SequenceManagerImpl::RemoveDestructionObserver(
    CurrentThread::DestructionObserver* destruction_observer) {
  main_thread_only().destruction_observers.RemoveObserver(destruction_observer);
}
1131 
// Registers a one-shot callback fired the next time the thread goes idle (see
// OnSystemIdle()). The DCHECK enforces that a pending callback can only be
// replaced by a null one (i.e. cleared), never silently overwritten.
void SequenceManagerImpl::RegisterOnNextIdleCallback(
    OnceClosure on_next_idle_callback) {
  DCHECK(!main_thread_only().on_next_idle_callback || !on_next_idle_callback);
  main_thread_only().on_next_idle_callback = std::move(on_next_idle_callback);
}
1137 
// CurrentThread-facing alias: delegates to the controller exactly like
// SetDefaultTaskRunner().
void SequenceManagerImpl::SetTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  controller_->SetDefaultTaskRunner(task_runner);
}
1142 
// Returns the controller's current default task runner.
scoped_refptr<SingleThreadTaskRunner> SequenceManagerImpl::GetTaskRunner() {
  return controller_->GetDefaultTaskRunner();
}
1146 
// True iff the calling thread is the one this manager is bound to.
bool SequenceManagerImpl::IsBoundToCurrentThread() const {
  return associated_thread_->IsBoundToCurrentThread();
}
1150 
// Returns the controller's bound message pump, if any.
MessagePump* SequenceManagerImpl::GetMessagePump() const {
  return controller_->GetBoundMessagePump();
}
1154 
// True iff this manager was configured with message-loop type |type|.
bool SequenceManagerImpl::IsType(MessagePumpType type) const {
  return settings_.message_loop_type == type;
}
1158 
// Allocates the crash key used to record the async posting stack of the
// running task (see RecordCrashKeys()). Must be called at most once. No-op on
// platforms without crash key support (NaCl, Android).
void SequenceManagerImpl::EnableCrashKeys(const char* async_stack_crash_key) {
  DCHECK(!main_thread_only().async_stack_crash_key);
#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
  main_thread_only().async_stack_crash_key = debug::AllocateCrashKeyString(
      async_stack_crash_key, debug::CrashKeySize::Size64);
  // The scratch buffer RecordCrashKeys() formats into must exactly match the
  // crash key's capacity.
  static_assert(sizeof(main_thread_only().async_stack_buffer) ==
                    static_cast<size_t>(debug::CrashKeySize::Size64),
                "Async stack buffer size must match crash key size.");
#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
}
1169 
// Formats the async posting stack of |pending_task| (two program-counter
// addresses) into the crash key allocated by EnableCrashKeys(). The buffer is
// filled right-to-left via PrependHexAddress, so |pos| walks backwards from
// the end.
void SequenceManagerImpl::RecordCrashKeys(const PendingTask& pending_task) {
#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
  // SetCrashKeyString is a no-op even if the crash key is null, but we'd still
  // have to construct the StringPiece that is passed in.
  if (!main_thread_only().async_stack_crash_key)
    return;

  // Write the async stack trace onto a crash key as whitespace-delimited hex
  // addresses. These will be symbolized by the crash reporting system. With
  // 63 characters we can fit the address of the task that posted the current
  // task and its predecessor. Avoid HexEncode since it incurs a memory
  // allocation and snprintf because it's about 3.5x slower on Android than
  // this.
  //
  // See
  // https://chromium.googlesource.com/chromium/src/+/main/docs/debugging_with_crash_keys.md
  // for instructions for symbolizing these crash keys.
  //
  // TODO(skyostil): Find a way to extract the destination function address
  // from the task.
  size_t max_size = main_thread_only().async_stack_buffer.size();
  char* const buffer = &main_thread_only().async_stack_buffer[0];
  char* const buffer_end = &buffer[max_size - 1];
  char* pos = buffer_end;
  // Leave space for the NUL terminator.
  pos = PrependHexAddress(pos - 1, pending_task.task_backtrace[0]);
  *(--pos) = ' ';
  pos = PrependHexAddress(pos - 1, pending_task.posted_from.program_counter());
  DCHECK_GE(pos, buffer);
  debug::SetCrashKeyString(
      main_thread_only().async_stack_crash_key,
      StringPiece(pos, static_cast<size_t>(buffer_end - pos)));
#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
}
1204 
currently_executing_task_queue() const1205 internal::TaskQueueImpl* SequenceManagerImpl::currently_executing_task_queue()
1206     const {
1207   if (main_thread_only().task_execution_stack.empty())
1208     return nullptr;
1209   return main_thread_only().task_execution_stack.rbegin()->task_queue;
1210 }
1211 
// Returns the number of task priorities configured for this manager.
TaskQueue::QueuePriority SequenceManagerImpl::GetPriorityCount() const {
  return settings().priority_settings.priority_count();
}
1215 
// Out-of-line definition of the static constexpr member; required for
// ODR-uses under pre-C++17 rules, redundant (but harmless) with C++17 inline
// variables.
constexpr TimeDelta SequenceManagerImpl::kReclaimMemoryInterval;
1217 
1218 }  // namespace internal
1219 }  // namespace sequence_manager
1220 }  // namespace base
1221