1 // Copyright 2018 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/task/sequence_manager/sequence_manager_impl.h"
6
7 #include <atomic>
8 #include <queue>
9 #include <vector>
10
11 #include "base/compiler_specific.h"
12 #include "base/debug/crash_logging.h"
13 #include "base/debug/stack_trace.h"
14 #include "base/functional/bind.h"
15 #include "base/functional/callback.h"
16 #include "base/functional/callback_helpers.h"
17 #include "base/json/json_writer.h"
18 #include "base/logging.h"
19 #include "base/memory/ptr_util.h"
20 #include "base/notreached.h"
21 #include "base/observer_list.h"
22 #include "base/rand_util.h"
23 #include "base/ranges/algorithm.h"
24 #include "base/task/sequence_manager/enqueue_order.h"
25 #include "base/task/sequence_manager/task_time_observer.h"
26 #include "base/task/sequence_manager/thread_controller_impl.h"
27 #include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
28 #include "base/task/sequence_manager/time_domain.h"
29 #include "base/task/sequence_manager/wake_up_queue.h"
30 #include "base/task/sequence_manager/work_queue.h"
31 #include "base/task/sequence_manager/work_queue_sets.h"
32 #include "base/task/task_features.h"
33 #include "base/threading/thread_id_name_manager.h"
34 #include "base/time/default_tick_clock.h"
35 #include "base/time/tick_clock.h"
36 #include "base/trace_event/base_tracing.h"
37 #include "build/build_config.h"
38 #include "third_party/abseil-cpp/absl/base/attributes.h"
39 #include "third_party/abseil-cpp/absl/types/optional.h"
40
41 namespace base {
42 namespace sequence_manager {
43 namespace {
44
// The SequenceManagerImpl bound to the current thread, if any. Published in
// CompleteInitializationOnBoundThread() (only when a MessagePump is bound) and
// cleared again in ~SequenceManagerImpl().
ABSL_CONST_INIT thread_local internal::SequenceManagerImpl*
    thread_local_sequence_manager = nullptr;
47
48 class TracedBaseValue : public trace_event::ConvertableToTraceFormat {
49 public:
TracedBaseValue(Value value)50 explicit TracedBaseValue(Value value) : value_(std::move(value)) {}
51 ~TracedBaseValue() override = default;
52
AppendAsTraceFormat(std::string * out) const53 void AppendAsTraceFormat(std::string* out) const override {
54 if (!value_.is_none()) {
55 std::string tmp;
56 JSONWriter::Write(value_, &tmp);
57 *out += tmp;
58 } else {
59 *out += "{}";
60 }
61 }
62
63 private:
64 base::Value value_;
65 };
66
67 } // namespace
68
// Public factory: creates a SequenceManager bound to the calling thread via
// SequenceManagerImpl::CreateOnCurrentThread().
std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread(
    SequenceManager::Settings settings) {
  return internal::SequenceManagerImpl::CreateOnCurrentThread(
      std::move(settings));
}
74
// Public factory: creates an unbound SequenceManager and immediately binds it
// to |message_pump| on the calling thread.
std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThreadWithPump(
    std::unique_ptr<MessagePump> message_pump,
    SequenceManager::Settings settings) {
  std::unique_ptr<SequenceManager> manager =
      internal::SequenceManagerImpl::CreateUnbound(std::move(settings));
  manager->BindToMessagePump(std::move(message_pump));
  return manager;
}
83
// Public factory: creates a SequenceManager not yet bound to any thread.
std::unique_ptr<SequenceManager> CreateUnboundSequenceManager(
    SequenceManager::Settings settings) {
  return internal::SequenceManagerImpl::CreateUnbound(std::move(settings));
}
88
89 namespace internal {
90
// PassKey-restricted factory: only base::internal::SequenceManagerThreadDelegate
// can create an unbound SequenceManagerImpl through this entry point.
std::unique_ptr<SequenceManagerImpl> CreateUnboundSequenceManagerImpl(
    PassKey<base::internal::SequenceManagerThreadDelegate>,
    SequenceManager::Settings settings) {
  return SequenceManagerImpl::CreateUnbound(std::move(settings));
}
96
// Shorthand for the deeply-nested TimeRecordingPolicy enum.
using TimeRecordingPolicy =
    base::sequence_manager::TaskQueue::TaskTiming::TimeRecordingPolicy;
99
100 namespace {
101
// Threshold above which a task is considered long for tracing purposes.
constexpr TimeDelta kLongTaskTraceEventThreshold = Milliseconds(50);
// Proportion of tasks which will record thread time for metrics.
const double kTaskSamplingRateForRecordingCPUTime = 0.01;
// Proportion of SequenceManagers which will record thread time for each task,
// enabling advanced metrics.
const double kThreadSamplingRateForRecordingCPUTime = 0.0001;
108
// Reclaims memory from |queue| and then drops canceled tasks from the front of
// both of its work queues, unless the queue was shut down in the process.
void ReclaimMemoryFromQueue(internal::TaskQueueImpl* queue, LazyNow* lazy_now) {
  queue->ReclaimMemory(lazy_now->Now());
  // If the queue was shut down as a side-effect of reclaiming memory, |queue|
  // will still be valid but the work queues will have been removed by
  // TaskQueueImpl::UnregisterTaskQueue.
  if (queue->delayed_work_queue()) {
    queue->delayed_work_queue()->RemoveAllCanceledTasksFromFront();
    queue->immediate_work_queue()->RemoveAllCanceledTasksFromFront();
  }
}
119
InitializeMetricRecordingSettings(bool randomised_sampling_enabled)120 SequenceManager::MetricRecordingSettings InitializeMetricRecordingSettings(
121 bool randomised_sampling_enabled) {
122 if (!randomised_sampling_enabled)
123 return SequenceManager::MetricRecordingSettings(0);
124 bool records_cpu_time_for_each_task =
125 base::RandDouble() < kThreadSamplingRateForRecordingCPUTime;
126 return SequenceManager::MetricRecordingSettings(
127 records_cpu_time_for_each_task ? 1
128 : kTaskSamplingRateForRecordingCPUTime);
129 }
130
131 // Writes |address| in hexadecimal ("0x11223344") form starting from |output|
132 // and moving backwards in memory. Returns a pointer to the first digit of the
133 // result. Does *not* NUL-terminate the number.
134 #if !BUILDFLAG(IS_NACL)
char* PrependHexAddress(char* output, const void* address) {
  static const char kHexChars[] = "0123456789ABCDEF";
  uintptr_t remaining = reinterpret_cast<uintptr_t>(address);
  // Emit nibbles least-significant first, walking backwards in memory. A null
  // address still produces a single '0' digit.
  while (true) {
    *output-- = kHexChars[remaining & 0xF];
    remaining >>= 4;
    if (!remaining)
      break;
  }
  // Finish with the "0x" prefix; |output| ends up on the '0'.
  *output-- = 'x';
  *output = '0';
  return output;
}
146 #endif // !BUILDFLAG(IS_NACL)
147
// Controls whether canceled tasks are removed from the front of the queue when
// deciding when the next wake up should happen.
// Note: An atomic is used here because some tests can initialize two different
// sequence managers on different threads (e.g. by using base::Thread).
std::atomic_bool g_no_wake_ups_for_canceled_tasks{true};

#if BUILDFLAG(IS_WIN)
// Cached state of the kExplicitHighResolutionTimerWin feature; set in
// InitializeFeatures().
bool g_explicit_high_resolution_timer_win = false;
#endif  // BUILDFLAG(IS_WIN)
157
158 } // namespace
159
// static
// Returns the SequenceManagerImpl bound to the current thread, or null.
SequenceManagerImpl* SequenceManagerImpl::GetCurrent() {
  // Workaround false-positive MSAN use-of-uninitialized-value on
  // thread_local storage for loaded libraries:
  // https://github.com/google/sanitizers/issues/1265
  MSAN_UNPOISON(&thread_local_sequence_manager, sizeof(SequenceManagerImpl*));

  return thread_local_sequence_manager;
}
169
// Note: |settings| is moved into |settings_| first so later initializers must
// read from |settings_|, not the (moved-from) parameter.
SequenceManagerImpl::SequenceManagerImpl(
    std::unique_ptr<internal::ThreadController> controller,
    SequenceManager::Settings settings)
    : associated_thread_(controller->GetAssociatedThread()),
      controller_(std::move(controller)),
      settings_(std::move(settings)),
      metric_recording_settings_(InitializeMetricRecordingSettings(
          settings_.randomised_sampling_enabled)),
      add_queue_time_to_tasks_(settings_.add_queue_time_to_tasks),

      empty_queues_to_reload_(associated_thread_),
      main_thread_only_(this, associated_thread_, settings_, settings_.clock),
      clock_(settings_.clock) {
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
  // Register as the selector's observer (see TaskQueueSelector::Observer).
  main_thread_only().selector.SetTaskQueueSelectorObserver(this);

  // Schedule the first periodic memory reclaim.
  main_thread_only().next_time_to_reclaim_memory =
      main_thread_clock()->NowTicks() + kReclaimMemoryInterval;

  controller_->SetSequencedTaskSource(this);
}
192
SequenceManagerImpl::~SequenceManagerImpl() {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);

#if BUILDFLAG(IS_IOS)
  // Detach from the native UI pump before tearing anything down.
  if (settings_.message_loop_type == MessagePumpType::UI &&
      associated_thread_->IsBound()) {
    controller_->DetachFromMessagePump();
  }
#endif

  // Make sure no Task is running as given that RunLoop does not support the
  // Delegate being destroyed from a Task and
  // ThreadControllerWithMessagePumpImpl does not support being destroyed from a
  // Task. If we are using a ThreadControllerImpl (i.e. no pump) destruction is
  // fine.
  DCHECK(!controller_->GetBoundMessagePump() ||
         main_thread_only().task_execution_stack.empty());

  // Unregister every still-active queue so no further tasks can be posted.
  for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
    main_thread_only().selector.RemoveQueue(queue);
    queue->UnregisterTaskQueue();
  }

  // TODO(altimin): restore default task runner automatically when
  // ThreadController is destroyed.
  controller_->RestoreDefaultTaskRunner();

  main_thread_only().active_queues.clear();
  main_thread_only().queues_to_gracefully_shutdown.clear();
  main_thread_only().selector.SetTaskQueueSelectorObserver(nullptr);

  // In the case of early startup exits, or in some tests, a NestingObserver
  // may not have been registered.
  if (main_thread_only().nesting_observer_registered_)
    controller_->RemoveNestingObserver(this);

  // Let interested parties have one last shot at accessing this.
  for (auto& observer : main_thread_only().destruction_observers)
    observer.WillDestroyCurrentMessageLoop();

  // OK, now make it so that no one can find us.
  if (GetMessagePump()) {
    DCHECK_EQ(this, GetCurrent());
    thread_local_sequence_manager = nullptr;
  }
}
241
// Bundles all state that may only be touched from the bound thread. Both
// wake-up queues are created here; queues opt into the non-waking one via
// TaskQueue::Spec (see CreateTaskQueueImpl()).
SequenceManagerImpl::MainThreadOnly::MainThreadOnly(
    SequenceManagerImpl* sequence_manager,
    const scoped_refptr<AssociatedThreadId>& associated_thread,
    const SequenceManager::Settings& settings,
    const base::TickClock* clock)
    : selector(associated_thread, settings),
      default_clock(clock),
      time_domain(nullptr),
      wake_up_queue(std::make_unique<DefaultWakeUpQueue>(associated_thread,
                                                         sequence_manager)),
      non_waking_wake_up_queue(
          std::make_unique<NonWakingWakeUpQueue>(associated_thread)) {
  // The subsampler is only needed when randomised sampling is enabled.
  if (settings.randomised_sampling_enabled) {
    metrics_subsampler = base::MetricsSubSampler();
  }
}

SequenceManagerImpl::MainThreadOnly::~MainThreadOnly() = default;
260
// static
// Builds a ThreadControllerImpl for the calling thread, forwarding whatever
// SequenceManagerImpl is currently bound to this thread (possibly null).
std::unique_ptr<ThreadControllerImpl>
SequenceManagerImpl::CreateThreadControllerImplForCurrentThread(
    const TickClock* clock) {
  return ThreadControllerImpl::Create(GetCurrent(), clock);
}
267
268 // static
CreateOnCurrentThread(SequenceManager::Settings settings)269 std::unique_ptr<SequenceManagerImpl> SequenceManagerImpl::CreateOnCurrentThread(
270 SequenceManager::Settings settings) {
271 auto thread_controller =
272 CreateThreadControllerImplForCurrentThread(settings.clock);
273 std::unique_ptr<SequenceManagerImpl> manager(new SequenceManagerImpl(
274 std::move(thread_controller), std::move(settings)));
275 manager->BindToCurrentThread();
276 return manager;
277 }
278
// static
// Creates a SequenceManagerImpl whose pump-based thread controller is not yet
// bound to any thread; callers bind it later via BindToMessagePump() or
// BindToCurrentThread().
std::unique_ptr<SequenceManagerImpl> SequenceManagerImpl::CreateUnbound(
    SequenceManager::Settings settings) {
  auto thread_controller =
      ThreadControllerWithMessagePumpImpl::CreateUnbound(settings);
  return WrapUnique(new SequenceManagerImpl(std::move(thread_controller),
                                            std::move(settings)));
}
287
// static
// Caches feature-flag state into this file's globals and into the helper
// classes so it can be read later without FeatureList lookups.
void SequenceManagerImpl::InitializeFeatures() {
  base::InitializeTaskLeeway();
  ApplyNoWakeUpsForCanceledTasks();
  TaskQueueImpl::InitializeFeatures();
  ThreadControllerWithMessagePumpImpl::InitializeFeatures();
#if BUILDFLAG(IS_WIN)
  g_explicit_high_resolution_timer_win =
      FeatureList::IsEnabled(kExplicitHighResolutionTimerWin);
#endif  // BUILDFLAG(IS_WIN)
  TaskQueueSelector::InitializeFeatures();
}
300
// static
// Copies the kNoWakeUpsForCanceledTasks feature state into the global.
void SequenceManagerImpl::ApplyNoWakeUpsForCanceledTasks() {
  // Since kNoWakeUpsForCanceledTasks is not constexpr (forbidden for Features),
  // it cannot be used to initialize |g_no_wake_ups_for_canceled_tasks| at
  // compile time. At least DCHECK that its initial value matches the default
  // value of the feature here.
  DCHECK_EQ(
      g_no_wake_ups_for_canceled_tasks.load(std::memory_order_relaxed),
      kNoWakeUpsForCanceledTasks.default_state == FEATURE_ENABLED_BY_DEFAULT);
  g_no_wake_ups_for_canceled_tasks.store(
      FeatureList::IsEnabled(kNoWakeUpsForCanceledTasks),
      std::memory_order_relaxed);
}
314
// static
// Restores |g_no_wake_ups_for_canceled_tasks| to the feature's default state,
// undoing ApplyNoWakeUpsForCanceledTasks() for tests.
void SequenceManagerImpl::ResetNoWakeUpsForCanceledTasksForTesting() {
  g_no_wake_ups_for_canceled_tasks.store(
      kNoWakeUpsForCanceledTasks.default_state == FEATURE_ENABLED_BY_DEFAULT,
      std::memory_order_relaxed);
}
321
// Binds this (previously unbound) manager to |pump| on the calling thread and
// finishes thread-bound initialization.
void SequenceManagerImpl::BindToMessagePump(std::unique_ptr<MessagePump> pump) {
  controller_->BindToCurrentThread(std::move(pump));
  CompleteInitializationOnBoundThread();

  // On Android attach to the native loop when there is one.
#if BUILDFLAG(IS_ANDROID)
  if (settings_.message_loop_type == MessagePumpType::UI ||
      settings_.message_loop_type == MessagePumpType::JAVA) {
    controller_->AttachToMessagePump();
  }
#endif

  // On iOS attach to the native loop when there is one.
#if BUILDFLAG(IS_IOS)
  if (settings_.message_loop_type == MessagePumpType::UI) {
    controller_->AttachToMessagePump();
  }
#endif
}
341
// Binds this manager to the calling thread and finishes thread-bound
// initialization (nesting observer registration, TLS publication).
void SequenceManagerImpl::BindToCurrentThread() {
  associated_thread_->BindToCurrentThread();
  CompleteInitializationOnBoundThread();
}
346
347 scoped_refptr<SequencedTaskRunner>
GetTaskRunnerForCurrentTask()348 SequenceManagerImpl::GetTaskRunnerForCurrentTask() {
349 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
350 if (main_thread_only().task_execution_stack.empty())
351 return nullptr;
352 return main_thread_only()
353 .task_execution_stack.back()
354 .pending_task.task_runner;
355 }
356
// Runs the initialization steps that require a bound thread: registers the
// nesting observer and, when backed by a message pump, publishes |this| in
// thread-local storage for GetCurrent().
void SequenceManagerImpl::CompleteInitializationOnBoundThread() {
  controller_->AddNestingObserver(this);
  main_thread_only().nesting_observer_registered_ = true;
  if (GetMessagePump()) {
    DCHECK(!GetCurrent())
        << "Can't register a second SequenceManagerImpl on the same thread.";
    thread_local_sequence_manager = this;
  }
}
366
// Installs |time_domain| as the tick source. Only one time domain may be
// installed at a time (ResetTimeDomain() must be called before replacing it).
void SequenceManagerImpl::SetTimeDomain(TimeDomain* time_domain) {
  DCHECK(!main_thread_only().time_domain);
  DCHECK(time_domain);
  time_domain->OnAssignedToSequenceManager(this);
  controller_->SetTickClock(time_domain);
  main_thread_only().time_domain = time_domain;
  // Release store so readers of |clock_| on other threads observe the fully
  // initialized time domain.
  clock_.store(time_domain, std::memory_order_release);
}
375
// Removes the installed time domain and falls back to the default tick clock.
void SequenceManagerImpl::ResetTimeDomain() {
  controller_->SetTickClock(main_thread_only().default_clock);
  clock_.store(main_thread_only().default_clock.get(),
               std::memory_order_release);
  main_thread_only().time_domain = nullptr;
}
382
// Creates a caller-owned TaskQueueImpl, registers it with the selector at the
// default priority and tracks it in |active_queues|.
std::unique_ptr<internal::TaskQueueImpl>
SequenceManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  // Queues marked non_waking are routed to the NonWakingWakeUpQueue.
  std::unique_ptr<internal::TaskQueueImpl> task_queue =
      std::make_unique<internal::TaskQueueImpl>(
          this,
          spec.non_waking ? main_thread_only().non_waking_wake_up_queue.get()
                          : main_thread_only().wake_up_queue.get(),
          spec);
  main_thread_only().active_queues.insert(task_queue.get());
  main_thread_only().selector.AddQueue(
      task_queue.get(), settings().priority_settings.default_priority());
  return task_queue;
}
397
// Atomically (NoBarrier) records whether queue time should be stamped on
// posted tasks; read back via GetAddQueueTimeToTasks().
void SequenceManagerImpl::SetAddQueueTimeToTasks(bool enable) {
  base::subtle::NoBarrier_Store(&add_queue_time_to_tasks_, enable ? 1 : 0);
}
401
// Counterpart of SetAddQueueTimeToTasks(); lock-free read of the flag.
bool SequenceManagerImpl::GetAddQueueTimeToTasks() {
  return base::subtle::NoBarrier_Load(&add_queue_time_to_tasks_);
}
405
// Sets the (single) observer notified of nested run-loop entry/exit.
void SequenceManagerImpl::SetObserver(Observer* observer) {
  main_thread_only().observer = observer;
}
409
// Takes ownership of |task_queue| and keeps it alive in
// |queues_to_gracefully_shutdown| until its shutdown completes.
void SequenceManagerImpl::ShutdownTaskQueueGracefully(
    std::unique_ptr<internal::TaskQueueImpl> task_queue) {
  main_thread_only().queues_to_gracefully_shutdown[task_queue.get()] =
      std::move(task_queue);
}
415
// Removes |task_queue| from the selector and unregisters it, deferring its
// actual deletion until it is safe (see |queues_to_delete|).
void SequenceManagerImpl::UnregisterTaskQueueImpl(
    std::unique_ptr<internal::TaskQueueImpl> task_queue) {
  TRACE_EVENT1("sequence_manager", "SequenceManagerImpl::UnregisterTaskQueue",
               "queue_name", task_queue->GetName());
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);

  main_thread_only().selector.RemoveQueue(task_queue.get());

  // After UnregisterTaskQueue returns no new tasks can be posted.
  // It's important to call it first to avoid race condition between removing
  // the task queue from various lists here and adding it to the same lists
  // when posting a task.
  task_queue->UnregisterTaskQueue();

  // Add |task_queue| to |main_thread_only().queues_to_delete| so we can prevent
  // it from being freed while any of our structures hold a raw pointer to it.
  main_thread_only().active_queues.erase(task_queue.get());
  main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue);
}
436
// Returns an AtomicFlag which, when set, makes ReloadEmptyWorkQueues() invoke
// TaskQueueImpl::ReloadEmptyImmediateWorkQueue for |task_queue|.
// NOTE(review): Unretained assumes the flag is released before |task_queue|
// is destroyed — confirm against TaskQueueImpl's shutdown path.
AtomicFlagSet::AtomicFlag
SequenceManagerImpl::GetFlagToRequestReloadForEmptyQueue(
    TaskQueueImpl* task_queue) {
  return empty_queues_to_reload_.AddFlag(BindRepeating(
      &TaskQueueImpl::ReloadEmptyImmediateWorkQueue, Unretained(task_queue)));
}
443
void SequenceManagerImpl::ReloadEmptyWorkQueues() const {
  // There are two cases where a queue needs reloading. First, it might be
  // completely empty and we've just posted a task (this method handles that
  // case). Secondly if the work queue becomes empty when calling
  // WorkQueue::TakeTaskFromWorkQueue (handled there).
  //
  // Invokes callbacks created by GetFlagToRequestReloadForEmptyQueue above.
  empty_queues_to_reload_.RunActiveCallbacks();
}
453
// Moves delayed tasks that are now ready onto the work queues of both wake-up
// queues, stamping the whole batch with a single shared enqueue order.
void SequenceManagerImpl::MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::MoveReadyDelayedTasksToWorkQueues");

  EnqueueOrder delayed_task_group_enqueue_order = GetNextSequenceNumber();
  main_thread_only().wake_up_queue->MoveReadyDelayedTasksToWorkQueues(
      lazy_now, delayed_task_group_enqueue_order);
  main_thread_only()
      .non_waking_wake_up_queue->MoveReadyDelayedTasksToWorkQueues(
          lazy_now, delayed_task_group_enqueue_order);
}
465
// NestingObserver callback (registered on |controller_|): tracks nesting depth
// and forwards the event to the Observer, if any.
void SequenceManagerImpl::OnBeginNestedRunLoop() {
  main_thread_only().nesting_depth++;
  if (main_thread_only().observer)
    main_thread_only().observer->OnBeginNestedRunLoop();
}
471
// NestingObserver callback: when the outermost nested loop exits, requeues the
// non-nestable tasks that were deferred while nested.
void SequenceManagerImpl::OnExitNestedRunLoop() {
  main_thread_only().nesting_depth--;
  DCHECK_GE(main_thread_only().nesting_depth, 0);
  if (main_thread_only().nesting_depth == 0) {
    // While we were nested some non-nestable tasks may have been deferred. We
    // push them back onto the *front* of their original work queues, that's why
    // we iterate |non_nestable_task_queue| in LIFO order (we want
    // |non_nestable_task.front()| to be the last task pushed at the front of
    // |task_queue|).
    LazyNow exited_nested_now(main_thread_clock());
    while (!main_thread_only().non_nestable_task_queue.empty()) {
      internal::TaskQueueImpl::DeferredNonNestableTask& non_nestable_task =
          main_thread_only().non_nestable_task_queue.back();
      if (!non_nestable_task.task.queue_time.is_null()) {
        // Adjust the deferred tasks' queue time to now so that intentionally
        // deferred tasks are not unfairly considered as having been stuck in
        // the queue for a while. Note: this does not affect task ordering as
        // |enqueue_order| is untouched and deferred tasks will still be pushed
        // back to the front of the queue.
        non_nestable_task.task.queue_time = exited_nested_now.Now();
      }
      auto* const task_queue = non_nestable_task.task_queue;
      task_queue->RequeueDeferredNonNestableTask(std::move(non_nestable_task));
      main_thread_only().non_nestable_task_queue.pop_back();
    }
  }
  if (main_thread_only().observer)
    main_thread_only().observer->OnExitNestedRunLoop();
}
501
// Asks the thread controller to schedule an immediate DoWork.
void SequenceManagerImpl::ScheduleWork() {
  controller_->ScheduleWork();
}
505
// Forwards the next wake-up to the thread controller: an immediate wake-up
// becomes ScheduleWork(), anything else (including nullopt) a delayed DoWork.
void SequenceManagerImpl::SetNextWakeUp(LazyNow* lazy_now,
                                        absl::optional<WakeUp> wake_up) {
  auto next_wake_up = AdjustWakeUp(wake_up, lazy_now);
  if (next_wake_up && next_wake_up->is_immediate()) {
    ScheduleWork();
  } else {
    controller_->SetNextDelayedDoWork(lazy_now, next_wake_up);
  }
}
515
// Adds priority / queue-name details for |selected_task| to the trace event,
// but only when the "scheduler" trace category is enabled.
void SequenceManagerImpl::MaybeEmitTaskDetails(
    perfetto::EventContext& ctx,
    const SequencedTaskSource::SelectedTask& selected_task) const {
#if BUILDFLAG(ENABLE_BASE_TRACING)
  // Other parameters are included only when "scheduler" category is enabled.
  const uint8_t* scheduler_category_enabled =
      TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("scheduler");

  if (!*scheduler_category_enabled)
    return;
  auto* event = ctx.event<perfetto::protos::pbzero::ChromeTrackEvent>();
  auto* sequence_manager_task = event->set_sequence_manager_task();
  sequence_manager_task->set_priority(
      settings().priority_settings.TaskPriorityToProto(selected_task.priority));
  sequence_manager_task->set_queue_name(selected_task.task_queue_name);

#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}
534
535 absl::optional<SequenceManagerImpl::SelectedTask>
SelectNextTask(LazyNow & lazy_now,SelectTaskOption option)536 SequenceManagerImpl::SelectNextTask(LazyNow& lazy_now,
537 SelectTaskOption option) {
538 absl::optional<SelectedTask> selected_task =
539 SelectNextTaskImpl(lazy_now, option);
540
541 return selected_task;
542 }
543
#if DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)
// Logs details about the front task of |selected_work_queue| according to the
// task-execution logging mode configured in |settings_|.
void SequenceManagerImpl::LogTaskDebugInfo(
    const WorkQueue* selected_work_queue) const {
  const Task* task = selected_work_queue->GetFrontTask();
  switch (settings_.task_execution_logging) {
    case Settings::TaskLogging::kNone:
      break;

    // Log enqueue order, queue name, cross-thread-ness and posting location.
    case Settings::TaskLogging::kEnabled:
      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << task->posted_from.ToString();
      break;

    // Like kEnabled, but with the task's posting backtrace (the trace array
    // is truncated at the first null entry).
    case Settings::TaskLogging::kEnabledWithBacktrace: {
      std::array<const void*, PendingTask::kTaskBacktraceLength + 1> task_trace;
      task_trace[0] = task->posted_from.program_counter();
      ranges::copy(task->task_backtrace, task_trace.begin() + 1);
      size_t length = 0;
      while (length < task_trace.size() && task_trace[length])
        ++length;
      if (length == 0)
        break;
      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << debug::StackTrace(task_trace.data(), length);
      break;
    }

    // Only log when this task skipped over lower-priority tasks, and list the
    // tasks that were skipped.
    case Settings::TaskLogging::kReorderedOnly: {
      std::vector<const Task*> skipped_tasks;
      main_thread_only().selector.CollectSkippedOverLowerPriorityTasks(
          selected_work_queue, &skipped_tasks);

      if (skipped_tasks.empty())
        break;

      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << task->posted_from.ToString();

      for (const Task* skipped_task : skipped_tasks) {
        LOG(INFO) << "# (skipped over) "
                  << static_cast<uint64_t>(skipped_task->enqueue_order()) << " "
                  << skipped_task->posted_from.ToString();
      }
    }
  }
}
#endif  // DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)
597
// Selects and starts the next task: reloads empty queues, moves ready delayed
// tasks, then loops over the selector's choice — skipping canceled tasks and
// deferring non-nestable tasks while nested. Returns nullopt when no runnable
// work exists. The chosen task is pushed onto |task_execution_stack| and the
// caller must balance this with DidRunTask().
absl::optional<SequenceManagerImpl::SelectedTask>
SequenceManagerImpl::SelectNextTaskImpl(LazyNow& lazy_now,
                                        SelectTaskOption option) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::SelectNextTask");

  ReloadEmptyWorkQueues();
  MoveReadyDelayedTasksToWorkQueues(&lazy_now);

  // If we sampled now, check if it's time to reclaim memory next time we go
  // idle.
  if (lazy_now.has_value() &&
      lazy_now.Now() >= main_thread_only().next_time_to_reclaim_memory) {
    main_thread_only().memory_reclaim_scheduled = true;
  }

  while (true) {
    internal::WorkQueue* work_queue =
        main_thread_only().selector.SelectWorkQueueToService(option);
    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
        TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"), "SequenceManager",
        this,
        AsValueWithSelectorResultForTracing(work_queue,
                                            /* force_verbose */ false));

    if (!work_queue)
      return absl::nullopt;

    // If the head task was canceled, remove it and run the selector again.
    if (UNLIKELY(work_queue->RemoveAllCanceledTasksFromFront()))
      continue;

    if (UNLIKELY(work_queue->GetFrontTask()->nestable ==
                     Nestable::kNonNestable &&
                 main_thread_only().nesting_depth > 0)) {
      // Defer non-nestable work. NOTE these tasks can be arbitrarily delayed so
      // the additional delay should not be a problem.
      // Note because we don't delete queues while nested, it's perfectly OK to
      // store the raw pointer for |queue| here.
      internal::TaskQueueImpl::DeferredNonNestableTask deferred_task{
          work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
          work_queue->queue_type()};
      main_thread_only().non_nestable_task_queue.push_back(
          std::move(deferred_task));
      continue;
    }

#if DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)
    LogTaskDebugInfo(work_queue);
#endif  // DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)

    main_thread_only().task_execution_stack.emplace_back(
        work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
        InitializeTaskTiming(work_queue->task_queue()));

    ExecutingTask& executing_task =
        *main_thread_only().task_execution_stack.rbegin();
    NotifyWillProcessTask(&executing_task, &lazy_now);

    // Maybe invalidate the delayed task handle. If already invalidated, then
    // don't run this task.
    if (!executing_task.pending_task.WillRunTask()) {
      executing_task.pending_task.task = DoNothing();
    }

    return SelectedTask(
        executing_task.pending_task,
        executing_task.task_queue->task_execution_trace_logger(),
        executing_task.priority, executing_task.task_queue_name);
  }
}
670
// Balances SelectNextTask(): notifies observers, pops the execution stack and,
// when not nested, performs queue cleanup.
void SequenceManagerImpl::DidRunTask(LazyNow& lazy_now) {
  ExecutingTask& executing_task =
      *main_thread_only().task_execution_stack.rbegin();

  NotifyDidProcessTask(&executing_task, &lazy_now);
  main_thread_only().task_execution_stack.pop_back();

  if (main_thread_only().nesting_depth == 0)
    CleanUpQueues();
}
681
// Drops canceled delayed tasks from the front of both wake-up queues, gated on
// the kNoWakeUpsForCanceledTasks feature (cached in the global).
void SequenceManagerImpl::RemoveAllCanceledDelayedTasksFromFront(
    LazyNow* lazy_now) {
  if (!g_no_wake_ups_for_canceled_tasks.load(std::memory_order_relaxed))
    return;

  main_thread_only().wake_up_queue->RemoveAllCanceledDelayedTasksFromFront(
      lazy_now);
  main_thread_only()
      .non_waking_wake_up_queue->RemoveAllCanceledDelayedTasksFromFront(
          lazy_now);
}
693
// Computes when the thread next needs to wake up: immediately if the selector
// has pending work (before or after reloading), otherwise at the next delayed
// wake-up (possibly none).
absl::optional<WakeUp> SequenceManagerImpl::GetPendingWakeUp(
    LazyNow* lazy_now,
    SelectTaskOption option) const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);

  if (auto priority =
          main_thread_only().selector.GetHighestPendingPriority(option)) {
    // If the selector has non-empty queues we trivially know there is immediate
    // work to be done. However we may want to yield to native work if it is
    // more important.
    return WakeUp{};
  }

  // There may be some incoming immediate work which we haven't accounted for.
  // NB ReloadEmptyWorkQueues involves a memory barrier, so it's fastest to not
  // do this always.
  ReloadEmptyWorkQueues();

  if (auto priority =
          main_thread_only().selector.GetHighestPendingPriority(option)) {
    return WakeUp{};
  }

  // Otherwise we need to find the shortest delay, if any. NB we don't need to
  // call MoveReadyDelayedTasksToWorkQueues because it's assumed
  // DelayTillNextTask will return TimeDelta() if the delayed task is due to
  // run now.
  return AdjustWakeUp(GetNextDelayedWakeUpWithOption(option), lazy_now);
}
723
// Returns the next delayed wake-up from the waking queue only (the non-waking
// queue never triggers wake-ups).
absl::optional<WakeUp> SequenceManagerImpl::GetNextDelayedWakeUp() const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  return main_thread_only().wake_up_queue->GetNextDelayedWakeUp();
}
728
GetNextDelayedWakeUpWithOption(SelectTaskOption option) const729 absl::optional<WakeUp> SequenceManagerImpl::GetNextDelayedWakeUpWithOption(
730 SelectTaskOption option) const {
731 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
732
733 if (option == SelectTaskOption::kSkipDelayedTask)
734 return absl::nullopt;
735 return GetNextDelayedWakeUp();
736 }
737
// Normalizes a raw wake-up: overdue work becomes an immediate wake-up, and all
// wake-ups are suppressed while a time domain is installed.
absl::optional<WakeUp> SequenceManagerImpl::AdjustWakeUp(
    absl::optional<WakeUp> wake_up,
    LazyNow* lazy_now) const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  if (!wake_up)
    return absl::nullopt;
  // Overdue work needs to be run immediately.
  if (lazy_now->Now() >= wake_up->earliest_time())
    return WakeUp{};
  // If |time_domain| is present, we don't want an actual OS level delayed wake
  // up scheduled, so pretend we have no more work. This will result in
  // appearing idle and |time_domain| will decide what to do in
  // MaybeFastForwardToWakeUp().
  if (main_thread_only().time_domain)
    return absl::nullopt;
  return *wake_up;
}
755
// Stamps the per-thread delay leeway on |task|, but only when no time domain
// is installed.
void SequenceManagerImpl::MaybeAddLeewayToTask(Task& task) const {
  if (!main_thread_only().time_domain) {
    task.leeway = GetTaskLeewayForCurrentThread();
  }
}
761
// Returns whether a high resolution (Windows) timer should be active for the
// pending delayed work.
// TODO(crbug/1267874): Rename once ExplicitHighResolutionTimerWin experiment is
// shipped.
bool SequenceManagerImpl::HasPendingHighResolutionTasks() {
  // Only consider high-res tasks in the |wake_up_queue| (ignore the
  // |non_waking_wake_up_queue|).
#if BUILDFLAG(IS_WIN)
  if (g_explicit_high_resolution_timer_win) {
    absl::optional<WakeUp> wake_up =
        main_thread_only().wake_up_queue->GetNextDelayedWakeUp();
    // No pending delayed wake-up: nothing needs a high resolution timer.
    if (!wake_up)
      return false;
    // Under the kExplicitHighResolutionTimerWin experiment, rely on leeway
    // being larger than the minimum time of a low resolution timer (16ms). This
    // way, we don't need to activate the high resolution timer for precise
    // tasks that will run in more than 16ms if there are non precise tasks in
    // front of them.
    DCHECK_GE(GetDefaultTaskLeeway(),
              Milliseconds(Time::kMinLowResolutionThresholdMs));
    return wake_up->delay_policy == subtle::DelayPolicy::kPrecise;
  }
#endif  // BUILDFLAG(IS_WIN)
  return main_thread_only().wake_up_queue->has_pending_high_resolution_tasks();
}
785
OnSystemIdle()786 bool SequenceManagerImpl::OnSystemIdle() {
787 bool have_work_to_do = false;
788 if (main_thread_only().time_domain) {
789 auto wakeup = main_thread_only().wake_up_queue->GetNextDelayedWakeUp();
790 have_work_to_do = main_thread_only().time_domain->MaybeFastForwardToWakeUp(
791 wakeup, controller_->ShouldQuitRunLoopWhenIdle());
792 }
793 if (!have_work_to_do) {
794 MaybeReclaimMemory();
795 if (main_thread_only().on_next_idle_callback)
796 std::move(main_thread_only().on_next_idle_callback).Run();
797 }
798 return have_work_to_do;
799 }
800
// Gives the thread controller a chance to annotate |pending_task| before it
// is enqueued.
void SequenceManagerImpl::WillQueueTask(Task* pending_task) {
  controller_->WillQueueTask(pending_task);
}
804
InitializeTaskTiming(internal::TaskQueueImpl * task_queue)805 TaskQueue::TaskTiming SequenceManagerImpl::InitializeTaskTiming(
806 internal::TaskQueueImpl* task_queue) {
807 bool records_wall_time =
808 ShouldRecordTaskTiming(task_queue) == TimeRecordingPolicy::DoRecord;
809 bool records_thread_time = records_wall_time && ShouldRecordCPUTimeForTask();
810 return TaskQueue::TaskTiming(records_wall_time, records_thread_time);
811 }
812
ShouldRecordTaskTiming(const internal::TaskQueueImpl * task_queue)813 TimeRecordingPolicy SequenceManagerImpl::ShouldRecordTaskTiming(
814 const internal::TaskQueueImpl* task_queue) {
815 if (task_queue->RequiresTaskTiming())
816 return TimeRecordingPolicy::DoRecord;
817 if (main_thread_only().nesting_depth == 0 &&
818 !main_thread_only().task_time_observers.empty()) {
819 return TimeRecordingPolicy::DoRecord;
820 }
821 return TimeRecordingPolicy::DoNotRecord;
822 }
823
// Runs all "task is about to run" bookkeeping and notifications: crash keys,
// quiescence tracking, start-time recording, then (in order) the global
// TaskObservers, the queue's own observers, the TaskTimeObservers, and the
// queue's OnTaskStarted hook.
void SequenceManagerImpl::NotifyWillProcessTask(ExecutingTask* executing_task,
                                                LazyNow* time_before_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyWillProcessTaskObservers");

  RecordCrashKeys(executing_task->pending_task);

  if (executing_task->task_queue->GetQuiescenceMonitored())
    main_thread_only().task_was_run_on_quiescence_monitored_queue = true;

  // Record the start time before notifying observers so observer overhead is
  // not attributed to the task.
  TimeRecordingPolicy recording_policy =
      ShouldRecordTaskTiming(executing_task->task_queue);
  if (recording_policy == TimeRecordingPolicy::DoRecord)
    executing_task->task_timing.RecordTaskStart(time_before_task);

  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  const bool was_blocked_or_low_priority =
      executing_task->task_queue->WasBlockedOrLowPriority(
          executing_task->pending_task.enqueue_order());

  // Sequence-manager-wide task observers first...
  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers) {
      observer.WillProcessTask(executing_task->pending_task,
                               was_blocked_or_low_priority);
    }
  }

  // ...then the queue's own observers.
  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyWillProcessTask");
    executing_task->task_queue->NotifyWillProcessTask(
        executing_task->pending_task, was_blocked_or_low_priority);
  }

  if (recording_policy != TimeRecordingPolicy::DoRecord)
    return;

  // Time observers are only notified for top-level (non-nested) tasks.
  if (main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers)
      observer.WillProcessTask(executing_task->task_timing.start_time());
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskStarted");
    executing_task->task_queue->OnTaskStarted(executing_task->pending_task,
                                              executing_task->task_timing);
  }
}
879
// Runs all "task finished" notifications, mirroring NotifyWillProcessTask():
// queue completion hook, end-time recording, TaskTimeObservers, global
// TaskObservers, the queue's observers, and finally long-task tracing.
void SequenceManagerImpl::NotifyDidProcessTask(ExecutingTask* executing_task,
                                               LazyNow* time_after_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyDidProcessTaskObservers");
  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  TaskQueue::TaskTiming& task_timing = executing_task->task_timing;

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskCompleted");
    if (task_timing.has_wall_time()) {
      executing_task->task_queue->OnTaskCompleted(
          executing_task->pending_task, &task_timing, time_after_task);
    }
  }

  // The task may have started before timing was enabled (e.g. an observer was
  // added mid-task); only record an end time if a start was recorded.
  bool has_valid_start =
      task_timing.state() != TaskQueue::TaskTiming::State::NotStarted;
  TimeRecordingPolicy recording_policy =
      ShouldRecordTaskTiming(executing_task->task_queue);
  // Record end time ASAP to avoid bias due to the overhead of observers.
  if (recording_policy == TimeRecordingPolicy::DoRecord && has_valid_start) {
    task_timing.RecordTaskEnd(time_after_task);
  }

  // Time observers only fire for top-level (non-nested) tasks.
  if (has_valid_start && task_timing.has_wall_time() &&
      main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers) {
      observer.DidProcessTask(task_timing.start_time(), task_timing.end_time());
    }
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers)
      observer.DidProcessTask(executing_task->pending_task);
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyDidProcessTask");
    executing_task->task_queue->NotifyDidProcessTask(
        executing_task->pending_task);
  }

  // TODO(altimin): Move this back to blink.
  if (task_timing.has_wall_time() &&
      recording_policy == TimeRecordingPolicy::DoRecord &&
      task_timing.wall_duration() > kLongTaskTraceEventThreshold &&
      main_thread_only().nesting_depth == 0) {
    TRACE_EVENT_INSTANT1("blink", "LongTask", TRACE_EVENT_SCOPE_THREAD,
                         "duration", task_timing.wall_duration().InSecondsF());
  }
}
939
// Sets how many tasks the thread controller may run per work batch. A batch
// must contain at least one task.
void SequenceManagerImpl::SetWorkBatchSize(int work_batch_size) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK_GE(work_batch_size, 1);
  controller_->SetWorkBatchSize(work_batch_size);
}
945
// Forwards the timer slack policy to the thread controller.
void SequenceManagerImpl::SetTimerSlack(TimerSlack timer_slack) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  controller_->SetTimerSlack(timer_slack);
}
950
// Registers a TaskObserver; main-thread only (enforced by the DCHECK).
void SequenceManagerImpl::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_observers.AddObserver(task_observer);
}
955
// Unregisters a previously added TaskObserver; main-thread only.
void SequenceManagerImpl::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_observers.RemoveObserver(task_observer);
}
960
// Registers a TaskTimeObserver; main-thread only.
void SequenceManagerImpl::AddTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_time_observers.AddObserver(task_time_observer);
}
966
// Unregisters a previously added TaskTimeObserver; main-thread only.
void SequenceManagerImpl::RemoveTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_time_observers.RemoveObserver(task_time_observer);
}
972
GetAndClearSystemIsQuiescentBit()973 bool SequenceManagerImpl::GetAndClearSystemIsQuiescentBit() {
974 bool task_was_run =
975 main_thread_only().task_was_run_on_quiescence_monitored_queue;
976 main_thread_only().task_was_run_on_quiescence_monitored_queue = false;
977 return !task_was_run;
978 }
979
// Returns the next enqueue order used to sequence posted tasks.
EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
  return enqueue_order_generator_.GenerateNext();
}
983
984 std::unique_ptr<trace_event::ConvertableToTraceFormat>
AsValueWithSelectorResultForTracing(internal::WorkQueue * selected_work_queue,bool force_verbose) const985 SequenceManagerImpl::AsValueWithSelectorResultForTracing(
986 internal::WorkQueue* selected_work_queue,
987 bool force_verbose) const {
988 return std::make_unique<TracedBaseValue>(
989 Value(AsValueWithSelectorResult(selected_work_queue, force_verbose)));
990 }
991
AsValueWithSelectorResult(internal::WorkQueue * selected_work_queue,bool force_verbose) const992 Value::Dict SequenceManagerImpl::AsValueWithSelectorResult(
993 internal::WorkQueue* selected_work_queue,
994 bool force_verbose) const {
995 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
996 TimeTicks now = NowTicks();
997 Value::Dict state;
998 Value::List active_queues;
999 for (auto* const queue : main_thread_only().active_queues)
1000 active_queues.Append(queue->AsValue(now, force_verbose));
1001 state.Set("active_queues", std::move(active_queues));
1002 Value::List shutdown_queues;
1003 for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
1004 shutdown_queues.Append(pair.first->AsValue(now, force_verbose));
1005 state.Set("queues_to_gracefully_shutdown", std::move(shutdown_queues));
1006 Value::List queues_to_delete;
1007 for (const auto& pair : main_thread_only().queues_to_delete)
1008 queues_to_delete.Append(pair.first->AsValue(now, force_verbose));
1009 state.Set("queues_to_delete", std::move(queues_to_delete));
1010 state.Set("selector", main_thread_only().selector.AsValue());
1011 if (selected_work_queue) {
1012 state.Set("selected_queue", selected_work_queue->task_queue()->GetName());
1013 state.Set("work_queue_name", selected_work_queue->name());
1014 }
1015 state.Set("time_domain", main_thread_only().time_domain
1016 ? main_thread_only().time_domain->AsValue()
1017 : Value::Dict());
1018 state.Set("wake_up_queue", main_thread_only().wake_up_queue->AsValue(now));
1019 state.Set("non_waking_wake_up_queue",
1020 main_thread_only().non_waking_wake_up_queue->AsValue(now));
1021 return state;
1022 }
1023
OnTaskQueueEnabled(internal::TaskQueueImpl * queue)1024 void SequenceManagerImpl::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
1025 DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
1026 DCHECK(queue->IsQueueEnabled());
1027 // Only schedule DoWork if there's something to do.
1028 if (queue->HasTaskToRunImmediatelyOrReadyDelayedTask() &&
1029 !queue->BlockedByFence())
1030 ScheduleWork();
1031 }
1032
MaybeReclaimMemory()1033 void SequenceManagerImpl::MaybeReclaimMemory() {
1034 if (!main_thread_only().memory_reclaim_scheduled)
1035 return;
1036
1037 TRACE_EVENT0("sequence_manager", "SequenceManagerImpl::MaybeReclaimMemory");
1038 ReclaimMemory();
1039
1040 // To avoid performance regressions we only want to do this every so often.
1041 main_thread_only().next_time_to_reclaim_memory =
1042 NowTicks() + kReclaimMemoryInterval;
1043 main_thread_only().memory_reclaim_scheduled = false;
1044 }
1045
// Asks every active and gracefully-shutting-down queue to release unused
// memory.
void SequenceManagerImpl::ReclaimMemory() {
  LazyNow lazy_now(main_thread_clock());
  // NOTE(review): each iterator is advanced *before* the call, presumably
  // because ReclaimMemoryFromQueue() can remove the queue from the container
  // and invalidate its iterator — confirm before restructuring these loops.
  for (auto it = main_thread_only().active_queues.begin();
       it != main_thread_only().active_queues.end();) {
    auto* const queue = *it++;
    ReclaimMemoryFromQueue(queue, &lazy_now);
  }
  for (auto it = main_thread_only().queues_to_gracefully_shutdown.begin();
       it != main_thread_only().queues_to_gracefully_shutdown.end();) {
    auto* const queue = it->first;
    it++;
    ReclaimMemoryFromQueue(queue, &lazy_now);
  }
}
1060
// Destroys gracefully-shutting-down queues that have drained, and releases
// all queues that were marked for deletion.
void SequenceManagerImpl::CleanUpQueues() {
  for (auto it = main_thread_only().queues_to_gracefully_shutdown.begin();
       it != main_thread_only().queues_to_gracefully_shutdown.end();) {
    if (it->first->IsEmpty()) {
      UnregisterTaskQueueImpl(std::move(it->second));
      main_thread_only().active_queues.erase(it->first);
      // Post-increment keeps |it| valid across the erase of its element.
      main_thread_only().queues_to_gracefully_shutdown.erase(it++);
    } else {
      ++it;
    }
  }
  main_thread_only().queues_to_delete.clear();
}
1074
RemoveAllCanceledTasksFromFrontOfWorkQueues()1075 void SequenceManagerImpl::RemoveAllCanceledTasksFromFrontOfWorkQueues() {
1076 for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
1077 queue->delayed_work_queue()->RemoveAllCanceledTasksFromFront();
1078 queue->immediate_work_queue()->RemoveAllCanceledTasksFromFront();
1079 }
1080 }
1081
// Returns a weak pointer that is invalidated when this manager is destroyed.
WeakPtr<SequenceManagerImpl> SequenceManagerImpl::GetWeakPtr() {
  return weak_factory_.GetWeakPtr();
}
1085
SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner)1086 void SequenceManagerImpl::SetDefaultTaskRunner(
1087 scoped_refptr<SingleThreadTaskRunner> task_runner) {
1088 controller_->SetDefaultTaskRunner(task_runner);
1089 }
1090
// Returns the clock used for scheduling; safe to call from any thread.
const TickClock* SequenceManagerImpl::GetTickClock() const {
  return any_thread_clock();
}
1094
// Returns the current time per the any-thread clock.
TimeTicks SequenceManagerImpl::NowTicks() const {
  return any_thread_clock()->NowTicks();
}
1098
ShouldRecordCPUTimeForTask()1099 bool SequenceManagerImpl::ShouldRecordCPUTimeForTask() {
1100 DCHECK(ThreadTicks::IsSupported() ||
1101 !metric_recording_settings_.records_cpu_time_for_some_tasks());
1102 return metric_recording_settings_.records_cpu_time_for_some_tasks() &&
1103 main_thread_only().metrics_subsampler->ShouldSample(
1104 metric_recording_settings_
1105 .task_sampling_rate_for_recording_cpu_time);
1106 }
1107
// Returns the metric recording configuration this manager was created with.
const SequenceManager::MetricRecordingSettings&
SequenceManagerImpl::GetMetricRecordingSettings() const {
  return metric_recording_settings_;
}
1112
// Forwards the task-execution-allowed flag to the thread controller.
void SequenceManagerImpl::SetTaskExecutionAllowed(bool allowed) {
  controller_->SetTaskExecutionAllowed(allowed);
}
1116
// Queries the thread controller for whether task execution is allowed.
bool SequenceManagerImpl::IsTaskExecutionAllowed() const {
  return controller_->IsTaskExecutionAllowed();
}
1120
#if BUILDFLAG(IS_IOS)
// iOS-only hook; delegates attachment to the thread controller.
void SequenceManagerImpl::AttachToMessagePump() {
  return controller_->AttachToMessagePump();
}
#endif
1126
IsIdleForTesting()1127 bool SequenceManagerImpl::IsIdleForTesting() {
1128 ReloadEmptyWorkQueues();
1129 RemoveAllCanceledTasksFromFrontOfWorkQueues();
1130 return !main_thread_only().selector.GetHighestPendingPriority().has_value();
1131 }
1132
// Forwards time-keeper metrics enabling to the thread controller, tagging
// metrics with |thread_name|.
void SequenceManagerImpl::EnableMessagePumpTimeKeeperMetrics(
    const char* thread_name) {
  controller_->EnableMessagePumpTimeKeeperMetrics(thread_name);
}
1137
GetPendingTaskCountForTesting() const1138 size_t SequenceManagerImpl::GetPendingTaskCountForTesting() const {
1139 size_t total = 0;
1140 for (internal::TaskQueueImpl* task_queue : main_thread_only().active_queues) {
1141 total += task_queue->GetNumberOfPendingTasks();
1142 }
1143 return total;
1144 }
1145
// Creates a reference-counted TaskQueue backed by a new TaskQueueImpl
// registered with this sequence manager.
scoped_refptr<TaskQueue> SequenceManagerImpl::CreateTaskQueue(
    const TaskQueue::Spec& spec) {
  return WrapRefCounted(new TaskQueue(CreateTaskQueueImpl(spec), spec));
}
1150
DescribeAllPendingTasks() const1151 std::string SequenceManagerImpl::DescribeAllPendingTasks() const {
1152 Value::Dict value =
1153 AsValueWithSelectorResult(nullptr, /* force_verbose */ true);
1154 std::string result;
1155 JSONWriter::Write(value, &result);
1156 return result;
1157 }
1158
// Asks the thread controller to favor yielding to native work until
// |prioritize_until|.
void SequenceManagerImpl::PrioritizeYieldingToNative(
    base::TimeTicks prioritize_until) {
  controller_->PrioritizeYieldingToNative(prioritize_until);
}
1163
// Registers an observer to be notified when this manager is destroyed.
void SequenceManagerImpl::AddDestructionObserver(
    CurrentThread::DestructionObserver* destruction_observer) {
  main_thread_only().destruction_observers.AddObserver(destruction_observer);
}
1168
// Unregisters a previously added destruction observer.
void SequenceManagerImpl::RemoveDestructionObserver(
    CurrentThread::DestructionObserver* destruction_observer) {
  main_thread_only().destruction_observers.RemoveObserver(destruction_observer);
}
1173
// Registers a one-shot callback run the next time the system goes idle (see
// OnSystemIdle()). Only one callback may be pending at a time; passing a null
// callback clears the pending one (that is what the DCHECK permits).
void SequenceManagerImpl::RegisterOnNextIdleCallback(
    OnceClosure on_next_idle_callback) {
  DCHECK(!main_thread_only().on_next_idle_callback || !on_next_idle_callback);
  main_thread_only().on_next_idle_callback = std::move(on_next_idle_callback);
}
1179
SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner)1180 void SequenceManagerImpl::SetTaskRunner(
1181 scoped_refptr<SingleThreadTaskRunner> task_runner) {
1182 controller_->SetDefaultTaskRunner(task_runner);
1183 }
1184
// Returns the default task runner held by the thread controller.
scoped_refptr<SingleThreadTaskRunner> SequenceManagerImpl::GetTaskRunner() {
  return controller_->GetDefaultTaskRunner();
}
1188
// Whether this manager's associated thread is the calling thread.
bool SequenceManagerImpl::IsBoundToCurrentThread() const {
  return associated_thread_->IsBoundToCurrentThread();
}
1192
// Returns the message pump bound to the thread controller, if any.
MessagePump* SequenceManagerImpl::GetMessagePump() const {
  return controller_->GetBoundMessagePump();
}
1196
// Whether this manager was configured with the given message pump type.
bool SequenceManagerImpl::IsType(MessagePumpType type) const {
  return settings_.message_loop_type == type;
}
1200
// Allocates the crash key that RecordCrashKeys() uses to publish the async
// stack of the currently running task. Must be called at most once (see the
// DCHECK). No-op on NaCl, where crash keys are unavailable.
void SequenceManagerImpl::EnableCrashKeys(const char* async_stack_crash_key) {
  DCHECK(!main_thread_only().async_stack_crash_key);
#if !BUILDFLAG(IS_NACL)
  main_thread_only().async_stack_crash_key = debug::AllocateCrashKeyString(
      async_stack_crash_key, debug::CrashKeySize::Size64);
  // The scratch buffer written by RecordCrashKeys() must exactly fit the
  // allocated crash key.
  static_assert(sizeof(main_thread_only().async_stack_buffer) ==
                static_cast<size_t>(debug::CrashKeySize::Size64),
                "Async stack buffer size must match crash key size.");
#endif  // BUILDFLAG(IS_NACL)
}
1211
// Publishes the async "stack" (posting program counters) of |pending_task|
// to the crash key allocated by EnableCrashKeys().
void SequenceManagerImpl::RecordCrashKeys(const PendingTask& pending_task) {
#if !BUILDFLAG(IS_NACL)
  // SetCrashKeyString is a no-op even if the crash key is null, but we'd still
  // have to construct the StringPiece that is passed in.
  if (!main_thread_only().async_stack_crash_key)
    return;

  // Write the async stack trace onto a crash key as whitespace-delimited hex
  // addresses. These will be symbolized by the crash reporting system. With
  // 63 characters we can fit the address of the task that posted the current
  // task and its predecessor. Avoid HexEncode since it incurs a memory
  // allocation and snprintf because it's about 3.5x slower on Android than
  // this.
  //
  // See
  // https://chromium.googlesource.com/chromium/src/+/main/docs/debugging_with_crash_keys.md
  // for instructions for symbolizing these crash keys.
  //
  // TODO(skyostil): Find a way to extract the destination function address
  // from the task.
  size_t max_size = main_thread_only().async_stack_buffer.size();
  char* const buffer = &main_thread_only().async_stack_buffer[0];
  char* const buffer_end = &buffer[max_size - 1];
  // The buffer is filled right-to-left; |pos| walks backwards from the end.
  char* pos = buffer_end;
  // Leave space for the NUL terminator.
  pos = PrependHexAddress(pos - 1, pending_task.task_backtrace[0]);
  *(--pos) = ' ';
  pos = PrependHexAddress(pos - 1, pending_task.posted_from.program_counter());
  DCHECK_GE(pos, buffer);
  debug::SetCrashKeyString(
      main_thread_only().async_stack_crash_key,
      StringPiece(pos, static_cast<size_t>(buffer_end - pos)));
#endif  // BUILDFLAG(IS_NACL)
}
1246
currently_executing_task_queue() const1247 internal::TaskQueueImpl* SequenceManagerImpl::currently_executing_task_queue()
1248 const {
1249 if (main_thread_only().task_execution_stack.empty())
1250 return nullptr;
1251 return main_thread_only().task_execution_stack.rbegin()->task_queue;
1252 }
1253
// Returns the number of priority levels configured in the settings.
TaskQueue::QueuePriority SequenceManagerImpl::GetPriorityCount() const {
  return settings().priority_settings.priority_count();
}
1257
// Out-of-line storage definition for the class-scope constant.
constexpr TimeDelta SequenceManagerImpl::kReclaimMemoryInterval;
1259
1260 } // namespace internal
1261 } // namespace sequence_manager
1262 } // namespace base
1263