// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_

#include <stddef.h>

#include <functional>
#include <memory>
#include <queue>
#include <set>
#include <utility>
#include <vector>

#include "base/base_export.h"
#include "base/containers/flat_map.h"
#include "base/containers/intrusive_heap.h"
#include "base/dcheck_is_on.h"
#include "base/functional/callback.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
#include "base/pending_task.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/operations_controller.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/atomic_flag_set.h"
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/fence.h"
#include "base/task/sequence_manager/lazily_deallocated_deque.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/task_queue.h"
#include "base/threading/thread_checker.h"
#include "base/time/time_override.h"
#include "base/trace_event/base_tracing_forward.h"
#include "base/values.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

namespace base {
class LazyNow;
namespace sequence_manager::internal {

class SequenceManagerImpl;
class WorkQueue;
class WorkQueueSets;
class WakeUpQueue;

// TaskQueueImpl has four main queues:
//
// Immediate (non-delayed) tasks:
//    |immediate_incoming_queue| - PostTask enqueues tasks here.
//    |immediate_work_queue| - SequenceManager takes immediate tasks here.
//
// Delayed tasks:
//    |delayed_incoming_queue| - PostDelayedTask enqueues tasks here.
//    |delayed_work_queue| - SequenceManager takes delayed tasks here.
//
// The |immediate_incoming_queue| can be accessed from any thread; the other
// queues are main-thread only. To reduce the overhead of locking,
// |immediate_work_queue| is swapped with |immediate_incoming_queue| when
// |immediate_work_queue| becomes empty.
//
// Delayed tasks are initially posted to |delayed_incoming_queue| and a wake-up
// is scheduled with the TimeDomain. When the delay has elapsed, ready delayed
// tasks are moved into the |delayed_work_queue| (see
// MoveReadyDelayedTasksToWorkQueue()). Note that the EnqueueOrder (used for
// ordering) of a delayed task is not set until it is moved into the
// |delayed_work_queue|.
//
// TaskQueueImpl uses the WorkQueueSets and the TaskQueueSelector to implement
// prioritization. Task selection is done by the TaskQueueSelector and when a
// queue is selected, it round-robins between the |immediate_work_queue| and
// |delayed_work_queue|. The reason for this is to make sure that delayed
// tasks (normally the most common type) don't starve out immediate work.
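//
// A simplified sketch of the swap described above (illustrative only; the
// helper names are hypothetical, and the real implementation uses TaskDeque,
// |any_thread_lock_| and an AtomicFlagSet rather than these exact types):
//
//   // Any thread: enqueue under the lock.
//   void PostImmediate(Task task) {
//     AutoLock lock(any_thread_lock_);
//     immediate_incoming_queue.push_back(std::move(task));
//   }
//
//   // Main thread: when |immediate_work_queue| runs dry, grab the whole
//   // incoming batch with one swap instead of locking per task.
//   void ReloadIfEmpty() {
//     if (immediate_work_queue.empty()) {
//       AutoLock lock(any_thread_lock_);
//       std::swap(immediate_work_queue, immediate_incoming_queue);
//     }
//   }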
class BASE_EXPORT TaskQueueImpl {
 public:
  // Initializes the state of all the task queue features. Must be invoked
  // after FeatureList initialization and while Chrome is still single-threaded.
  static void InitializeFeatures();

  // Sets the global cached state of the RemoveCanceledTasksInTaskQueue feature
  // according to its enabled state. Must be invoked after FeatureList
  // initialization.
  static void ApplyRemoveCanceledTasksInTaskQueue();

  // Resets the global cached state of the RemoveCanceledTasksInTaskQueue
  // feature according to its default state.
  static void ResetRemoveCanceledTasksInTaskQueueForTesting();

  TaskQueueImpl(SequenceManagerImpl* sequence_manager,
                WakeUpQueue* wake_up_queue,
                const TaskQueue::Spec& spec);

  TaskQueueImpl(const TaskQueueImpl&) = delete;
  TaskQueueImpl& operator=(const TaskQueueImpl&) = delete;
  ~TaskQueueImpl();

  // Types of queues TaskQueueImpl is maintaining internally.
  enum class WorkQueueType { kImmediate, kDelayed };

  // Some methods have fast paths when on the main thread.
  enum class CurrentThread { kMainThread, kNotMainThread };

  // Non-nestable tasks may get deferred, but the queue of deferred tasks is
  // maintained on the SequenceManager side, so we need to keep the information
  // required to requeue them.
  struct DeferredNonNestableTask {
    Task task;

    // `task_queue` is not a raw_ptr<...> for performance reasons (based on
    // analysis of sampling profiler data and tab_search:top100:2020).
    RAW_PTR_EXCLUSION internal::TaskQueueImpl* task_queue;

    WorkQueueType work_queue_type;
  };

  using OnNextWakeUpChangedCallback = RepeatingCallback<void(TimeTicks)>;
  using OnTaskStartedHandler =
      RepeatingCallback<void(const Task&, const TaskQueue::TaskTiming&)>;
  using OnTaskCompletedHandler =
      RepeatingCallback<void(const Task&, TaskQueue::TaskTiming*, LazyNow*)>;
  using OnTaskPostedHandler = RepeatingCallback<void(const Task&)>;
  using TaskExecutionTraceLogger =
      RepeatingCallback<void(perfetto::EventContext&, const Task&)>;

  // May be called from any thread.
  scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(
      TaskType task_type) const;

  // TaskQueue implementation.
  const char* GetName() const;
  QueueName GetProtoName() const;
  bool IsQueueEnabled() const;
  void SetQueueEnabled(bool enabled);
  void SetShouldReportPostedTasksWhenDisabled(bool should_report);
  bool IsEmpty() const;
  size_t GetNumberOfPendingTasks() const;
  bool HasTaskToRunImmediatelyOrReadyDelayedTask() const;
  absl::optional<WakeUp> GetNextDesiredWakeUp();
  void SetQueuePriority(TaskQueue::QueuePriority priority);
  TaskQueue::QueuePriority GetQueuePriority() const;
  void AddTaskObserver(TaskObserver* task_observer);
  void RemoveTaskObserver(TaskObserver* task_observer);
  void InsertFence(TaskQueue::InsertFencePosition position);
  void InsertFenceAt(TimeTicks time);
  void RemoveFence();
  bool HasActiveFence();
  bool BlockedByFence() const;
  void SetThrottler(TaskQueue::Throttler* throttler);
  void ResetThrottler();

  void UnregisterTaskQueue();

  // Returns true if a (potentially hypothetical) task with the specified
  // |enqueue_order| could run on the queue. Must be called from the main
  // thread.
  bool CouldTaskRun(EnqueueOrder enqueue_order) const;

  // Returns true if a task with |enqueue_order| obtained from this queue was
  // ever in the queue while it was disabled, blocked by a fence, or less
  // important than kNormalPriority.
  bool WasBlockedOrLowPriority(EnqueueOrder enqueue_order) const;

  // Must only be called from the thread this task queue was created on.
  void ReloadEmptyImmediateWorkQueue();

  Value::Dict AsValue(TimeTicks now, bool force_verbose) const;

  bool GetQuiescenceMonitored() const { return should_monitor_quiescence_; }
  bool GetShouldNotifyObservers() const { return should_notify_observers_; }

  void NotifyWillProcessTask(const Task& task,
                             bool was_blocked_or_low_priority);
  void NotifyDidProcessTask(const Task& task);

  // Returns true iff this queue has work that can execute now, i.e. immediate
  // tasks or delayed tasks that have been transferred to the work queue by
  // MoveReadyDelayedTasksToWorkQueue(). Delayed tasks that are still in the
  // incoming queue are not taken into account. Ignores the queue's enabled
  // state and fences.
  bool HasTaskToRunImmediately() const;
  bool HasTaskToRunImmediatelyLocked() const
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);

  bool has_pending_high_resolution_tasks() const {
    return main_thread_only()
        .delayed_incoming_queue.has_pending_high_resolution_tasks();
  }

  WorkQueue* delayed_work_queue() {
    return main_thread_only().delayed_work_queue.get();
  }

  const WorkQueue* delayed_work_queue() const {
    return main_thread_only().delayed_work_queue.get();
  }

  WorkQueue* immediate_work_queue() {
    return main_thread_only().immediate_work_queue.get();
  }

  const WorkQueue* immediate_work_queue() const {
    return main_thread_only().immediate_work_queue.get();
  }

  TaskExecutionTraceLogger task_execution_trace_logger() const {
    return main_thread_only().task_execution_trace_logger;
  }

  // Removes all canceled tasks from the front of the delayed incoming queue.
  // After calling this, GetNextDesiredWakeUp() is guaranteed to return a time
  // for a non-canceled task, if one exists. Returns true if a canceled task
  // was removed.
  bool RemoveAllCanceledDelayedTasksFromFront(LazyNow* lazy_now);

  // Enqueues in `delayed_work_queue` all delayed tasks which must run now
  // (cannot be postponed) and possibly some delayed tasks which can run now but
  // could be postponed (due to how tasks are stored, it is not possible to
  // retrieve all such tasks efficiently). Must be called from the main thread.
  void MoveReadyDelayedTasksToWorkQueue(LazyNow* lazy_now,
                                        EnqueueOrder enqueue_order);

  void OnWakeUp(LazyNow* lazy_now, EnqueueOrder enqueue_order);

  const WakeUpQueue* wake_up_queue() const {
    return main_thread_only().wake_up_queue;
  }

  HeapHandle heap_handle() const { return main_thread_only().heap_handle; }

  void set_heap_handle(HeapHandle heap_handle) {
    main_thread_only().heap_handle = heap_handle;
  }

  // Pushes |task| onto the front of the specified work queue. Caution must be
  // taken with this API because you could easily starve out other work.
  // TODO(kraynov): Simplify non-nestable task logic https://crbug.com/845437.
  void RequeueDeferredNonNestableTask(DeferredNonNestableTask task);

  void PushImmediateIncomingTaskForTest(Task task);

  // Iterates over |delayed_incoming_queue| removing canceled tasks. In
  // addition MaybeShrinkQueue is called on all internal queues.
  void ReclaimMemory(TimeTicks now);

  // Allows wrapping TaskQueue to set a handler to subscribe for notifications
  // about started and completed tasks.
  void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
  void OnTaskStarted(const Task& task,
                     const TaskQueue::TaskTiming& task_timing);

  // |task_timing| may be passed in the Running state, without an end time, so
  // that the handler can run an additional task that is counted as part of the
  // main task.
  // The handler may optionally call TaskTiming::RecordTaskEnd to finalize the
  // task and use the resulting timing.
  void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler);
  void OnTaskCompleted(const Task& task,
                       TaskQueue::TaskTiming* task_timing,
                       LazyNow* lazy_now);
  bool RequiresTaskTiming() const;
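  //
  // A hedged usage sketch for the completion handler (illustrative only;
  // RecordLatencyMetric is a hypothetical helper):
  //
  //   task_queue_impl->SetOnTaskCompletedHandler(base::BindRepeating(
  //       [](const Task& task, TaskQueue::TaskTiming* timing, LazyNow* now) {
  //         // Optionally finalize the timing before reading it.
  //         timing->RecordTaskEnd(now);
  //         RecordLatencyMetric(*timing);
  //       }));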

  // Adds a callback for custom processing of posted tasks. The callback is
  // dispatched while holding a scheduler lock. As a result, the callback
  // should not call scheduler APIs directly, as this can lead to deadlocks;
  // for example, PostTask should not be called directly and
  // ScopedDeferTaskPosting::PostOrDefer should be used instead. `handler` must
  // not be a null callback.
  [[nodiscard]] std::unique_ptr<TaskQueue::OnTaskPostedCallbackHandle>
  AddOnTaskPostedHandler(OnTaskPostedHandler handler);
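  //
  // A hedged usage sketch (illustrative only): the returned handle keeps the
  // handler registered; dropping it is expected to unregister the handler.
  //
  //   auto handle = task_queue_impl->AddOnTaskPostedHandler(
  //       base::BindRepeating([](const Task& task) {
  //         // Runs under a scheduler lock: observe only; to post a task from
  //         // here, use ScopedDeferTaskPosting::PostOrDefer.
  //       }));
  //   // ... later, reset `handle` to unregister.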

  // Set a callback to fill trace event arguments associated with the task
  // execution.
  void SetTaskExecutionTraceLogger(TaskExecutionTraceLogger logger);

  WeakPtr<SequenceManagerImpl> GetSequenceManagerWeakPtr();

  SequenceManagerImpl* sequence_manager() const { return sequence_manager_; }

  // Returns true if this queue is unregistered or the task queue manager has
  // been deleted, in which case this queue can be safely deleted on any thread.
  bool IsUnregistered() const;

  // Updates this queue's next wake up time in the time domain,
  // taking into account the desired run time of queued tasks and
  // policies enforced by the Throttler.
  void UpdateWakeUp(LazyNow* lazy_now);

 protected:
  // Sets this queue's next wake up time to |wake_up| in the time domain.
  void SetNextWakeUp(LazyNow* lazy_now, absl::optional<WakeUp> wake_up);

 private:
  friend class WorkQueue;
  friend class WorkQueueTest;
  friend class DelayedTaskHandleDelegate;

  // A TaskQueueImpl instance can be destroyed or unregistered before all its
  // associated TaskRunner instances are (they are refcounted). Thus we need a
  // way to prevent TaskRunner instances from posting further tasks. This class
  // guards PostTask calls using an OperationsController.
  // This class is ref-counted as both the TaskQueueImpl instance and all
  // associated TaskRunner instances share the same GuardedTaskPoster instance.
  // When TaskQueueImpl shuts down, it calls ShutdownAndWaitForZeroOperations(),
  // preventing further PostTask calls from being made to the underlying
  // TaskQueueImpl.
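  //
  // A hedged sketch of the guard pattern (assumes OperationsController's
  // TryBeginOperation() returns a scoped token that evaluates to false once
  // shutdown has started):
  //
  //   bool GuardedTaskPoster::PostTask(PostedTask task) {
  //     auto token = operations_controller_.TryBeginOperation();
  //     if (!token)
  //       return false;  // Queue already shut down; drop the task.
  //     outer_->PostTask(std::move(task));
  //     return true;
  //   }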
  class GuardedTaskPoster : public RefCountedThreadSafe<GuardedTaskPoster> {
   public:
    explicit GuardedTaskPoster(TaskQueueImpl* outer);

    bool PostTask(PostedTask task);
    DelayedTaskHandle PostCancelableTask(PostedTask task);

    void StartAcceptingOperations() {
      operations_controller_.StartAcceptingOperations();
    }

    void ShutdownAndWaitForZeroOperations() {
      operations_controller_.ShutdownAndWaitForZeroOperations();
      // `operations_controller_` won't allow any further operations at this
      // point, and `outer_` might get destroyed before `this` does, so
      // clearing `outer_` avoids a potential dangling pointer.
      outer_ = nullptr;
    }

   private:
    friend class RefCountedThreadSafe<GuardedTaskPoster>;

    ~GuardedTaskPoster();

    base::internal::OperationsController operations_controller_;
    // Pointer might be stale; access is guarded by |operations_controller_|.
    raw_ptr<TaskQueueImpl> outer_;
  };

  class TaskRunner final : public SingleThreadTaskRunner {
   public:
    explicit TaskRunner(
        scoped_refptr<GuardedTaskPoster> task_poster,
        scoped_refptr<const AssociatedThreadId> associated_thread,
        TaskType task_type);

    bool PostDelayedTask(const Location& location,
                         OnceClosure callback,
                         TimeDelta delay) final;
    bool PostDelayedTaskAt(subtle::PostDelayedTaskPassKey,
                           const Location& location,
                           OnceClosure callback,
                           TimeTicks delayed_run_time,
                           base::subtle::DelayPolicy delay_policy) final;
    DelayedTaskHandle PostCancelableDelayedTaskAt(
        subtle::PostDelayedTaskPassKey,
        const Location& location,
        OnceClosure callback,
        TimeTicks delayed_run_time,
        base::subtle::DelayPolicy delay_policy) final;
    DelayedTaskHandle PostCancelableDelayedTask(subtle::PostDelayedTaskPassKey,
                                                const Location& location,
                                                OnceClosure callback,
                                                TimeDelta delay) final;
    bool PostNonNestableDelayedTask(const Location& location,
                                    OnceClosure callback,
                                    TimeDelta delay) final;
    bool RunsTasksInCurrentSequence() const final;

   private:
    ~TaskRunner() final;

    const scoped_refptr<GuardedTaskPoster> task_poster_;
    const scoped_refptr<const AssociatedThreadId> associated_thread_;
    const TaskType task_type_;
  };

  class OnTaskPostedCallbackHandleImpl
      : public TaskQueue::OnTaskPostedCallbackHandle {
   public:
    OnTaskPostedCallbackHandleImpl(
        TaskQueueImpl* task_queue_impl,
        scoped_refptr<const AssociatedThreadId> associated_thread_);
    ~OnTaskPostedCallbackHandleImpl() override;

    // Callback handles can outlive the associated TaskQueueImpl, so the
    // reference needs to be cleared when the queue is unregistered.
    void UnregisterTaskQueue() { task_queue_impl_ = nullptr; }

   private:
    raw_ptr<TaskQueueImpl> task_queue_impl_;
    const scoped_refptr<const AssociatedThreadId> associated_thread_;
  };

  // A queue for holding delayed tasks before their delay has expired.
  struct DelayedIncomingQueue {
   public:
    DelayedIncomingQueue();
    DelayedIncomingQueue(const DelayedIncomingQueue&) = delete;
    DelayedIncomingQueue& operator=(const DelayedIncomingQueue&) = delete;
    ~DelayedIncomingQueue();

    void push(Task task);
    void remove(HeapHandle heap_handle);
    Task take_top();
    bool empty() const { return queue_.empty(); }
    size_t size() const { return queue_.size(); }
    const Task& top() const { return queue_.top(); }
    void swap(DelayedIncomingQueue* other);

    bool has_pending_high_resolution_tasks() const {
      return pending_high_res_tasks_;
    }

    // TODO(crbug.com/1155905): we pass SequenceManager to be able to record
    // crash keys. Remove this parameter after chasing down this crash.
    void SweepCancelledTasks(SequenceManagerImpl* sequence_manager);
    Value::List AsValue(TimeTicks now) const;

   private:
    struct Compare {
      bool operator()(const Task& lhs, const Task& rhs) const;
    };
    IntrusiveHeap<Task, Compare> queue_;

    // Number of pending tasks in the queue that need high resolution timing.
    int pending_high_res_tasks_ = 0;
  };

  struct MainThreadOnly {
    MainThreadOnly(TaskQueueImpl* task_queue, WakeUpQueue* wake_up_queue);
    ~MainThreadOnly();

    raw_ptr<WakeUpQueue> wake_up_queue;

    raw_ptr<TaskQueue::Throttler> throttler = nullptr;

    std::unique_ptr<WorkQueue> delayed_work_queue;
    std::unique_ptr<WorkQueue> immediate_work_queue;
    DelayedIncomingQueue delayed_incoming_queue;
    ObserverList<TaskObserver>::Unchecked task_observers;
    HeapHandle heap_handle;
    bool is_enabled = true;
    absl::optional<Fence> current_fence;
    absl::optional<TimeTicks> delayed_fence;
    // Snapshots the next sequence number when the queue is unblocked, otherwise
    // it contains EnqueueOrder::none(). If the EnqueueOrder of a task just
    // popped from this queue is greater than this, it means that the queue was
    // never disabled or blocked by a fence while the task was queued.
    EnqueueOrder enqueue_order_at_which_we_became_unblocked;
    // If the EnqueueOrder of a task just popped from this queue is greater than
    // this, it means that the queue was never disabled, blocked by a fence or
    // less important than kNormalPriority while the task was queued.
    //
    // Implementation details:
    // 1) When the queue is made less important than kNormalPriority, this is
    //    set to EnqueueOrder::max(). The EnqueueOrder of any task will compare
    //    less than this.
    // 2) When the queue is made at least as important as kNormalPriority, this
    //    snapshots the next sequence number. If the queue is blocked, the value
    //    is irrelevant because no task should be popped. If the queue is not
    //    blocked, the EnqueueOrder of any already queued task will compare less
    //    than this.
    // 3) When the queue is unblocked while at least as important as
    //    kNormalPriority, this snapshots the next sequence number. The
    //    EnqueueOrder of any already queued task will compare less than this.
    //
    // TODO(crbug.com/1249857): Change this to use `TaskOrder`.
    EnqueueOrder
        enqueue_order_at_which_we_became_unblocked_with_normal_priority;
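    //
    // A worked example for the field above (illustrative values): suppose it
    // holds 40. A task popped with EnqueueOrder 42 compares greater, so the
    // queue was never disabled, blocked, or below kNormalPriority while that
    // task was queued; a task with EnqueueOrder 37 compares less, so it may
    // have been. While the queue is below kNormalPriority the field holds
    // EnqueueOrder::max(), so every task compares less than it.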
    OnTaskStartedHandler on_task_started_handler;
    OnTaskCompletedHandler on_task_completed_handler;
    TaskExecutionTraceLogger task_execution_trace_logger;
    // Last reported wake up, used only in UpdateWakeUp to avoid
    // excessive calls.
    absl::optional<WakeUp> scheduled_wake_up;
    // If false, the queue will be disabled. Used only in tests.
    bool is_enabled_for_test = true;
    // The time at which the task queue was disabled, if it is currently
    // disabled.
    absl::optional<TimeTicks> disabled_time;
    // Whether or not the task queue should emit tracing events for tasks
    // posted to this queue when it is disabled.
    bool should_report_posted_tasks_when_disabled = false;
  };

  void PostTask(PostedTask task);
  void RemoveCancelableTask(HeapHandle heap_handle);

  void PostImmediateTaskImpl(PostedTask task, CurrentThread current_thread);
  void PostDelayedTaskImpl(PostedTask task, CurrentThread current_thread);

  // Push the task onto the |delayed_incoming_queue|. Lock-free main thread
  // only fast path.
  void PushOntoDelayedIncomingQueueFromMainThread(Task pending_task,
                                                  LazyNow* lazy_now,
                                                  bool notify_task_annotator);

  // Push the task onto the |delayed_incoming_queue|.  Slow path from other
  // threads.
  void PushOntoDelayedIncomingQueue(Task pending_task);

  void ScheduleDelayedWorkTask(Task pending_task);

  void MoveReadyImmediateTasksToImmediateWorkQueueLocked()
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);

  // Records, for a pseudorandom sample of tasks posted on the main thread, the
  // task's delay and the size of the |delayed_incoming_queue| in histograms.
  void RecordQueuingDelayedTaskMetrics(const Task& pending_task,
                                       LazyNow* lazy_now);

  // LazilyDeallocatedDeque uses TimeTicks to figure out when to resize, so we
  // should always use real time here.
  using TaskDeque =
      LazilyDeallocatedDeque<Task, subtle::TimeTicksNowIgnoringOverride>;

  // Extracts all the tasks from the immediate incoming queue and swaps it with
  // |queue|, which must be empty.
  // Can be called from any thread.
  void TakeImmediateIncomingQueueTasks(TaskDeque* queue);

  void TraceQueueSize() const;
  static Value::List QueueAsValue(const TaskDeque& queue, TimeTicks now);
  static Value::Dict TaskAsValue(const Task& task, TimeTicks now);

  // Returns a Task representation for `delayed_task`.
  Task MakeDelayedTask(PostedTask delayed_task, LazyNow* lazy_now) const;

  // Activates a delayed fence if its time has come, based on `task`'s delayed
  // run time.
  void ActivateDelayedFenceIfNeeded(const Task& task);

  // Updates state protected by any_thread_lock_.
  void UpdateCrossThreadQueueStateLocked()
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);

  TimeDelta GetTaskDelayAdjustment(CurrentThread current_thread);

  // Reports the task if it was due to IPC and was posted to a disabled queue.
  // This should be called after WillQueueTask has been called for the task.
  void MaybeReportIpcTaskQueuedFromMainThread(const Task& pending_task);
  bool ShouldReportIpcTaskQueuedFromAnyThreadLocked(
      base::TimeDelta* time_since_disabled)
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
  void MaybeReportIpcTaskQueuedFromAnyThreadLocked(const Task& pending_task)
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
  void MaybeReportIpcTaskQueuedFromAnyThreadUnlocked(const Task& pending_task);
  void ReportIpcTaskQueued(const Task& pending_task,
                           const base::TimeDelta& time_since_disabled);

  // Invoked when the queue becomes enabled and not blocked by a fence.
  void OnQueueUnblocked();

  void InsertFence(Fence fence);

  void RemoveOnTaskPostedHandler(
      OnTaskPostedCallbackHandleImpl* on_task_posted_callback_handle);

  TaskQueue::QueuePriority DefaultPriority() const;

  QueueName name_;
  const raw_ptr<SequenceManagerImpl, DanglingUntriaged> sequence_manager_;

  const scoped_refptr<const AssociatedThreadId> associated_thread_;

  const scoped_refptr<GuardedTaskPoster> task_poster_;

  mutable base::internal::CheckedLock any_thread_lock_;

  struct AnyThread {
    // Mirrored from MainThreadOnly. These are only used for tracing.
    struct TracingOnly {
      TracingOnly();
      ~TracingOnly();

      bool is_enabled = true;
      absl::optional<TimeTicks> disabled_time;
      bool should_report_posted_tasks_when_disabled = false;
    };

    AnyThread();
    ~AnyThread();

    TaskDeque immediate_incoming_queue;

    // True if main_thread_only().immediate_work_queue is empty.
    bool immediate_work_queue_empty = true;

    bool post_immediate_task_should_schedule_work = true;

    bool unregistered = false;

    base::flat_map<raw_ptr<OnTaskPostedCallbackHandleImpl>, OnTaskPostedHandler>
        on_task_posted_handlers;

#if DCHECK_IS_ON()
    // A cache of |immediate_work_queue->work_queue_set_index()| which is used
    // to index into
    // SequenceManager::Settings::per_priority_cross_thread_task_delay to apply
    // a priority specific delay for debugging purposes.
    size_t queue_set_index = 0;
#endif

    TracingOnly tracing_only;
  };

  AnyThread any_thread_ GUARDED_BY(any_thread_lock_);

  MainThreadOnly main_thread_only_;
  MainThreadOnly& main_thread_only() {
    DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
    return main_thread_only_;
  }
  const MainThreadOnly& main_thread_only() const {
    DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
    return main_thread_only_;
  }

  // Handle to our entry within the SequenceManager's |empty_queues_to_reload_|
  // atomic flag set. Used to signal that this queue needs to be reloaded.
  // If you call SetActive(false), do so while holding |any_thread_lock_|,
  // because there is a danger that a cross-thread PostTask might reset it
  // before we make |immediate_work_queue| non-empty.
  AtomicFlagSet::AtomicFlag empty_queues_to_reload_handle_;

  const bool should_monitor_quiescence_;
  const bool should_notify_observers_;
  const bool delayed_fence_allowed_;
};

}  // namespace sequence_manager::internal
}  // namespace base

#endif  // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_