// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/sequence_manager/work_queue.h"

#include <optional>

#include "base/debug/alias.h"
#include "base/task/common/task_annotator.h"
#include "base/task/sequence_manager/fence.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/task_order.h"
#include "base/task/sequence_manager/work_queue_sets.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/cleanup/cleanup.h"
#include "third_party/abseil-cpp/absl/container/inlined_vector.h"

namespace base {
namespace sequence_manager {
namespace internal {

WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
                     const char* name,
                     QueueType queue_type)
    : task_queue_(task_queue), name_(name), queue_type_(queue_type) {}

Value::List WorkQueue::AsValue(TimeTicks now) const {
  Value::List state;
  for (const Task& task : tasks_)
    state.Append(TaskQueueImpl::TaskAsValue(task, now));
  return state;
}

WorkQueue::~WorkQueue() {
  DCHECK(!work_queue_sets_) << task_queue_->GetName() << " : "
                            << work_queue_sets_->GetName() << " : " << name_;
}

const Task* WorkQueue::GetFrontTask() const {
  if (tasks_.empty())
    return nullptr;
  return &tasks_.front();
}

const Task* WorkQueue::GetBackTask() const {
  if (tasks_.empty())
    return nullptr;
  return &tasks_.back();
}

bool WorkQueue::BlockedByFence() const {
  if (!fence_)
    return false;

  // If the queue is empty then any future tasks will have a higher enqueue
  // order and will be blocked. The queue is also blocked if the head is past
  // the fence.
  return tasks_.empty() || tasks_.front().task_order() >= fence_->task_order();
}

std::optional<TaskOrder> WorkQueue::GetFrontTaskOrder() const {
  if (tasks_.empty() || BlockedByFence())
    return std::nullopt;
  // Quick sanity check.
  DCHECK(tasks_.front().task_order() <= tasks_.back().task_order())
      << task_queue_->GetName() << " : " << work_queue_sets_->GetName()
      << " : " << name_;
  return tasks_.front().task_order();
}

void WorkQueue::Push(Task task) {
  bool was_empty = tasks_.empty();
#ifndef NDEBUG
  DCHECK(task.enqueue_order_set());
#endif

  // Make sure the task order is strictly increasing.
  DCHECK(was_empty || tasks_.back().task_order() < task.task_order());
  // Make sure enqueue order is strictly increasing for immediate queues and
  // monotonically increasing for delayed queues.
  DCHECK(was_empty || tasks_.back().enqueue_order() < task.enqueue_order() ||
         (queue_type_ == QueueType::kDelayed &&
          tasks_.back().enqueue_order() == task.enqueue_order()));

  // Amortized O(1).
  tasks_.push_back(std::move(task));

  if (!was_empty)
    return;

  // If we hit the fence, pretend to WorkQueueSets that we're empty.
  if (work_queue_sets_ && !BlockedByFence())
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}

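// TaskPusher batches pushes onto a WorkQueue: it records whether the queue
// was empty on construction, skips the per-task WorkQueueSets notification in
// Push(), and instead notifies |work_queue_sets_| once from its destructor if
// the queue went from empty to non-empty and isn't blocked by a fence.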
WorkQueue::TaskPusher::TaskPusher(WorkQueue* work_queue)
    : work_queue_(work_queue), was_empty_(work_queue->Empty()) {}

WorkQueue::TaskPusher::TaskPusher(TaskPusher&& other)
    : work_queue_(other.work_queue_), was_empty_(other.was_empty_) {
  other.work_queue_ = nullptr;
}

void WorkQueue::TaskPusher::Push(Task task) {
  DCHECK(work_queue_);

#ifndef NDEBUG
  DCHECK(task.enqueue_order_set());
#endif

  // Make sure the task order is strictly increasing.
  DCHECK(work_queue_->tasks_.empty() ||
         work_queue_->tasks_.back().task_order() < task.task_order());
  // Make sure enqueue order is strictly increasing for immediate queues and
  // monotonically increasing for delayed queues.
  DCHECK(work_queue_->tasks_.empty() ||
         work_queue_->tasks_.back().enqueue_order() < task.enqueue_order() ||
         (work_queue_->queue_type_ == QueueType::kDelayed &&
          work_queue_->tasks_.back().enqueue_order() == task.enqueue_order()));

  // Amortized O(1).
  work_queue_->tasks_.push_back(std::move(task));
}

WorkQueue::TaskPusher::~TaskPusher() {
  // If |work_queue_| became non-empty and it isn't blocked by a fence, then we
  // must notify |work_queue_->work_queue_sets_|.
  if (was_empty_ && work_queue_ && !work_queue_->Empty() &&
      work_queue_->work_queue_sets_ && !work_queue_->BlockedByFence()) {
    work_queue_->work_queue_sets_->OnTaskPushedToEmptyQueue(work_queue_);
  }
}

WorkQueue::TaskPusher WorkQueue::CreateTaskPusher() {
  return TaskPusher(this);
}

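// Pushes a non-nestable |task| at the head of the queue. The task must order
// before the current front task. Unless the queue is blocked by a fence,
// WorkQueueSets is notified either that the queue became non-empty or that
// its front task changed.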
void WorkQueue::PushNonNestableTaskToFront(Task task) {
  DCHECK(task.nestable == Nestable::kNonNestable);

  bool was_empty = tasks_.empty();
  bool was_blocked = BlockedByFence();
#ifndef NDEBUG
  DCHECK(task.enqueue_order_set());
#endif

  if (!was_empty) {
    // Make sure the task order is strictly increasing.
    DCHECK(task.task_order() < tasks_.front().task_order())
        << task_queue_->GetName() << " : " << work_queue_sets_->GetName()
        << " : " << name_;
    // Make sure the enqueue order is strictly increasing for immediate queues
    // and monotonically increasing for delayed queues.
    DCHECK(task.enqueue_order() < tasks_.front().enqueue_order() ||
           (queue_type_ == QueueType::kDelayed &&
            task.enqueue_order() == tasks_.front().enqueue_order()))
        << task_queue_->GetName() << " : " << work_queue_sets_->GetName()
        << " : " << name_;
  }

  // Amortized O(1).
  tasks_.push_front(std::move(task));

  if (!work_queue_sets_)
    return;

  // Pretend to WorkQueueSets that nothing has changed if we're blocked.
  if (BlockedByFence())
    return;

  // Pushing a task to the front may unblock the fence.
  if (was_empty || was_blocked) {
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
  } else {
    work_queue_sets_->OnQueuesFrontTaskChanged(this);
  }
}

void WorkQueue::TakeImmediateIncomingQueueTasks() {
  DCHECK(tasks_.empty());

  task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
  if (tasks_.empty())
    return;

  // If we hit the fence, pretend to WorkQueueSets that we're empty.
  if (work_queue_sets_ && !BlockedByFence())
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}

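// Pops the task at the head of the queue. If popping empties an immediate
// queue, the queue is reloaded from the incoming immediate queue before
// WorkQueueSets is updated, so the queue-set bookkeeping sees the new front
// task (or a genuinely empty queue) in a single step.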
Task WorkQueue::TakeTaskFromWorkQueue() {
  DCHECK(work_queue_sets_);
  DCHECK(!tasks_.empty());

  Task pending_task = std::move(tasks_.front());
  tasks_.pop_front();
  // NB immediate tasks have a different pipeline to delayed ones.
  if (tasks_.empty()) {
    // NB delayed tasks are inserted via Push, so we don't need to reload those.
    if (queue_type_ == QueueType::kImmediate) {
      // Short-circuit the queue reload so that OnPopMinQueueInSet does the
      // right thing.
      task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
    }
    // Since the queue is empty, now is a good time to consider reducing its
    // capacity if we're wasting memory.
    tasks_.MaybeShrinkQueue();
  }

  DCHECK(work_queue_sets_);
#if DCHECK_IS_ON()
  // If diagnostics are on, it's possible task queues are being selected at
  // random, so we can't use the (slightly) more efficient OnPopMinQueueInSet.
  work_queue_sets_->OnQueuesFrontTaskChanged(this);
#else
  // OnPopMinQueueInSet calls GetFrontTaskOrder, which checks
  // BlockedByFence(), so we don't need to here.
  work_queue_sets_->OnPopMinQueueInSet(this);
#endif
  task_queue_->TraceQueueSize();
  return pending_task;
}

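// Sweeps canceled (or null) tasks from the front of the queue until a live
// task is found. Returns true if any tasks were removed. Removed tasks are
// moved out of |tasks_| and destroyed only when this function returns,
// because their destructors may delete this task queue.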
bool WorkQueue::RemoveAllCanceledTasksFromFront() {
  if (!work_queue_sets_) {
    return false;
  }

  // Since task destructors could have the side effect of deleting this task
  // queue, we move canceled tasks into a temporary container which can be
  // emptied without accessing |this|.
  absl::InlinedVector<Task, 8> tasks_to_delete;

  while (!tasks_.empty()) {
    const auto& pending_task = tasks_.front();
#if DCHECK_IS_ON()
    // Checking if a task is canceled can trip DCHECK/CHECK failures out of the
    // control of the SequenceManager code, so provide a task trace for easier
    // diagnosis. See crbug.com/374409662 for context.
    absl::Cleanup resetter = [original_task =
                                  TaskAnnotator::CurrentTaskForThread()] {
      TaskAnnotator::SetCurrentTaskForThread({}, original_task);
    };
    TaskAnnotator::SetCurrentTaskForThread(base::PassKey<WorkQueue>(),
                                           &pending_task);
#endif
    if (pending_task.task && !pending_task.IsCanceled())
      break;
    tasks_to_delete.push_back(std::move(tasks_.front()));
    tasks_.pop_front();
  }
  if (!tasks_to_delete.empty()) {
    if (tasks_.empty()) {
      // NB delayed tasks are inserted via Push, so we don't need to reload
      // those.
      if (queue_type_ == QueueType::kImmediate) {
        // Short-circuit the queue reload so that OnPopMinQueueInSet does the
        // right thing.
        task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
      }
      // Since the queue is empty, now is a good time to consider reducing its
      // capacity if we're wasting memory.
      tasks_.MaybeShrinkQueue();
    }
    // If we have a valid |heap_handle_| (i.e. we're not blocked by a fence or
    // disabled), then |work_queue_sets_| needs to be told.
    if (heap_handle_.IsValid())
      work_queue_sets_->OnQueuesFrontTaskChanged(this);
    task_queue_->TraceQueueSize();
  }
  return !tasks_to_delete.empty();
}

void WorkQueue::AssignToWorkQueueSets(WorkQueueSets* work_queue_sets) {
  work_queue_sets_ = work_queue_sets;
}

void WorkQueue::AssignSetIndex(size_t work_queue_set_index) {
  work_queue_set_index_ = work_queue_set_index;
}

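// A fence blocks this queue at a given TaskOrder: once the front task's order
// reaches the fence (or the queue is empty), BlockedByFence() reports the
// queue as blocked. InsertFenceImpl() records the new fence and returns
// whether the queue was already blocked by the previous fence.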
bool WorkQueue::InsertFenceImpl(Fence fence) {
  DCHECK(!fence_ || fence.task_order() >= fence_->task_order() ||
         fence.IsBlockingFence());
  bool was_blocked_by_fence = BlockedByFence();
  fence_ = fence;
  return was_blocked_by_fence;
}

void WorkQueue::InsertFenceSilently(Fence fence) {
  // Ensure that there is no fence present or a new one blocks the queue
  // completely.
  DCHECK(!fence_ || fence_->IsBlockingFence());
  InsertFenceImpl(fence);
}

bool WorkQueue::InsertFence(Fence fence) {
  bool was_blocked_by_fence = InsertFenceImpl(fence);
  if (!work_queue_sets_)
    return false;

  // Moving the fence forward may unblock some tasks.
  if (!tasks_.empty() && was_blocked_by_fence && !BlockedByFence()) {
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
    return true;
  }
  // Fence insertion may have blocked all tasks in this work queue.
  if (BlockedByFence())
    work_queue_sets_->OnQueueBlocked(this);
  return false;
}

bool WorkQueue::RemoveFence() {
  bool was_blocked_by_fence = BlockedByFence();
  fence_ = std::nullopt;
  if (work_queue_sets_ && !tasks_.empty() && was_blocked_by_fence) {
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
    return true;
  }
  return false;
}

void WorkQueue::MaybeShrinkQueue() {
  tasks_.MaybeShrinkQueue();
}

void WorkQueue::PopTaskForTesting() {
  if (tasks_.empty())
    return;
  tasks_.pop_front();
}

void WorkQueue::CollectTasksOlderThan(TaskOrder reference,
                                      std::vector<const Task*>* result) const {
  for (const Task& task : tasks_) {
    if (task.task_order() >= reference)
      break;

    result->push_back(&task);
  }
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base