// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/sequence_manager/work_queue.h"

#include "base/containers/stack_container.h"
#include "base/debug/alias.h"
#include "base/task/sequence_manager/fence.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/task_order.h"
#include "base/task/sequence_manager/work_queue_sets.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

namespace base {
namespace sequence_manager {
namespace internal {

WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
                     const char* name,
                     QueueType queue_type)
    : task_queue_(task_queue), name_(name), queue_type_(queue_type) {}

Value::List WorkQueue::AsValue(TimeTicks now) const {
  Value::List state;
  for (const Task& task : tasks_)
    state.Append(TaskQueueImpl::TaskAsValue(task, now));
  return state;
}

WorkQueue::~WorkQueue() {
  DCHECK(!work_queue_sets_) << task_queue_->GetName() << " : "
                            << work_queue_sets_->GetName() << " : " << name_;
}

const Task* WorkQueue::GetFrontTask() const {
  if (tasks_.empty())
    return nullptr;
  return &tasks_.front();
}

const Task* WorkQueue::GetBackTask() const {
  if (tasks_.empty())
    return nullptr;
  return &tasks_.back();
}

bool WorkQueue::BlockedByFence() const {
  if (!fence_)
    return false;

  // If the queue is empty then any future tasks will have a higher enqueue
  // order and will be blocked. The queue is also blocked if the head is past
  // the fence.
  return tasks_.empty() || tasks_.front().task_order() >= fence_->task_order();
}

absl::optional<TaskOrder> WorkQueue::GetFrontTaskOrder() const {
  if (tasks_.empty() || BlockedByFence())
    return absl::nullopt;
  // Quick sanity check.
  DCHECK(tasks_.front().task_order() <= tasks_.back().task_order())
      << task_queue_->GetName() << " : " << work_queue_sets_->GetName() << " : "
      << name_;
  return tasks_.front().task_order();
}

void WorkQueue::Push(Task task) {
  bool was_empty = tasks_.empty();
#ifndef NDEBUG
  DCHECK(task.enqueue_order_set());
#endif

  // Make sure the task order is strictly increasing.
  DCHECK(was_empty || tasks_.back().task_order() < task.task_order());
  // Make sure enqueue order is strictly increasing for immediate queues and
  // monotonically increasing for delayed queues.
  DCHECK(was_empty || tasks_.back().enqueue_order() < task.enqueue_order() ||
         (queue_type_ == QueueType::kDelayed &&
          tasks_.back().enqueue_order() == task.enqueue_order()));

  // Amortized O(1).
  tasks_.push_back(std::move(task));

  if (!was_empty)
    return;

  // If we hit the fence, pretend to WorkQueueSets that we're empty.
  if (work_queue_sets_ && !BlockedByFence())
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}

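// TaskPusher records whether the queue was empty at construction time and
// defers the OnTaskPushedToEmptyQueue() notification to its destructor, so a
// batch of pushes results in at most one WorkQueueSets update.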
WorkQueue::TaskPusher::TaskPusher(WorkQueue* work_queue)
    : work_queue_(work_queue), was_empty_(work_queue->Empty()) {}

WorkQueue::TaskPusher::TaskPusher(TaskPusher&& other)
    : work_queue_(other.work_queue_), was_empty_(other.was_empty_) {
  other.work_queue_ = nullptr;
}

void WorkQueue::TaskPusher::Push(Task task) {
  DCHECK(work_queue_);

#ifndef NDEBUG
  DCHECK(task.enqueue_order_set());
#endif

  // Make sure the task order is strictly increasing.
  DCHECK(work_queue_->tasks_.empty() ||
         work_queue_->tasks_.back().task_order() < task.task_order());
  // Make sure enqueue order is strictly increasing for immediate queues and
  // monotonically increasing for delayed queues.
  DCHECK(work_queue_->tasks_.empty() ||
         work_queue_->tasks_.back().enqueue_order() < task.enqueue_order() ||
         (work_queue_->queue_type_ == QueueType::kDelayed &&
          work_queue_->tasks_.back().enqueue_order() == task.enqueue_order()));

  // Amortized O(1).
  work_queue_->tasks_.push_back(std::move(task));
}

WorkQueue::TaskPusher::~TaskPusher() {
  // If |work_queue_| became non-empty and it isn't blocked by a fence then we
  // must notify |work_queue_->work_queue_sets_|.
  if (was_empty_ && work_queue_ && !work_queue_->Empty() &&
      work_queue_->work_queue_sets_ && !work_queue_->BlockedByFence()) {
    work_queue_->work_queue_sets_->OnTaskPushedToEmptyQueue(work_queue_);
  }
}

WorkQueue::TaskPusher WorkQueue::CreateTaskPusher() {
  return TaskPusher(this);
}

void WorkQueue::PushNonNestableTaskToFront(Task task) {
  DCHECK(task.nestable == Nestable::kNonNestable);

  bool was_empty = tasks_.empty();
  bool was_blocked = BlockedByFence();
#ifndef NDEBUG
  DCHECK(task.enqueue_order_set());
#endif

  if (!was_empty) {
    // Make sure the task order is strictly increasing.
    DCHECK(task.task_order() < tasks_.front().task_order())
        << task_queue_->GetName() << " : " << work_queue_sets_->GetName()
        << " : " << name_;
    // Make sure the enqueue order is strictly increasing for immediate queues
    // and monotonically increasing for delayed queues.
    DCHECK(task.enqueue_order() < tasks_.front().enqueue_order() ||
           (queue_type_ == QueueType::kDelayed &&
            task.enqueue_order() == tasks_.front().enqueue_order()))
        << task_queue_->GetName() << " : " << work_queue_sets_->GetName()
        << " : " << name_;
  }

  // Amortized O(1).
  tasks_.push_front(std::move(task));

  if (!work_queue_sets_)
    return;

  // Pretend to WorkQueueSets that nothing has changed if we're blocked.
  if (BlockedByFence())
    return;

  // Pushing a task to the front may unblock the fence.
  if (was_empty || was_blocked) {
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
  } else {
    work_queue_sets_->OnQueuesFrontTaskChanged(this);
  }
}

void WorkQueue::TakeImmediateIncomingQueueTasks() {
  DCHECK(tasks_.empty());

  task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
  if (tasks_.empty())
    return;

  // If we hit the fence, pretend to WorkQueueSets that we're empty.
  if (work_queue_sets_ && !BlockedByFence())
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}

Task WorkQueue::TakeTaskFromWorkQueue() {
  DCHECK(work_queue_sets_);
  DCHECK(!tasks_.empty());

  Task pending_task = std::move(tasks_.front());
  tasks_.pop_front();
  // NB immediate tasks have a different pipeline to delayed ones.
  if (tasks_.empty()) {
    // NB delayed tasks are inserted via Push, so we don't need to reload those.
    if (queue_type_ == QueueType::kImmediate) {
      // Short-circuit the queue reload so that OnPopMinQueueInSet does the
      // right thing.
      task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
    }
    // Since the queue is empty, now is a good time to consider reducing its
    // capacity if we're wasting memory.
    tasks_.MaybeShrinkQueue();
  }

  DCHECK(work_queue_sets_);
#if DCHECK_IS_ON()
  // If diagnostics are on, it's possible task queues are being selected at
  // random so we can't use the (slightly) more efficient OnPopMinQueueInSet.
  work_queue_sets_->OnQueuesFrontTaskChanged(this);
#else
  // OnPopMinQueueInSet calls GetFrontTaskOrder which checks
  // BlockedByFence() so we don't need to here.
  work_queue_sets_->OnPopMinQueueInSet(this);
#endif
  task_queue_->TraceQueueSize();
  return pending_task;
}

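// Drops canceled (or null) tasks from the front of the queue. Returns true if
// any tasks were removed.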
bool WorkQueue::RemoveAllCanceledTasksFromFront() {
  if (!work_queue_sets_) {
    return false;
  }

  // Since task destructors could have a side effect of deleting this task
  // queue, we move canceled tasks into a temporary container which can be
  // emptied without accessing |this|.
  StackVector<Task, 8> tasks_to_delete;

  while (!tasks_.empty()) {
    const auto& pending_task = tasks_.front();
    if (pending_task.task && !pending_task.IsCanceled())
      break;
    tasks_to_delete->push_back(std::move(tasks_.front()));
    tasks_.pop_front();
  }
  if (!tasks_to_delete->empty()) {
    if (tasks_.empty()) {
      // NB delayed tasks are inserted via Push, so we don't need to reload
      // those.
      if (queue_type_ == QueueType::kImmediate) {
        // Short-circuit the queue reload so that OnPopMinQueueInSet does the
        // right thing.
        task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
      }
      // Since the queue is empty, now is a good time to consider reducing its
      // capacity if we're wasting memory.
      tasks_.MaybeShrinkQueue();
    }
    // If we have a valid |heap_handle_| (i.e. we're not blocked by a fence or
    // disabled) then |work_queue_sets_| needs to be told.
    if (heap_handle_.IsValid())
      work_queue_sets_->OnQueuesFrontTaskChanged(this);
    task_queue_->TraceQueueSize();
  }
  return !tasks_to_delete->empty();
}

void WorkQueue::AssignToWorkQueueSets(WorkQueueSets* work_queue_sets) {
  work_queue_sets_ = work_queue_sets;
}

void WorkQueue::AssignSetIndex(size_t work_queue_set_index) {
  work_queue_set_index_ = work_queue_set_index;
}

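// Installs |fence| and returns whether the queue was blocked by the previous
// fence at the time of the call.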
bool WorkQueue::InsertFenceImpl(Fence fence) {
  DCHECK(!fence_ || fence.task_order() >= fence_->task_order() ||
         fence.IsBlockingFence());
  bool was_blocked_by_fence = BlockedByFence();
  fence_ = fence;
  return was_blocked_by_fence;
}

void WorkQueue::InsertFenceSilently(Fence fence) {
  // Ensure that there is no fence present or that the new one blocks the
  // queue completely.
  DCHECK(!fence_ || fence_->IsBlockingFence());
  InsertFenceImpl(fence);
}

bool WorkQueue::InsertFence(Fence fence) {
  bool was_blocked_by_fence = InsertFenceImpl(fence);
  if (!work_queue_sets_)
    return false;

  // Moving the fence forward may unblock some tasks.
  if (!tasks_.empty() && was_blocked_by_fence && !BlockedByFence()) {
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
    return true;
  }
  // Fence insertion may have blocked all tasks in this work queue.
  if (BlockedByFence())
    work_queue_sets_->OnQueueBlocked(this);
  return false;
}

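// Clears the fence. Returns true if doing so unblocked a non-empty queue, in
// which case |work_queue_sets_| is notified.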
bool WorkQueue::RemoveFence() {
  bool was_blocked_by_fence = BlockedByFence();
  fence_ = absl::nullopt;
  if (work_queue_sets_ && !tasks_.empty() && was_blocked_by_fence) {
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
    return true;
  }
  return false;
}

void WorkQueue::MaybeShrinkQueue() {
  tasks_.MaybeShrinkQueue();
}

void WorkQueue::PopTaskForTesting() {
  if (tasks_.empty())
    return;
  tasks_.pop_front();
}

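// Appends pointers to the queued tasks whose TaskOrder is strictly less than
// |reference|, walking from the front of the queue.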
void WorkQueue::CollectTasksOlderThan(TaskOrder reference,
                                      std::vector<const Task*>* result) const {
  for (const Task& task : tasks_) {
    if (task.task_order() >= reference)
      break;

    result->push_back(&task);
  }
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base