// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_loop.h"

#include <algorithm>
#include <memory>
#include <utility>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump_default.h"
#include "base/metrics/histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "base/tracked_objects.h"
#include "build/build_config.h"

#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
#endif
#if defined(OS_POSIX) && !defined(OS_IOS)
#include "base/message_loop/message_pump_libevent.h"
#endif
#if defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif
#if defined(USE_GLIB)
#include "base/message_loop/message_pump_glib.h"
#endif

namespace base {

namespace {

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists. This should be safe and free of static constructors.
LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
    LAZY_INSTANCE_INITIALIZER;

// Logical events for Histogram profiling. Run with --message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
#if !defined(OS_NACL)
const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
const int kLeastNonZeroMessageId = 1;
const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;

// Provide a macro that takes an expression (such as a constant, or macro
// constant) and creates a pair to initialize an array of pairs. In this case,
// our pair consists of the expression's value, and the "stringized" version
// of the expression (i.e., the expression put in quotes). For example, if
// we have:
//   #define FOO 2
//   #define BAR 5
// then the following:
//   VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
// will expand to:
//   {7, "FOO + BAR"}
// We use the resulting array as an argument to our histogram, which reads the
// number as a bucket identifier, and proceeds to use the corresponding name
// in the pair (i.e., the quoted string) when printing out a histogram.
#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},

const LinearHistogram::DescriptionPair event_descriptions_[] = {
  // Provide some pretty print capability in our histogram for our internal
  // messages.

  // A few events we handle (kindred to messages), and used to profile actions.
  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)

  {-1, NULL}  // The list must be null-terminated, per API to histogram.
};
#endif  // !defined(OS_NACL)

bool enable_histogrammer_ = false;

MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;

#if defined(OS_IOS)
typedef MessagePumpIOSForIO MessagePumpForIO;
#elif defined(OS_NACL_SFI)
typedef MessagePumpDefault MessagePumpForIO;
#elif defined(OS_POSIX)
typedef MessagePumpLibevent MessagePumpForIO;
#endif

#if !defined(OS_NACL_SFI)
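// Downcasts the loop's generic MessagePump to the platform-specific IO pump
// type selected by the typedefs above, so IO-specific methods can be called.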
MessagePumpForIO* ToPumpIO(MessagePump* pump) {
  return static_cast<MessagePumpForIO*>(pump);
}
#endif  // !defined(OS_NACL_SFI)

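// Adapter that returns an already-constructed MessagePump unchanged, so it can
// be passed through a MessagePumpFactoryCallback via Bind(&ReturnPump,
// Passed(&pump)) at the call sites below.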
std::unique_ptr<MessagePump> ReturnPump(std::unique_ptr<MessagePump> pump) {
  return pump;
}

}  // namespace

//------------------------------------------------------------------------------

MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}

MessageLoop::NestingObserver::~NestingObserver() {}

//------------------------------------------------------------------------------

MessageLoop::MessageLoop(Type type)
    : MessageLoop(type, MessagePumpFactoryCallback()) {
  BindToCurrentThread();
}

MessageLoop::MessageLoop(std::unique_ptr<MessagePump> pump)
    : MessageLoop(TYPE_CUSTOM, Bind(&ReturnPump, Passed(&pump))) {
  BindToCurrentThread();
}

MessageLoop::~MessageLoop() {
  // If |pump_| is non-null, this message loop has been bound and should be the
  // current one on this thread. Otherwise, this loop is being destructed before
  // it was bound to a thread, so a different message loop (or no loop at all)
  // may be current.
  DCHECK((pump_ && current() == this) || (!pump_ && current() != this));

  // iOS just attaches to the loop, it doesn't Run it.
  // TODO(stuartmorgan): Consider wiring up a Detach().
#if !defined(OS_IOS)
  DCHECK(!run_loop_);
#endif

#if defined(OS_WIN)
  if (in_high_res_mode_)
    Time::ActivateHighResolutionTimer(false);
#endif
  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon). We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks. Normally, we should only pass through this loop once or twice. If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn. Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  thread_task_runner_handle_.reset();

  // Tell the incoming queue that we are dying.
  incoming_task_queue_->WillDestroyCurrentMessageLoop();
  incoming_task_queue_ = NULL;
  unbound_task_runner_ = NULL;
  task_runner_ = NULL;

  // OK, now make it so that no one can find us.
  if (current() == this)
    lazy_tls_ptr.Pointer()->Set(nullptr);
}

// static
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

// static
void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}

// static
bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
  if (message_pump_for_ui_factory_)
    return false;

  message_pump_for_ui_factory_ = factory;
  return true;
}

// static
std::unique_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
// TODO(rvargas): Get rid of the OS guards.
#if defined(USE_GLIB) && !defined(OS_NACL)
  typedef MessagePumpGlib MessagePumpForUI;
#elif defined(OS_LINUX) && !defined(OS_NACL)
  typedef MessagePumpLibevent MessagePumpForUI;
#endif

#if defined(OS_IOS) || defined(OS_MACOSX)
#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(MessagePumpMac::Create())
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>()
#else
#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(new MessagePumpForUI())
#endif

#if defined(OS_MACOSX)
  // Use an OS native runloop on Mac to support timer coalescing.
#define MESSAGE_PUMP_DEFAULT \
  std::unique_ptr<MessagePump>(new MessagePumpCFRunLoop())
#else
#define MESSAGE_PUMP_DEFAULT \
  std::unique_ptr<MessagePump>(new MessagePumpDefault())
#endif

  if (type == MessageLoop::TYPE_UI) {
    if (message_pump_for_ui_factory_)
      return message_pump_for_ui_factory_();
    return MESSAGE_PUMP_UI;
  }
  if (type == MessageLoop::TYPE_IO)
    return std::unique_ptr<MessagePump>(new MessagePumpForIO());

#if defined(OS_ANDROID)
  if (type == MessageLoop::TYPE_JAVA)
    return std::unique_ptr<MessagePump>(new MessagePumpForUI());
#endif

  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
  return MESSAGE_PUMP_DEFAULT;
}

void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}

void MessageLoop::AddNestingObserver(NestingObserver* observer) {
  DCHECK_EQ(this, current());
  nesting_observers_.AddObserver(observer);
}

void MessageLoop::RemoveNestingObserver(NestingObserver* observer) {
  DCHECK_EQ(this, current());
  nesting_observers_.RemoveObserver(observer);
}

void MessageLoop::PostTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  task_runner_->PostTask(from_here, task);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  task_runner_->PostDelayedTask(from_here, task, delay);
}

void MessageLoop::Run() {
  DCHECK(pump_);
  RunLoop run_loop;
  run_loop.Run();
}

void MessageLoop::RunUntilIdle() {
  DCHECK(pump_);
  RunLoop run_loop;
  run_loop.RunUntilIdle();
}

void MessageLoop::QuitWhenIdle() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    run_loop_->QuitWhenIdle();
  } else {
    NOTREACHED() << "Must be inside Run to call QuitWhenIdle";
  }
}

void MessageLoop::QuitNow() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    pump_->Quit();
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

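// Helper that is bound by MessageLoop::QuitWhenIdleClosure() below.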
static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

// static
Closure MessageLoop::QuitWhenIdleClosure() {
  return Bind(&QuitCurrentWhenIdle);
}

void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (allowed) {
    // Kick the native pump just in case we enter an OS-driven nested message
    // loop.
    pump_->ScheduleWork();
  }
  nestable_tasks_allowed_ = allowed;
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.RemoveObserver(task_observer);
}

bool MessageLoop::is_running() const {
  DCHECK_EQ(this, current());
  return run_loop_ != NULL;
}

bool MessageLoop::HasHighResolutionTasks() {
  return incoming_task_queue_->HasHighResolutionTasks();
}

bool MessageLoop::IsIdleForTesting() {
  // We only check the incoming queue, since we don't want to lock the work
  // queue.
  return incoming_task_queue_->IsIdleForTesting();
}

//------------------------------------------------------------------------------

// static
std::unique_ptr<MessageLoop> MessageLoop::CreateUnbound(
    Type type,
    MessagePumpFactoryCallback pump_factory) {
  return WrapUnique(new MessageLoop(type, pump_factory));
}

MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
    : type_(type),
#if defined(OS_WIN)
      pending_high_res_tasks_(0),
      in_high_res_mode_(false),
#endif
      nestable_tasks_allowed_(true),
      pump_factory_(pump_factory),
      message_histogram_(NULL),
      run_loop_(NULL),
      incoming_task_queue_(new internal::IncomingTaskQueue(this)),
      unbound_task_runner_(
          new internal::MessageLoopTaskRunner(incoming_task_queue_)),
      task_runner_(unbound_task_runner_),
      thread_id_(kInvalidThreadId) {
  // If type is TYPE_CUSTOM, a non-null pump_factory must be given.
  DCHECK(type_ != TYPE_CUSTOM || !pump_factory_.is_null());
}

void MessageLoop::BindToCurrentThread() {
  DCHECK(!pump_);
  if (!pump_factory_.is_null())
    pump_ = pump_factory_.Run();
  else
    pump_ = CreateMessagePumpForType(type_);

  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

  incoming_task_queue_->StartScheduling();
  unbound_task_runner_->BindToCurrentThread();
  unbound_task_runner_ = nullptr;
  SetThreadTaskRunnerHandle();
  {
    // Save the current thread's ID for potential use by other threads
    // later from GetThreadName().
    thread_id_ = PlatformThread::CurrentId();
    subtle::MemoryBarrier();
  }
}

std::string MessageLoop::GetThreadName() const {
  if (thread_id_ == kInvalidThreadId) {
    // |thread_id_| may already have been initialized but this thread might not
    // have received the update yet.
    subtle::MemoryBarrier();
    DCHECK_NE(kInvalidThreadId, thread_id_);
  }
  return ThreadIdNameManager::GetInstance()->GetName(thread_id_);
}

void MessageLoop::SetTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  DCHECK_EQ(this, current());
  DCHECK(task_runner->BelongsToCurrentThread());
  DCHECK(!unbound_task_runner_);
  task_runner_ = std::move(task_runner);
  SetThreadTaskRunnerHandle();
}

void MessageLoop::SetThreadTaskRunnerHandle() {
  DCHECK_EQ(this, current());
  // Clear the previous thread task runner first, because only one can exist at
  // a time.
  thread_task_runner_handle_.reset();
  thread_task_runner_handle_.reset(new ThreadTaskRunnerHandle(task_runner_));
}

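// Runs the message pump with |this| as the pump's delegate, starting the
// histogrammer first if it has been enabled.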
void MessageLoop::RunHandler() {
  DCHECK_EQ(this, current());
  StartHistogrammer();
  pump_->Run(this);
}

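// Runs one task from |deferred_non_nestable_work_queue_|, but only when not
// running inside a nested loop. Returns true if a task was run.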
bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (run_loop_->run_depth_ != 1)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  PendingTask pending_task =
      std::move(deferred_non_nestable_work_queue_.front());
  deferred_non_nestable_work_queue_.pop();

  RunTask(pending_task);
  return true;
}

void MessageLoop::RunTask(const PendingTask& pending_task) {
  DCHECK(nestable_tasks_allowed_);

#if defined(OS_WIN)
  if (pending_task.is_high_res) {
    pending_high_res_tasks_--;
    CHECK_GE(pending_high_res_tasks_, 0);
  }
#endif

  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  HistogramEvent(kTaskRunEvent);

  TRACE_TASK_EXECUTION("MessageLoop::RunTask", pending_task);

  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(pending_task));
  task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    DidProcessTask(pending_task));

  nestable_tasks_allowed_ = true;
}

bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
    RunTask(pending_task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(std::move(pending_task));
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(PendingTask pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(std::move(pending_task));
}

bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = std::move(work_queue_.front());
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(std::move(pending_task));
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete the task regardless of valgrind status. It's
  // not completely clear why we want to leak them in the loops above. This
  // code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior. See TODO above about deleting all tasks
  // when it's safe.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}

void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from the incoming queue to
  // |*work_queue| by waiting until the last minute (|*work_queue| is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (work_queue_.empty()) {
#if defined(OS_WIN)
    pending_high_res_tasks_ +=
        incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#else
    incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#endif
  }
}

void MessageLoop::ScheduleWork() {
  pump_->ScheduleWork();
}

#if defined(OS_WIN)
bool MessageLoop::MessagePumpWasSignaled() {
  return pump_->WasSignaled();
}
#endif

//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.

void MessageLoop::StartHistogrammer() {
#if !defined(OS_NACL)  // NaCl build has no metrics code.
  if (enable_histogrammer_ && !message_histogram_
      && StatisticsRecorder::IsActive()) {
    std::string thread_name = GetThreadName();
    DCHECK(!thread_name.empty());
    message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
        "MsgLoop:" + thread_name, kLeastNonZeroMessageId, kMaxMessageId,
        kNumberOfDistinctMessagesDisplayed,
        HistogramBase::kHexRangePrintingFlag, event_descriptions_);
  }
#endif
}

void MessageLoop::HistogramEvent(int event) {
#if !defined(OS_NACL)
  if (message_histogram_)
    message_histogram_->Add(event);
#endif
}

void MessageLoop::NotifyBeginNestedLoop() {
  FOR_EACH_OBSERVER(NestingObserver, nesting_observers_,
                    OnBeginNestedMessageLoop());
}

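// MessagePump::Delegate method: reloads the work queue and runs (or defers)
// immediate tasks until one has actually been run. Delayed tasks encountered
// here are moved to |delayed_work_queue_|, and the pump is rescheduled if the
// earliest delayed deadline changed.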
bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = std::move(work_queue_.front());
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        int sequence_num = pending_task.sequence_num;
        TimeTicks delayed_run_time = pending_task.delayed_run_time;
        AddToDelayedWorkQueue(std::move(pending_task));
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().sequence_num == sequence_num)
          pump_->ScheduleDelayedWork(delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(std::move(pending_task)))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}

bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind", there will be a lot of tasks in the delayed work
  // queue that are ready to run. To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again. As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task =
      std::move(const_cast<PendingTask&>(delayed_work_queue_.top()));
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(std::move(pending_task));
}

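// MessagePump::Delegate method: invoked when the pump has run out of immediate
// and delayed work, just before it waits for more.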
bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  // When we return we will do a kernel wait for more tasks.
#if defined(OS_WIN)
  // On Windows we activate the high resolution timer so that the wait
  // _if_ triggered by the timer happens with good resolution. If we don't
  // do this the default resolution is 15ms which might not be acceptable
  // for some tasks.
  bool high_res = pending_high_res_tasks_ > 0;
  if (high_res != in_high_res_mode_) {
    in_high_res_mode_ = high_res;
    Time::ActivateHighResolutionTimer(in_high_res_mode_);
  }
#endif
  return false;
}

void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                     void(*deleter)(const void*),
                                     const void* object) {
  task_runner()->PostNonNestableTask(from_here, Bind(deleter, object));
}

void MessageLoop::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  task_runner()->PostNonNestableTask(from_here, Bind(releaser, object));
}

#if !defined(OS_NACL)
//------------------------------------------------------------------------------
// MessageLoopForUI

MessageLoopForUI::MessageLoopForUI(std::unique_ptr<MessagePump> pump)
    : MessageLoop(TYPE_UI, Bind(&ReturnPump, Passed(&pump))) {}

#if defined(OS_ANDROID)
void MessageLoopForUI::Start() {
  // No Histogram support for UI message loop as it is managed by Java side
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
bool MessageLoopForUI::WatchFileDescriptor(
    int fd,
    bool persistent,
    MessagePumpLibevent::Mode mode,
    MessagePumpLibevent::FileDescriptorWatcher* controller,
    MessagePumpLibevent::Watcher* delegate) {
  return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL)

//------------------------------------------------------------------------------
// MessageLoopForIO

MessageLoopForIO::MessageLoopForIO() : MessageLoop(TYPE_IO) {}

#if !defined(OS_NACL_SFI)

#if defined(OS_WIN)
void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
}
#elif defined(OS_POSIX)
bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher* controller,
                                           Watcher* delegate) {
  return ToPumpIO(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL_SFI)

}  // namespace base