1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/message_loop.h"
6 
7 #if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(ANDROID)
8 #include <gdk/gdk.h>
9 #include <gdk/gdkx.h>
10 #endif
11 
12 #include <algorithm>
13 
14 #include "base/compiler_specific.h"
15 #include "base/lazy_instance.h"
16 #include "base/logging.h"
17 #include "base/message_pump_default.h"
18 #include "base/metrics/histogram.h"
19 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
20 #include "base/threading/thread_local.h"
21 
22 #if defined(OS_MACOSX)
23 #include "base/message_pump_mac.h"
24 #endif
25 #if defined(OS_POSIX)
26 #include "base/message_pump_libevent.h"
27 #endif
28 #if defined(OS_POSIX) && !defined(OS_MACOSX)
29 #include "base/message_pump_glib.h"
30 #endif
31 #if defined(TOUCH_UI)
32 #include "base/message_pump_glib_x.h"
33 #endif
34 
35 using base::TimeDelta;
36 using base::TimeTicks;
37 
38 namespace {
39 
40 // A lazily created thread local storage for quick access to a thread's message
41 // loop, if one exists.  This should be safe and free of static constructors.
42 base::LazyInstance<base::ThreadLocalPointer<MessageLoop> > lazy_tls_ptr(
43     base::LINKER_INITIALIZED);
44 
45 // Logical events for Histogram profiling. Run with -message-loop-histogrammer
46 // to get an accounting of messages and actions taken on each thread.
47 const int kTaskRunEvent = 0x1;
48 const int kTimerEvent = 0x2;
49 
50 // Provide range of message IDs for use in histogramming and debug display.
51 const int kLeastNonZeroMessageId = 1;
52 const int kMaxMessageId = 1099;
53 const int kNumberOfDistinctMessagesDisplayed = 1100;
54 
55 // Provide a macro that takes an expression (such as a constant, or macro
56 // constant) and creates a pair to initialize an array of pairs.  In this case,
57 // our pair consists of the expression's value, and the "stringized" version
58 // of the expression (i.e., the expression put in quotes).  For example, if
59 // we have:
60 //    #define FOO 2
61 //    #define BAR 5
62 // then the following:
63 //    VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
64 // will expand to:
65 //   {7, "FOO + BAR"}
66 // We use the resulting array as an argument to our histogram, which reads the
67 // number as a bucket identifier, and proceeds to use the corresponding name
68 // in the pair (i.e., the quoted string) when printing out a histogram.
69 #define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},
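// With the constants above, for example, VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
// expands to {kTaskRunEvent, "kTaskRunEvent"}, i.e. the pair {0x1, "kTaskRunEvent"}.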
70 
71 const base::LinearHistogram::DescriptionPair event_descriptions_[] = {
72   // Provide some pretty print capability in our histogram for our internal
73   // messages.
74 
75   // A few events we handle (kindred to messages), and used to profile actions.
76   VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
77   VALUE_TO_NUMBER_AND_NAME(kTimerEvent)
78 
79   {-1, NULL}  // The list must be null terminated, per the histogram API.
80 };
81 
82 bool enable_histogrammer_ = false;
83 
84 }  // namespace
85 
86 //------------------------------------------------------------------------------
87 
88 #if defined(OS_WIN)
89 
90 // Upon a SEH exception in this thread, it restores the original unhandled
91 // exception filter.
92 static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) {
93   ::SetUnhandledExceptionFilter(old_filter);
94   return EXCEPTION_CONTINUE_SEARCH;
95 }
96 
97 // Retrieves a pointer to the current unhandled exception filter. There
98 // is no standalone getter method.
99 static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() {
100   LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL;
101   top_filter = ::SetUnhandledExceptionFilter(0);
102   ::SetUnhandledExceptionFilter(top_filter);
103   return top_filter;
104 }
105 
106 #endif  // defined(OS_WIN)
107 
108 //------------------------------------------------------------------------------
109 
110 MessageLoop::TaskObserver::TaskObserver() {
111 }
112 
113 MessageLoop::TaskObserver::~TaskObserver() {
114 }
115 
116 MessageLoop::DestructionObserver::~DestructionObserver() {
117 }
118 
119 //------------------------------------------------------------------------------
120 
121 MessageLoop::MessageLoop(Type type)
122     : type_(type),
123       nestable_tasks_allowed_(true),
124       exception_restoration_(false),
125       message_histogram_(NULL),
126       state_(NULL),
127 #ifdef OS_WIN
128       os_modal_loop_(false),
129 #endif  // OS_WIN
130       next_sequence_num_(0) {
131   DCHECK(!current()) << "should only have one message loop per thread";
132   lazy_tls_ptr.Pointer()->Set(this);
133 
134 // TODO(rvargas): Get rid of the OS guards.
135 #if defined(OS_WIN)
136 #define MESSAGE_PUMP_UI new base::MessagePumpForUI()
137 #define MESSAGE_PUMP_IO new base::MessagePumpForIO()
138 #elif defined(OS_MACOSX)
139 #define MESSAGE_PUMP_UI base::MessagePumpMac::Create()
140 #define MESSAGE_PUMP_IO new base::MessagePumpLibevent()
141 #elif defined(ANDROID)
142 #define MESSAGE_PUMP_UI new base::MessagePumpDefault()
143 #define MESSAGE_PUMP_IO new base::MessagePumpLibevent()
144 #elif defined(TOUCH_UI)
145 #define MESSAGE_PUMP_UI new base::MessagePumpGlibX()
146 #define MESSAGE_PUMP_IO new base::MessagePumpLibevent()
147 #elif defined(OS_NACL)
148 // Currently NaCl doesn't have a UI or an IO MessageLoop.
149 // TODO(abarth): Figure out if we need these.
150 #define MESSAGE_PUMP_UI NULL
151 #define MESSAGE_PUMP_IO NULL
152 #elif defined(OS_POSIX)  // POSIX but not MACOSX.
153 #define MESSAGE_PUMP_UI new base::MessagePumpForUI()
154 #define MESSAGE_PUMP_IO new base::MessagePumpLibevent()
155 #else
156 #error Not implemented
157 #endif
158 
159   if (type_ == TYPE_UI) {
160     pump_ = MESSAGE_PUMP_UI;
161   } else if (type_ == TYPE_IO) {
162     pump_ = MESSAGE_PUMP_IO;
163   } else {
164     DCHECK_EQ(TYPE_DEFAULT, type_);
165     pump_ = new base::MessagePumpDefault();
166   }
167 }
168 
169 MessageLoop::~MessageLoop() {
170   DCHECK_EQ(this, current());
171 
172   DCHECK(!state_);
173 
174   // Clean up any unprocessed tasks, but take care: deleting a task could
175   // result in the addition of more tasks (e.g., via DeleteSoon).  We set a
176   // limit on the number of times we will allow a deleted task to generate more
177   // tasks.  Normally, we should only pass through this loop once or twice.  If
178   // we end up hitting the loop limit, then it is probably due to one task that
179   // is being stubborn.  Inspect the queues to see who is left.
180   bool did_work;
181   for (int i = 0; i < 100; ++i) {
182     DeletePendingTasks();
183     ReloadWorkQueue();
184     // If we end up with empty queues, then break out of the loop.
185     did_work = DeletePendingTasks();
186     if (!did_work)
187       break;
188   }
189   DCHECK(!did_work);
190 
191   // Let interested parties have one last shot at accessing this.
192   FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
193                     WillDestroyCurrentMessageLoop());
194 
195   // OK, now make it so that no one can find us.
196   lazy_tls_ptr.Pointer()->Set(NULL);
197 }
198 
199 // static
200 MessageLoop* MessageLoop::current() {
201   // TODO(darin): sadly, we cannot enable this yet since people call us even
202   // when they have no intention of using us.
203   // DCHECK(loop) << "Ouch, did you forget to initialize me?";
204   return lazy_tls_ptr.Pointer()->Get();
205 }
206 
207 // static
208 void MessageLoop::EnableHistogrammer(bool enable) {
209   enable_histogrammer_ = enable;
210 }
211 
212 void MessageLoop::AddDestructionObserver(
213     DestructionObserver* destruction_observer) {
214   DCHECK_EQ(this, current());
215   destruction_observers_.AddObserver(destruction_observer);
216 }
217 
218 void MessageLoop::RemoveDestructionObserver(
219     DestructionObserver* destruction_observer) {
220   DCHECK_EQ(this, current());
221   destruction_observers_.RemoveObserver(destruction_observer);
222 }
223 
224 void MessageLoop::PostTask(
225     const tracked_objects::Location& from_here, Task* task) {
226   PostTask_Helper(from_here, task, 0, true);
227 }
228 
229 void MessageLoop::PostDelayedTask(
230     const tracked_objects::Location& from_here, Task* task, int64 delay_ms) {
231   PostTask_Helper(from_here, task, delay_ms, true);
232 }
233 
234 void MessageLoop::PostNonNestableTask(
235     const tracked_objects::Location& from_here, Task* task) {
236   PostTask_Helper(from_here, task, 0, false);
237 }
238 
239 void MessageLoop::PostNonNestableDelayedTask(
240     const tracked_objects::Location& from_here, Task* task, int64 delay_ms) {
241   PostTask_Helper(from_here, task, delay_ms, false);
242 }
243 
244 void MessageLoop::Run() {
245   AutoRunState save_state(this);
246   RunHandler();
247 }
248 
249 void MessageLoop::RunAllPending() {
250   AutoRunState save_state(this);
251   state_->quit_received = true;  // Means run until we would otherwise block.
252   RunHandler();
253 }
254 
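// Records the quit request; the loop exits the next time it becomes idle (see
// DoIdleWork).  Must be called on this loop's thread, from within Run().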
255 void MessageLoop::Quit() {
256   DCHECK_EQ(this, current());
257   if (state_) {
258     state_->quit_received = true;
259   } else {
260     NOTREACHED() << "Must be inside Run to call Quit";
261   }
262 }
263 
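// Unlike Quit(), this asks the pump to stop immediately rather than waiting
// for the loop to become idle.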
264 void MessageLoop::QuitNow() {
265   DCHECK_EQ(this, current());
266   if (state_) {
267     pump_->Quit();
268   } else {
269     NOTREACHED() << "Must be inside Run to call Quit";
270   }
271 }
272 
273 void MessageLoop::SetNestableTasksAllowed(bool allowed) {
274   if (nestable_tasks_allowed_ != allowed) {
275     nestable_tasks_allowed_ = allowed;
276     if (!nestable_tasks_allowed_)
277       return;
278     // Start the native pump if we are not already pumping.
279     pump_->ScheduleWork();
280   }
281 }
282 
283 bool MessageLoop::NestableTasksAllowed() const {
284   return nestable_tasks_allowed_;
285 }
286 
287 bool MessageLoop::IsNested() {
288   return state_->run_depth > 1;
289 }
290 
291 void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
292   DCHECK_EQ(this, current());
293   task_observers_.AddObserver(task_observer);
294 }
295 
296 void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
297   DCHECK_EQ(this, current());
298   task_observers_.RemoveObserver(task_observer);
299 }
300 
301 void MessageLoop::AssertIdle() const {
302   // We only check |incoming_queue_|, since we don't want to lock |work_queue_|.
303   base::AutoLock lock(incoming_queue_lock_);
304   DCHECK(incoming_queue_.empty());
305 }
306 
307 //------------------------------------------------------------------------------
308 
309 // Runs the loop in two different SEH modes:
310 // exception_restoration_ = false : any unhandled exception goes to the last
311 // one that called SetUnhandledExceptionFilter().
312 // exception_restoration_ = true : any unhandled exception goes to the filter
313 // that existed before the loop was run.
314 void MessageLoop::RunHandler() {
315 #if defined(OS_WIN)
316   if (exception_restoration_) {
317     RunInternalInSEHFrame();
318     return;
319   }
320 #endif
321 
322   RunInternal();
323 }
324 
325 #if defined(OS_WIN)
326 __declspec(noinline) void MessageLoop::RunInternalInSEHFrame() {
327   LPTOP_LEVEL_EXCEPTION_FILTER current_filter = GetTopSEHFilter();
328   __try {
329     RunInternal();
330   } __except(SEHFilter(current_filter)) {
331   }
332   return;
333 }
334 #endif
335 
336 void MessageLoop::RunInternal() {
337   DCHECK_EQ(this, current());
338 
339 #ifndef ANDROID
340   StartHistogrammer();
341 #endif
342 
343 #if !defined(OS_MACOSX)
344   if (state_->dispatcher && type() == TYPE_UI) {
345     static_cast<base::MessagePumpForUI*>(pump_.get())->
346         RunWithDispatcher(this, state_->dispatcher);
347     return;
348   }
349 #endif
350 
351   pump_->Run(this);
352 }
353 
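// Runs one task from the deferred (non-nestable) queue, but only once we are
// back at the top level of the loop (run_depth == 1).  Returns whether a task
// was run.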
354 bool MessageLoop::ProcessNextDelayedNonNestableTask() {
355   if (state_->run_depth != 1)
356     return false;
357 
358   if (deferred_non_nestable_work_queue_.empty())
359     return false;
360 
361   Task* task = deferred_non_nestable_work_queue_.front().task;
362   deferred_non_nestable_work_queue_.pop();
363 
364   RunTask(task);
365   return true;
366 }
367 
368 void MessageLoop::RunTask(Task* task) {
369   DCHECK(nestable_tasks_allowed_);
370   // Execute the task and assume the worst: It is probably not reentrant.
371   nestable_tasks_allowed_ = false;
372 
373   HistogramEvent(kTaskRunEvent);
374   FOR_EACH_OBSERVER(TaskObserver, task_observers_,
375                     WillProcessTask(task));
376   task->Run();
377   FOR_EACH_OBSERVER(TaskObserver, task_observers_, DidProcessTask(task));
378   delete task;
379 
380   nestable_tasks_allowed_ = true;
381 }
382 
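// Runs |pending_task| if it is allowed to run in the current (possibly nested)
// context; otherwise parks it on the deferred queue.  Returns whether it ran.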
383 bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
384   if (pending_task.nestable || state_->run_depth == 1) {
385     RunTask(pending_task.task);
386     // Show that we ran a task (Note: a new one might arrive as a
387     // consequence!).
388     return true;
389   }
390 
391   // We couldn't run the task now because we're in a nested message loop
392   // and the task isn't nestable.
393   deferred_non_nestable_work_queue_.push(pending_task);
394   return false;
395 }
396 
397 void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
398   // Move to the delayed work queue.  Initialize the sequence number
399   // before inserting into the delayed_work_queue_.  The sequence number
400 // is used to facilitate FIFO sorting when two tasks have the same
401   // delayed_run_time value.
402   PendingTask new_pending_task(pending_task);
403   new_pending_task.sequence_num = next_sequence_num_++;
404   delayed_work_queue_.push(new_pending_task);
405 }
406 
407 void MessageLoop::ReloadWorkQueue() {
408   // We can improve performance of our loading tasks from incoming_queue_ to
409   // work_queue_ by waiting until the last minute (work_queue_ is empty) to
410   // load.  That reduces the number of locks-per-task significantly when our
411   // queues get large.
412   if (!work_queue_.empty())
413     return;  // Wait till we *really* need to lock and load.
414 
415   // Acquire all we can from the inter-thread queue with one lock acquisition.
416   {
417     base::AutoLock lock(incoming_queue_lock_);
418     if (incoming_queue_.empty())
419       return;
420     incoming_queue_.Swap(&work_queue_);  // Constant time
421     DCHECK(incoming_queue_.empty());
422   }
423 }
424 
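// Drains the work queues, deleting tasks only where it is known to be safe
// (see the TODOs below).  Returns true if any queue had tasks in it.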
425 bool MessageLoop::DeletePendingTasks() {
426   bool did_work = !work_queue_.empty();
427   while (!work_queue_.empty()) {
428     PendingTask pending_task = work_queue_.front();
429     work_queue_.pop();
430     if (!pending_task.delayed_run_time.is_null()) {
431       // We want to delete delayed tasks in the same order in which they would
432       // normally be deleted in case of any funny dependencies between delayed
433       // tasks.
434       AddToDelayedWorkQueue(pending_task);
435     } else {
436       // TODO(darin): Delete all tasks once it is safe to do so.
437       // Until it is totally safe, just do it when running Purify or
438       // Valgrind.
439 #if defined(PURIFY) || defined(USE_HEAPCHECKER)
440       delete pending_task.task;
441 #else
442       if (RunningOnValgrind())
443         delete pending_task.task;
444 #endif  // defined(PURIFY) || defined(USE_HEAPCHECKER)
445     }
446   }
447   did_work |= !deferred_non_nestable_work_queue_.empty();
448   while (!deferred_non_nestable_work_queue_.empty()) {
449     // TODO(darin): Delete all tasks once it is safe to do so.
450     // Until it is totally safe, only delete them under Purify and Valgrind.
451     Task* task = NULL;
452 #if defined(PURIFY) || defined(USE_HEAPCHECKER)
453     task = deferred_non_nestable_work_queue_.front().task;
454 #else
455     if (RunningOnValgrind())
456       task = deferred_non_nestable_work_queue_.front().task;
457 #endif
458     deferred_non_nestable_work_queue_.pop();
459     if (task)
460       delete task;
461   }
462   did_work |= !delayed_work_queue_.empty();
463   while (!delayed_work_queue_.empty()) {
464     Task* task = delayed_work_queue_.top().task;
465     delayed_work_queue_.pop();
466     delete task;
467   }
468   return did_work;
469 }
470 
471 // Possibly called on a background thread!
472 void MessageLoop::PostTask_Helper(
473     const tracked_objects::Location& from_here, Task* task, int64 delay_ms,
474     bool nestable) {
475   task->SetBirthPlace(from_here);
476 
477   PendingTask pending_task(task, nestable);
478 
479   if (delay_ms > 0) {
480     pending_task.delayed_run_time =
481         TimeTicks::Now() + TimeDelta::FromMilliseconds(delay_ms);
482 
483 #if defined(OS_WIN)
484     if (high_resolution_timer_expiration_.is_null()) {
485       // Windows timers are granular to 15.6ms.  If we only set high-res
486       // timers for those under 15.6ms, then a 18ms timer ticks at ~32ms,
487       // which as a percentage is pretty inaccurate.  So enable high
488       // res timers for any timer which is within 2x of the granularity.
489       // This is a tradeoff between accuracy and power management.
490       bool needs_high_res_timers =
491           delay_ms < (2 * base::Time::kMinLowResolutionThresholdMs);
492       if (needs_high_res_timers) {
493         base::Time::ActivateHighResolutionTimer(true);
494         high_resolution_timer_expiration_ = TimeTicks::Now() +
495             TimeDelta::FromMilliseconds(kHighResolutionTimerModeLeaseTimeMs);
496       }
497     }
498 #endif
499   } else {
500     DCHECK_EQ(delay_ms, 0) << "delay should not be negative";
501   }
502 
503 #if defined(OS_WIN)
504   if (!high_resolution_timer_expiration_.is_null()) {
505     if (TimeTicks::Now() > high_resolution_timer_expiration_) {
506       base::Time::ActivateHighResolutionTimer(false);
507       high_resolution_timer_expiration_ = TimeTicks();
508     }
509   }
510 #endif
511 
512   // Warning: Don't try to short-circuit, and handle this thread's tasks more
513   // directly, as it could starve handling of foreign threads.  Put every task
514   // into this queue.
515 
516   scoped_refptr<base::MessagePump> pump;
517   {
518     base::AutoLock locked(incoming_queue_lock_);
519 
520     bool was_empty = incoming_queue_.empty();
521     incoming_queue_.push(pending_task);
522     if (!was_empty)
523       return;  // Someone else should have started the sub-pump.
524 
525     pump = pump_;
526   }
527   // Since the incoming_queue_ may contain a task that destroys this message
528   // loop, we cannot exit incoming_queue_lock_ until we are done with |this|.
529   // We use a stack-based reference to the message pump so that we can call
530   // ScheduleWork outside of incoming_queue_lock_.
531 
532   pump->ScheduleWork();
533 }
534 
535 //------------------------------------------------------------------------------
536 // Method and data for histogramming events and actions taken by each instance
537 // on each thread.
538 
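// Lazily creates this thread's "MsgLoop:<thread name>" histogram, once
// histogramming has been enabled and the StatisticsRecorder is active.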
539 void MessageLoop::StartHistogrammer() {
540   if (enable_histogrammer_ && !message_histogram_
541       && base::StatisticsRecorder::IsActive()) {
542     DCHECK(!thread_name_.empty());
543     message_histogram_ = base::LinearHistogram::FactoryGet(
544         "MsgLoop:" + thread_name_,
545         kLeastNonZeroMessageId, kMaxMessageId,
546         kNumberOfDistinctMessagesDisplayed,
547         message_histogram_->kHexRangePrintingFlag);
548     message_histogram_->SetRangeDescriptions(event_descriptions_);
549   }
550 }
551 
552 void MessageLoop::HistogramEvent(int event) {
553   if (message_histogram_)
554     message_histogram_->Add(event);
555 }
556 
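// Called by the pump.  Moves newly posted delayed tasks onto the delayed work
// queue and runs at most one immediate task; returns whether a task was run.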
557 bool MessageLoop::DoWork() {
558   if (!nestable_tasks_allowed_) {
559     // Task can't be executed right now.
560     return false;
561   }
562 
563   for (;;) {
564     ReloadWorkQueue();
565     if (work_queue_.empty())
566       break;
567 
568     // Execute oldest task.
569     do {
570       PendingTask pending_task = work_queue_.front();
571       work_queue_.pop();
572       if (!pending_task.delayed_run_time.is_null()) {
573         AddToDelayedWorkQueue(pending_task);
574         // If we changed the topmost task, then it is time to re-schedule.
575         if (delayed_work_queue_.top().task == pending_task.task)
576           pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
577       } else {
578         if (DeferOrRunPendingTask(pending_task))
579           return true;
580       }
581     } while (!work_queue_.empty());
582   }
583 
584   // Nothing happened.
585   return false;
586 }
587 
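// Called by the pump.  Runs at most one delayed task whose run time has
// arrived (deferring it if it may not run in a nested loop), and reports when
// the next one is due via |next_delayed_work_time|.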
588 bool MessageLoop::DoDelayedWork(base::TimeTicks* next_delayed_work_time) {
589   if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
590     recent_time_ = *next_delayed_work_time = TimeTicks();
591     return false;
592   }
593 
594   // When we "fall behind," there will be a lot of tasks in the delayed work
595   // queue that are ready to run.  To increase efficiency when we fall behind,
596   // we will only call Time::Now() intermittently, and then process all tasks
597   // that are ready to run before calling it again.  As a result, the more we
598   // fall behind (and have a lot of ready-to-run delayed tasks), the more
599   // efficient we'll be at handling the tasks.
600 
601   TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
602   if (next_run_time > recent_time_) {
603     recent_time_ = TimeTicks::Now();  // Get a better view of Now();
604     if (next_run_time > recent_time_) {
605       *next_delayed_work_time = next_run_time;
606       return false;
607     }
608   }
609 
610   PendingTask pending_task = delayed_work_queue_.top();
611   delayed_work_queue_.pop();
612 
613   if (!delayed_work_queue_.empty())
614     *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;
615 
616   return DeferOrRunPendingTask(pending_task);
617 }
618 
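// Called by the pump when it runs out of immediate and delayed work.  Gives
// deferred non-nestable tasks a chance to run, and honors a pending Quit().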
619 bool MessageLoop::DoIdleWork() {
620   if (ProcessNextDelayedNonNestableTask())
621     return true;
622 
623   if (state_->quit_received)
624     pump_->Quit();
625 
626   return false;
627 }
628 
629 //------------------------------------------------------------------------------
630 // MessageLoop::AutoRunState
631 
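// Each entry into the loop (Run, RunAllPending) stacks an AutoRunState, linked
// through previous_state_, so run_depth tracks how deeply the loop is nested.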
632 MessageLoop::AutoRunState::AutoRunState(MessageLoop* loop) : loop_(loop) {
633   // Make the loop reference us.
634   previous_state_ = loop_->state_;
635   if (previous_state_) {
636     run_depth = previous_state_->run_depth + 1;
637   } else {
638     run_depth = 1;
639   }
640   loop_->state_ = this;
641 
642   // Initialize the other fields:
643   quit_received = false;
644 #if !defined(OS_MACOSX)
645   dispatcher = NULL;
646 #endif
647 }
648 
649 MessageLoop::AutoRunState::~AutoRunState() {
650   loop_->state_ = previous_state_;
651 }
652 
653 //------------------------------------------------------------------------------
654 // MessageLoop::PendingTask
655 
656 bool MessageLoop::PendingTask::operator<(const PendingTask& other) const {
657   // Since the top of a priority queue is defined as the "greatest" element, we
658   // need to invert the comparison here.  We want the smaller time to be at the
659   // top of the heap.
660 
661   if (delayed_run_time < other.delayed_run_time)
662     return false;
663 
664   if (delayed_run_time > other.delayed_run_time)
665     return true;
666 
667   // If the times happen to match, then we use the sequence number to decide.
668   // Compare the difference to support integer roll-over.
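  // For example, with equal run times a task with sequence_num 5 compares
  // "greater" than one with sequence_num 6, so it reaches the top of the
  // priority queue (and therefore runs) first.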
669   return (sequence_num - other.sequence_num) > 0;
670 }
671 
672 //------------------------------------------------------------------------------
673 // MessageLoopForUI
674 
675 #if defined(OS_WIN)
676 void MessageLoopForUI::DidProcessMessage(const MSG& message) {
677   pump_win()->DidProcessMessage(message);
678 }
679 #endif  // defined(OS_WIN)
680 
681 #if defined(USE_X11)
682 Display* MessageLoopForUI::GetDisplay() {
683   return gdk_x11_get_default_xdisplay();
684 }
685 #endif  // defined(USE_X11)
686 
687 #if !defined(OS_MACOSX) && !defined(OS_NACL) && !defined(ANDROID)
688 void MessageLoopForUI::AddObserver(Observer* observer) {
689   pump_ui()->AddObserver(observer);
690 }
691 
692 void MessageLoopForUI::RemoveObserver(Observer* observer) {
693   pump_ui()->RemoveObserver(observer);
694 }
695 
696 void MessageLoopForUI::Run(Dispatcher* dispatcher) {
697   AutoRunState save_state(this);
698   state_->dispatcher = dispatcher;
699   RunHandler();
700 }
701 #endif  // !defined(OS_MACOSX) && !defined(OS_NACL) && !defined(ANDROID)
702 
703 //------------------------------------------------------------------------------
704 // MessageLoopForIO
705 
706 #if defined(OS_WIN)
707 
708 void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
709   pump_io()->RegisterIOHandler(file, handler);
710 }
711 
712 bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
713   return pump_io()->WaitForIOCompletion(timeout, filter);
714 }
715 
716 #elif defined(OS_POSIX) && !defined(OS_NACL)
717 
718 bool MessageLoopForIO::WatchFileDescriptor(int fd,
719                                            bool persistent,
720                                            Mode mode,
721                                            FileDescriptorWatcher *controller,
722                                            Watcher *delegate) {
723   return pump_libevent()->WatchFileDescriptor(
724       fd,
725       persistent,
726       static_cast<base::MessagePumpLibevent::Mode>(mode),
727       controller,
728       delegate);
729 }
730 
731 #endif
732