// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_loop.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_pump_default.h"
#include "base/metrics/histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_local.h"
#include "base/time/time.h"
#include "base/tracked_objects.h"

#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
#endif
#if defined(OS_POSIX) && !defined(OS_IOS)
#include "base/message_loop/message_pump_libevent.h"
#endif
#if defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif
#if defined(USE_GLIB)
#include "base/message_loop/message_pump_glib.h"
#endif

namespace base {

namespace {

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists.  This should be safe and free of static constructors.
LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
    LAZY_INSTANCE_INITIALIZER;

// Logical events for Histogram profiling. Run with -message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
#if !defined(OS_NACL)
const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
const int kLeastNonZeroMessageId = 1;
const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;

// Provide a macro that takes an expression (such as a constant, or macro
// constant) and creates a pair to initialize an array of pairs.  In this case,
// our pair consists of the expression's value, and the "stringized" version
// of the expression (i.e., the expression put in quotes).  For example, if
// we have:
//    #define FOO 2
//    #define BAR 5
// then the following:
//    VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
// will expand to:
//   {7, "FOO + BAR"}
// We use the resulting array as an argument to our histogram, which reads the
// number as a bucket identifier, and proceeds to use the corresponding name
// in the pair (i.e., the quoted string) when printing out a histogram.
#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},

const LinearHistogram::DescriptionPair event_descriptions_[] = {
  // Provide some pretty print capability in our histogram for our internal
  // messages.

  // A few events we handle (kindred to messages), and used to profile actions.
  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)

  {-1, NULL}  // The list must be null terminated, per API to histogram.
};
#endif  // !defined(OS_NACL)

bool enable_histogrammer_ = false;

MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;

// Returns true if MessagePump::ScheduleWork() must be called one
// time for every task that is added to the MessageLoop incoming queue.
bool AlwaysNotifyPump(MessageLoop::Type type) {
#if defined(OS_ANDROID)
  // The Android UI message loop needs to get notified each time a task is added
  // to the incoming queue.
  return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
#else
  return false;
#endif
}

#if defined(OS_IOS)
typedef MessagePumpIOSForIO MessagePumpForIO;
#elif defined(OS_NACL)
typedef MessagePumpDefault MessagePumpForIO;
#elif defined(OS_POSIX)
typedef MessagePumpLibevent MessagePumpForIO;
#endif

MessagePumpForIO* ToPumpIO(MessagePump* pump) {
  return static_cast<MessagePumpForIO*>(pump);
}

}  // namespace

//------------------------------------------------------------------------------

MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}

//------------------------------------------------------------------------------

MessageLoop::MessageLoop(Type type)
    : type_(type),
      pending_high_res_tasks_(0),
      in_high_res_mode_(false),
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  Init();

  pump_ = CreateMessagePumpForType(type).Pass();
}

MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
    : pump_(pump.Pass()),
      type_(TYPE_CUSTOM),
      pending_high_res_tasks_(0),
      in_high_res_mode_(false),
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  DCHECK(pump_.get());
  Init();
}

MessageLoop::~MessageLoop() {
  DCHECK_EQ(this, current());

  DCHECK(!run_loop_);
#if defined(OS_WIN)
  if (in_high_res_mode_)
    Time::ActivateHighResolutionTimer(false);
#endif
  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon).  We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks.  Normally, we should only pass through this loop once or twice.  If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn.  Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  thread_task_runner_handle_.reset();

  // Tell the incoming queue that we are dying.
  incoming_task_queue_->WillDestroyCurrentMessageLoop();
  incoming_task_queue_ = NULL;
  message_loop_proxy_ = NULL;

  // OK, now make it so that no one can find us.
  lazy_tls_ptr.Pointer()->Set(NULL);
}

// static
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

// static
void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}

// static
bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
  if (message_pump_for_ui_factory_)
    return false;

  message_pump_for_ui_factory_ = factory;
  return true;
}

// static
scoped_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
// TODO(rvargas): Get rid of the OS guards.
#if defined(USE_GLIB) && !defined(OS_NACL)
  typedef MessagePumpGlib MessagePumpForUI;
#elif defined(OS_LINUX) && !defined(OS_NACL)
  typedef MessagePumpLibevent MessagePumpForUI;
#endif

#if defined(OS_IOS) || defined(OS_MACOSX)
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(MessagePumpMac::Create())
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>()
#else
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(new MessagePumpForUI())
#endif

#if defined(OS_MACOSX)
  // Use an OS native runloop on Mac to support timer coalescing.
  #define MESSAGE_PUMP_DEFAULT \
      scoped_ptr<MessagePump>(new MessagePumpCFRunLoop())
#else
  #define MESSAGE_PUMP_DEFAULT scoped_ptr<MessagePump>(new MessagePumpDefault())
#endif

  if (type == MessageLoop::TYPE_UI) {
    if (message_pump_for_ui_factory_)
      return message_pump_for_ui_factory_();
    return MESSAGE_PUMP_UI;
  }
  if (type == MessageLoop::TYPE_IO)
    return scoped_ptr<MessagePump>(new MessagePumpForIO());

#if defined(OS_ANDROID)
  if (type == MessageLoop::TYPE_JAVA)
    return scoped_ptr<MessagePump>(new MessagePumpForUI());
#endif

  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
  return MESSAGE_PUMP_DEFAULT;
}

void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}

void MessageLoop::PostTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), true);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, true);
}

void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), false);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, false);
}

void MessageLoop::Run() {
  RunLoop run_loop;
  run_loop.Run();
}

void MessageLoop::RunUntilIdle() {
  RunLoop run_loop;
  run_loop.RunUntilIdle();
}

void MessageLoop::QuitWhenIdle() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    run_loop_->quit_when_idle_received_ = true;
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

void MessageLoop::QuitNow() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    pump_->Quit();
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

// static
Closure MessageLoop::QuitWhenIdleClosure() {
  return Bind(&QuitCurrentWhenIdle);
}

void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (allowed) {
    // Kick the native pump just in case we enter an OS-driven nested message
    // loop.
    pump_->ScheduleWork();
  }
  nestable_tasks_allowed_ = allowed;
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.RemoveObserver(task_observer);
}

bool MessageLoop::is_running() const {
  DCHECK_EQ(this, current());
  return run_loop_ != NULL;
}

bool MessageLoop::HasHighResolutionTasks() {
  return incoming_task_queue_->HasHighResolutionTasks();
}

bool MessageLoop::IsIdleForTesting() {
  // We only check the incoming queue, since we don't want to lock the work
  // queue.
  return incoming_task_queue_->IsIdleForTesting();
}

//------------------------------------------------------------------------------

void MessageLoop::Init() {
  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

  incoming_task_queue_ = new internal::IncomingTaskQueue(this);
  message_loop_proxy_ =
      new internal::MessageLoopProxyImpl(incoming_task_queue_);
  thread_task_runner_handle_.reset(
      new ThreadTaskRunnerHandle(message_loop_proxy_));
}

void MessageLoop::RunHandler() {
  DCHECK_EQ(this, current());

  StartHistogrammer();

#if defined(OS_WIN)
  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
    static_cast<MessagePumpForUI*>(pump_.get())->
        RunWithDispatcher(this, run_loop_->dispatcher_);
    return;
  }
#endif

  pump_->Run(this);
}

bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (run_loop_->run_depth_ != 1)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  PendingTask pending_task = deferred_non_nestable_work_queue_.front();
  deferred_non_nestable_work_queue_.pop();

  RunTask(pending_task);
  return true;
}

void MessageLoop::RunTask(const PendingTask& pending_task) {
  DCHECK(nestable_tasks_allowed_);

  if (pending_task.is_high_res) {
    pending_high_res_tasks_--;
    CHECK(pending_high_res_tasks_ >= 0);
  }
  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  HistogramEvent(kTaskRunEvent);

  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(pending_task));
  task_annotator_.RunTask(
      "MessageLoop::PostTask", "MessageLoop::RunTask", pending_task);
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    DidProcessTask(pending_task));

  nestable_tasks_allowed_ = true;
}

bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
    RunTask(pending_task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(pending_task);
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(pending_task);
}

bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = work_queue_.front();
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(pending_task);
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete the task regardless of valgrind status. It's
  // not completely clear why we want to leak them in the loops above.  This
  // code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior.  See TODO above about deleting all tasks
  // when it's safe.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}

void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from the incoming queue to
  // |*work_queue| by waiting until the last minute (|*work_queue| is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (work_queue_.empty()) {
    pending_high_res_tasks_ +=
        incoming_task_queue_->ReloadWorkQueue(&work_queue_);
  }
}

void MessageLoop::ScheduleWork(bool was_empty) {
  if (was_empty || AlwaysNotifyPump(type_))
    pump_->ScheduleWork();
}

//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.

void MessageLoop::StartHistogrammer() {
#if !defined(OS_NACL)  // NaCl build has no metrics code.
  if (enable_histogrammer_ && !message_histogram_
      && StatisticsRecorder::IsActive()) {
    DCHECK(!thread_name_.empty());
    message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
        "MsgLoop:" + thread_name_,
        kLeastNonZeroMessageId, kMaxMessageId,
        kNumberOfDistinctMessagesDisplayed,
        message_histogram_->kHexRangePrintingFlag,
        event_descriptions_);
  }
#endif
}

void MessageLoop::HistogramEvent(int event) {
#if !defined(OS_NACL)
  if (message_histogram_)
    message_histogram_->Add(event);
#endif
}

bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().task.Equals(pending_task.task))
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}

bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind," there will be a lot of tasks in the delayed work
  // queue that are ready to run.  To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again.  As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}

bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  // When we return we will do a kernel wait for more tasks.
#if defined(OS_WIN)
  // On Windows we activate the high resolution timer so that the wait
  // _if_ triggered by the timer happens with good resolution. If we don't
  // do this the default resolution is 15ms which might not be acceptable
  // for some tasks.
  bool high_res = pending_high_res_tasks_ > 0;
  if (high_res != in_high_res_mode_) {
    in_high_res_mode_ = high_res;
    Time::ActivateHighResolutionTimer(in_high_res_mode_);
  }
#endif
  return false;
}

void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                     void(*deleter)(const void*),
                                     const void* object) {
  PostNonNestableTask(from_here, Bind(deleter, object));
}

void MessageLoop::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  PostNonNestableTask(from_here, Bind(releaser, object));
}

#if !defined(OS_NACL)
//------------------------------------------------------------------------------
// MessageLoopForUI

#if defined(OS_ANDROID)
void MessageLoopForUI::Start() {
  // No Histogram support for UI message loop as it is managed by Java side
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
bool MessageLoopForUI::WatchFileDescriptor(
    int fd,
    bool persistent,
    MessagePumpLibevent::Mode mode,
    MessagePumpLibevent::FileDescriptorWatcher *controller,
    MessagePumpLibevent::Watcher *delegate) {
  return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL)

//------------------------------------------------------------------------------
// MessageLoopForIO

#if !defined(OS_NACL)
void MessageLoopForIO::AddIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->AddIOObserver(io_observer);
}

void MessageLoopForIO::RemoveIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->RemoveIOObserver(io_observer);
}

#if defined(OS_WIN)
void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
}
#elif defined(OS_POSIX)
bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher *controller,
                                           Watcher *delegate) {
  return ToPumpIO(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL)

}  // namespace base