// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_loop.h"

#include <algorithm>
#include <utility>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump_default.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"

#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
#endif
#if defined(OS_POSIX) && !defined(OS_IOS)
#include "base/message_loop/message_pump_libevent.h"
#endif
#if defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif
#if defined(USE_GLIB)
#include "base/message_loop/message_pump_glib.h"
#endif

namespace base {

namespace {

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists.
base::ThreadLocalPointer<MessageLoop>* GetTLSMessageLoop() {
  static auto* lazy_tls_ptr = new base::ThreadLocalPointer<MessageLoop>();
  return lazy_tls_ptr;
}
MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;

#if defined(OS_IOS)
typedef MessagePumpIOSForIO MessagePumpForIO;
#elif defined(OS_NACL_SFI)
typedef MessagePumpDefault MessagePumpForIO;
#elif defined(OS_POSIX)
typedef MessagePumpLibevent MessagePumpForIO;
#endif

#if !defined(OS_NACL_SFI)
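// Downcasts the loop's generic pump to the platform IO pump. This assumes the
// loop was created as TYPE_IO (or with an equivalent custom pump), which the
// MessageLoopForIO methods at the bottom of this file rely on.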
MessagePumpForIO* ToPumpIO(MessagePump* pump) {
  return static_cast<MessagePumpForIO*>(pump);
}
#endif  // !defined(OS_NACL_SFI)

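// Adapts an already-constructed pump to the MessagePumpFactoryCallback
// signature expected by the MessageLoop constructors below.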
std::unique_ptr<MessagePump> ReturnPump(std::unique_ptr<MessagePump> pump) {
  return pump;
}

}  // namespace

//------------------------------------------------------------------------------

MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}

MessageLoop::NestingObserver::~NestingObserver() {}

//------------------------------------------------------------------------------

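// Typical usage (a sketch for orientation only; DoSomething stands in for any
// void() function and is not defined in this file):
//
//   MessageLoop loop(MessageLoop::TYPE_DEFAULT);
//   loop.task_runner()->PostTask(FROM_HERE, Bind(&DoSomething));
//   RunLoop().Run();  // Spins the loop until Quit()/QuitWhenIdle().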
MessageLoop::MessageLoop(Type type)
    : MessageLoop(type, MessagePumpFactoryCallback()) {
  BindToCurrentThread();
}

MessageLoop::MessageLoop(std::unique_ptr<MessagePump> pump)
    : MessageLoop(TYPE_CUSTOM, Bind(&ReturnPump, Passed(&pump))) {
  BindToCurrentThread();
}

MessageLoop::~MessageLoop() {
  // If |pump_| is non-null, this message loop has been bound and should be the
  // current one on this thread. Otherwise, this loop is being destructed before
  // it was bound to a thread, so a different message loop (or no loop at all)
  // may be current.
  DCHECK((pump_ && current() == this) || (!pump_ && current() != this));

  // iOS just attaches to the loop; it doesn't Run it.
  // TODO(stuartmorgan): Consider wiring up a Detach().
#if !defined(OS_IOS)
  DCHECK(!run_loop_);
#endif

#if defined(OS_WIN)
  if (in_high_res_mode_)
    Time::ActivateHighResolutionTimer(false);
#endif
  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon). We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks. Normally, we should only pass through this loop once or twice. If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn. Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
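    // Each pass deletes whatever is currently queued, pulls in any tasks that
    // were posted as a side effect of those deletions, and then checks whether
    // the second sweep still found work.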
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // Let interested parties have one last shot at accessing this.
  for (auto& observer : destruction_observers_)
    observer.WillDestroyCurrentMessageLoop();

  thread_task_runner_handle_.reset();

  // Tell the incoming queue that we are dying.
  incoming_task_queue_->WillDestroyCurrentMessageLoop();
  incoming_task_queue_ = NULL;
  unbound_task_runner_ = NULL;
  task_runner_ = NULL;

  // OK, now make it so that no one can find us.
  if (current() == this)
    GetTLSMessageLoop()->Set(nullptr);
}

// static
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return GetTLSMessageLoop()->Get();
}

// static
bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
  if (message_pump_for_ui_factory_)
    return false;

  message_pump_for_ui_factory_ = factory;
  return true;
}

// static
std::unique_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
  // TODO(rvargas): Get rid of the OS guards.
#if defined(USE_GLIB) && !defined(OS_NACL)
  typedef MessagePumpGlib MessagePumpForUI;
#elif (defined(OS_LINUX) && !defined(OS_NACL)) || defined(OS_BSD)
  typedef MessagePumpLibevent MessagePumpForUI;
#endif

#if defined(OS_IOS) || defined(OS_MACOSX)
#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(MessagePumpMac::Create())
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>()
#else
#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(new MessagePumpForUI())
#endif

#if defined(OS_MACOSX)
  // Use an OS native runloop on Mac to support timer coalescing.
#define MESSAGE_PUMP_DEFAULT \
    std::unique_ptr<MessagePump>(new MessagePumpCFRunLoop())
#else
#define MESSAGE_PUMP_DEFAULT \
    std::unique_ptr<MessagePump>(new MessagePumpDefault())
#endif

  if (type == MessageLoop::TYPE_UI) {
    if (message_pump_for_ui_factory_)
      return message_pump_for_ui_factory_();
    return MESSAGE_PUMP_UI;
  }
  if (type == MessageLoop::TYPE_IO)
    return std::unique_ptr<MessagePump>(new MessagePumpForIO());

#if defined(OS_ANDROID)
  if (type == MessageLoop::TYPE_JAVA)
    return std::unique_ptr<MessagePump>(new MessagePumpForUI());
#endif

  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
  return MESSAGE_PUMP_DEFAULT;
}

void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}

void MessageLoop::AddNestingObserver(NestingObserver* observer) {
  DCHECK_EQ(this, current());
  CHECK(allow_nesting_);
  nesting_observers_.AddObserver(observer);
}

void MessageLoop::RemoveNestingObserver(NestingObserver* observer) {
  DCHECK_EQ(this, current());
  CHECK(allow_nesting_);
  nesting_observers_.RemoveObserver(observer);
}

void MessageLoop::QuitWhenIdle() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    run_loop_->QuitWhenIdle();
  } else {
    NOTREACHED() << "Must be inside Run to call QuitWhenIdle";
  }
}

void MessageLoop::QuitNow() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    pump_->Quit();
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

// static
Closure MessageLoop::QuitWhenIdleClosure() {
  return Bind(&QuitCurrentWhenIdle);
}

void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (allowed) {
    CHECK(allow_nesting_);

    // Kick the native pump just in case we enter an OS-driven nested message
    // loop.
    pump_->ScheduleWork();
  }
  nestable_tasks_allowed_ = allowed;
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  CHECK(allow_task_observers_);
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  CHECK(allow_task_observers_);
  task_observers_.RemoveObserver(task_observer);
}

bool MessageLoop::is_running() const {
  DCHECK_EQ(this, current());
  return run_loop_ != NULL;
}

bool MessageLoop::HasHighResolutionTasks() {
  return incoming_task_queue_->HasHighResolutionTasks();
}

bool MessageLoop::IsIdleForTesting() {
  // We only check the incoming queue, since we don't want to lock the work
  // queue.
  return incoming_task_queue_->IsIdleForTesting();
}

//------------------------------------------------------------------------------

// static
std::unique_ptr<MessageLoop> MessageLoop::CreateUnbound(
    Type type,
    MessagePumpFactoryCallback pump_factory) {
  return WrapUnique(new MessageLoop(type, pump_factory));
}

MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
    : type_(type),
#if defined(OS_WIN)
      pending_high_res_tasks_(0),
      in_high_res_mode_(false),
#endif
      nestable_tasks_allowed_(true),
      pump_factory_(pump_factory),
      run_loop_(nullptr),
      current_pending_task_(nullptr),
      incoming_task_queue_(new internal::IncomingTaskQueue(this)),
      unbound_task_runner_(
          new internal::MessageLoopTaskRunner(incoming_task_queue_)),
      task_runner_(unbound_task_runner_),
      thread_id_(kInvalidThreadId) {
  // If type is TYPE_CUSTOM, a non-null |pump_factory| must be given.
  DCHECK(type_ != TYPE_CUSTOM || !pump_factory_.is_null());
}

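// Binding creates the pump, claims this thread's TLS MessageLoop slot, starts
// scheduling on the incoming queue, and promotes |unbound_task_runner_| to the
// thread's task runner, so it must run on the thread the loop will serve.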
void MessageLoop::BindToCurrentThread() {
  DCHECK(!pump_);
  if (!pump_factory_.is_null())
    pump_ = pump_factory_.Run();
  else
    pump_ = CreateMessagePumpForType(type_);

  DCHECK(!current()) << "should only have one message loop per thread";
  GetTLSMessageLoop()->Set(this);

  incoming_task_queue_->StartScheduling();
  unbound_task_runner_->BindToCurrentThread();
  unbound_task_runner_ = nullptr;
  SetThreadTaskRunnerHandle();
  thread_id_ = PlatformThread::CurrentId();
}

std::string MessageLoop::GetThreadName() const {
  DCHECK_NE(kInvalidThreadId, thread_id_)
      << "GetThreadName() must only be called after BindToCurrentThread()'s "
      << "side-effects have been synchronized with this thread.";
  return ThreadIdNameManager::GetInstance()->GetName(thread_id_);
}

void MessageLoop::SetTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  DCHECK_EQ(this, current());
  DCHECK(task_runner);
  DCHECK(task_runner->BelongsToCurrentThread());
  DCHECK(!unbound_task_runner_);
  task_runner_ = std::move(task_runner);
  SetThreadTaskRunnerHandle();
}

void MessageLoop::ClearTaskRunnerForTesting() {
  DCHECK_EQ(this, current());
  DCHECK(!unbound_task_runner_);
  task_runner_ = nullptr;
  thread_task_runner_handle_.reset();
}

void MessageLoop::SetThreadTaskRunnerHandle() {
  DCHECK_EQ(this, current());
  // Clear the previous thread task runner first, because only one can exist at
  // a time.
  thread_task_runner_handle_.reset();
  thread_task_runner_handle_.reset(new ThreadTaskRunnerHandle(task_runner_));
}

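// Entered from RunLoop to spin the pump. |run_loop_| has already been set
// before we get here; the CHECK rejects nested runs when nesting has been
// disallowed.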
void MessageLoop::RunHandler() {
  DCHECK_EQ(this, current());
  DCHECK(run_loop_);
  CHECK(allow_nesting_ || run_loop_->run_depth_ == 1);
  pump_->Run(this);
}

bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (run_loop_->run_depth_ != 1)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  PendingTask pending_task =
      std::move(deferred_non_nestable_work_queue_.front());
  deferred_non_nestable_work_queue_.pop();

  RunTask(&pending_task);
  return true;
}

void MessageLoop::RunTask(PendingTask* pending_task) {
  DCHECK(nestable_tasks_allowed_);
  current_pending_task_ = pending_task;

#if defined(OS_WIN)
  if (pending_task->is_high_res) {
    pending_high_res_tasks_--;
    CHECK_GE(pending_high_res_tasks_, 0);
  }
#endif

  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  TRACE_TASK_EXECUTION("MessageLoop::RunTask", *pending_task);

  for (auto& observer : task_observers_)
    observer.WillProcessTask(*pending_task);
  task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
  for (auto& observer : task_observers_)
    observer.DidProcessTask(*pending_task);

  nestable_tasks_allowed_ = true;

  current_pending_task_ = nullptr;
}

bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
    RunTask(&pending_task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(std::move(pending_task));
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(PendingTask pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(std::move(pending_task));
}

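// Discards queued work during shutdown. Returns true if any of the queues
// still held tasks; delayed tasks are first re-queued so they are destroyed
// in their normal run order.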
bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = std::move(work_queue_.front());
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(std::move(pending_task));
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete these tasks regardless of valgrind status.
  // It's not completely clear why we want to leak them in the loops above.
  // This code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}

void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from the incoming queue to
  // |work_queue_| by waiting until the last minute (|work_queue_| is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (work_queue_.empty()) {
#if defined(OS_WIN)
    pending_high_res_tasks_ +=
        incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#else
    incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#endif
  }
}

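// Wakes the pump so that it calls DoWork(). Posting a task from another thread
// reaches this through |incoming_task_queue_|; MessagePump::ScheduleWork() is
// safe to call from any thread.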
void MessageLoop::ScheduleWork() {
  pump_->ScheduleWork();
}

void MessageLoop::NotifyBeginNestedLoop() {
  for (auto& observer : nesting_observers_)
    observer.OnBeginNestedMessageLoop();
}

bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

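  // Pull batches of tasks from the incoming queue. Immediate tasks are run (or
  // deferred if they are non-nestable and we are in a nested loop); delayed
  // tasks encountered here are only routed into |delayed_work_queue_| for
  // DoDelayedWork() to run later.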
  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = std::move(work_queue_.front());
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        int sequence_num = pending_task.sequence_num;
        TimeTicks delayed_run_time = pending_task.delayed_run_time;
        AddToDelayedWorkQueue(std::move(pending_task));
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().sequence_num == sequence_num)
          pump_->ScheduleDelayedWork(delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(std::move(pending_task)))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}

bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind", there will be a lot of tasks in the delayed work
  // queue that are ready to run. To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again. As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task =
      std::move(const_cast<PendingTask&>(delayed_work_queue_.top()));
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(std::move(pending_task));
}

bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

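  // Quit the pump if QuitWhenIdle() was requested and we have now gone idle.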
  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  // When we return we will do a kernel wait for more tasks.
#if defined(OS_WIN)
  // On Windows we activate the high resolution timer so that the wait
  // _if_ triggered by the timer happens with good resolution. If we don't
  // do this the default resolution is 15ms which might not be acceptable
  // for some tasks.
  bool high_res = pending_high_res_tasks_ > 0;
  if (high_res != in_high_res_mode_) {
    in_high_res_mode_ = high_res;
    Time::ActivateHighResolutionTimer(in_high_res_mode_);
  }
#endif
  return false;
}

#if !defined(OS_NACL)
//------------------------------------------------------------------------------
// MessageLoopForUI

MessageLoopForUI::MessageLoopForUI(std::unique_ptr<MessagePump> pump)
    : MessageLoop(TYPE_UI, Bind(&ReturnPump, Passed(&pump))) {}

#if defined(OS_ANDROID)
void MessageLoopForUI::Start() {
  // No histogram support for the UI message loop, as it is managed by the
  // Java side.
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}

void MessageLoopForUI::StartForTesting(
    base::android::JavaMessageHandlerFactory* factory,
    WaitableEvent* test_done_event) {
  // No histogram support for the UI message loop, as it is managed by the
  // Java side.
  static_cast<MessagePumpForUI*>(pump_.get())
      ->StartForUnitTest(this, factory, test_done_event);
}

void MessageLoopForUI::Abort() {
  static_cast<MessagePumpForUI*>(pump_.get())->Abort();
}
#endif

#if defined(OS_IOS)
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
bool MessageLoopForUI::WatchFileDescriptor(
    int fd,
    bool persistent,
    MessagePumpLibevent::Mode mode,
    MessagePumpLibevent::FileDescriptorWatcher* controller,
    MessagePumpLibevent::Watcher* delegate) {
  return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL)

//------------------------------------------------------------------------------
// MessageLoopForIO

#if !defined(OS_NACL_SFI)

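// These thin wrappers forward to the platform IO pump (see ToPumpIO() above),
// which is only valid for TYPE_IO loops.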
#if defined(OS_WIN)
void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
}
#elif defined(OS_POSIX)
bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher* controller,
                                           Watcher* delegate) {
  return ToPumpIO(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL_SFI)

}  // namespace base