//
// Copyright 2010 The Android Open Source Project
//
// A looper implementation based on epoll().
//
#define LOG_TAG "Looper"

//#define LOG_NDEBUG 0

// Debugs poll and wake interactions.
#define DEBUG_POLL_AND_WAKE 0

// Debugs callback registration and invocation.
#define DEBUG_CALLBACKS 0

#include <cutils/log.h>
#include <utils/Looper.h>
#include <utils/Timers.h>

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <inttypes.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>


namespace android {

// --- WeakMessageHandler ---

WeakMessageHandler::WeakMessageHandler(const wp<MessageHandler>& handler) :
        mHandler(handler) {
}

WeakMessageHandler::~WeakMessageHandler() {
}

void WeakMessageHandler::handleMessage(const Message& message) {
    sp<MessageHandler> handler = mHandler.promote();
    if (handler != NULL) {
        handler->handleMessage(message);
    }
}


// --- SimpleLooperCallback ---

SimpleLooperCallback::SimpleLooperCallback(Looper_callbackFunc callback) :
        mCallback(callback) {
}

SimpleLooperCallback::~SimpleLooperCallback() {
}

int SimpleLooperCallback::handleEvent(int fd, int events, void* data) {
    return mCallback(fd, events, data);
}


// --- Looper ---

// Hint for number of file descriptors to be associated with the epoll instance.
static const int EPOLL_SIZE_HINT = 8;

// Maximum number of file descriptors for which to retrieve poll events each iteration.
static const int EPOLL_MAX_EVENTS = 16;

static pthread_once_t gTLSOnce = PTHREAD_ONCE_INIT;
static pthread_key_t gTLSKey = 0;

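// The constructor creates the wake event fd (a non-blocking, close-on-exec eventfd) that
// pollInner() watches so wake() can interrupt epoll_wait(), then builds the epoll set
// under the lock.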
Looper::Looper(bool allowNonCallbacks) :
        mAllowNonCallbacks(allowNonCallbacks), mSendingMessage(false),
        mPolling(false), mEpollFd(-1), mEpollRebuildRequired(false),
        mNextRequestSeq(0), mResponseIndex(0), mNextMessageUptime(LLONG_MAX) {
    mWakeEventFd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    LOG_ALWAYS_FATAL_IF(mWakeEventFd < 0, "Could not make wake event fd: %s",
            strerror(errno));

    AutoMutex _l(mLock);
    rebuildEpollLocked();
}

Looper::~Looper() {
    close(mWakeEventFd);
    if (mEpollFd >= 0) {
        close(mEpollFd);
    }
}

void Looper::initTLSKey() {
    int result = pthread_key_create(& gTLSKey, threadDestructor);
    LOG_ALWAYS_FATAL_IF(result != 0, "Could not allocate TLS key.");
}

void Looper::threadDestructor(void *st) {
    Looper* const self = static_cast<Looper*>(st);
    if (self != NULL) {
        self->decStrong((void*)threadDestructor);
    }
}

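// The per-thread looper is stored in thread-local storage. setForThread() takes a strong
// reference on the new looper and releases the previous one; threadDestructor() drops the
// reference when the thread exits.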
void Looper::setForThread(const sp<Looper>& looper) {
    sp<Looper> old = getForThread(); // also has side-effect of initializing TLS

    if (looper != NULL) {
        looper->incStrong((void*)threadDestructor);
    }

    pthread_setspecific(gTLSKey, looper.get());

    if (old != NULL) {
        old->decStrong((void*)threadDestructor);
    }
}

sp<Looper> Looper::getForThread() {
    int result = pthread_once(& gTLSOnce, initTLSKey);
    LOG_ALWAYS_FATAL_IF(result != 0, "pthread_once failed");

    return (Looper*)pthread_getspecific(gTLSKey);
}

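// Returns the calling thread's looper, creating one on first use. The
// PREPARE_ALLOW_NON_CALLBACKS option controls whether addFd() may be called with a NULL
// callback and an identifier instead.
//
// Illustrative usage sketch (hypothetical callback/data names; API as declared in
// <utils/Looper.h>):
//
//     sp<Looper> looper = Looper::prepare(0 /*opts*/);
//     looper->addFd(fd, 0, Looper::EVENT_INPUT, myCallback, myData);
//     looper->pollOnce(-1);  // blocks until an fd event, message, or wake() occurs
//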
sp<Looper> Looper::prepare(int opts) {
    bool allowNonCallbacks = opts & PREPARE_ALLOW_NON_CALLBACKS;
    sp<Looper> looper = Looper::getForThread();
    if (looper == NULL) {
        looper = new Looper(allowNonCallbacks);
        Looper::setForThread(looper);
    }
    if (looper->getAllowNonCallbacks() != allowNonCallbacks) {
        ALOGW("Looper already prepared for this thread with a different value for the "
                "LOOPER_PREPARE_ALLOW_NON_CALLBACKS option.");
    }
    return looper;
}

bool Looper::getAllowNonCallbacks() const {
    return mAllowNonCallbacks;
}

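// (Re)creates the epoll instance, registering the wake event fd and every currently
// tracked request. Called at construction and whenever scheduleEpollRebuildLocked() has
// flagged the set as stale (e.g. after a registered fd was closed and recycled).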
void Looper::rebuildEpollLocked() {
    // Close old epoll instance if we have one.
    if (mEpollFd >= 0) {
#if DEBUG_CALLBACKS
        ALOGD("%p ~ rebuildEpollLocked - rebuilding epoll set", this);
#endif
        close(mEpollFd);
    }

    // Allocate the new epoll instance and register the wake event fd.
    mEpollFd = epoll_create(EPOLL_SIZE_HINT);
    LOG_ALWAYS_FATAL_IF(mEpollFd < 0, "Could not create epoll instance: %s", strerror(errno));

    struct epoll_event eventItem;
    memset(& eventItem, 0, sizeof(epoll_event)); // zero out unused members of data field union
    eventItem.events = EPOLLIN;
    eventItem.data.fd = mWakeEventFd;
    int result = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mWakeEventFd, & eventItem);
    LOG_ALWAYS_FATAL_IF(result != 0, "Could not add wake event fd to epoll instance: %s",
            strerror(errno));

    for (size_t i = 0; i < mRequests.size(); i++) {
        const Request& request = mRequests.valueAt(i);
        struct epoll_event eventItem;
        request.initEventItem(&eventItem);

        int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, request.fd, & eventItem);
        if (epollResult < 0) {
            ALOGE("Error adding epoll events for fd %d while rebuilding epoll set: %s",
                    request.fd, strerror(errno));
        }
    }
}

void Looper::scheduleEpollRebuildLocked() {
    if (!mEpollRebuildRequired) {
#if DEBUG_CALLBACKS
        ALOGD("%p ~ scheduleEpollRebuildLocked - scheduling epoll set rebuild", this);
#endif
        mEpollRebuildRequired = true;
        wake();
    }
}

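// Waits for events to be reported, for at most timeoutMillis (or indefinitely if
// timeoutMillis is negative). Responses queued for identifiers (non-callback fds) are
// returned one at a time; otherwise pollInner() runs and the result is one of the POLL_*
// constants (POLL_WAKE, POLL_CALLBACK, POLL_TIMEOUT, or POLL_ERROR).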
int Looper::pollOnce(int timeoutMillis, int* outFd, int* outEvents, void** outData) {
    int result = 0;
    for (;;) {
        while (mResponseIndex < mResponses.size()) {
            const Response& response = mResponses.itemAt(mResponseIndex++);
            int ident = response.request.ident;
            if (ident >= 0) {
                int fd = response.request.fd;
                int events = response.events;
                void* data = response.request.data;
#if DEBUG_POLL_AND_WAKE
                ALOGD("%p ~ pollOnce - returning signalled identifier %d: "
                        "fd=%d, events=0x%x, data=%p",
                        this, ident, fd, events, data);
#endif
                if (outFd != NULL) *outFd = fd;
                if (outEvents != NULL) *outEvents = events;
                if (outData != NULL) *outData = data;
                return ident;
            }
        }

        if (result != 0) {
#if DEBUG_POLL_AND_WAKE
            ALOGD("%p ~ pollOnce - returning result %d", this, result);
#endif
            if (outFd != NULL) *outFd = 0;
            if (outEvents != NULL) *outEvents = 0;
            if (outData != NULL) *outData = NULL;
            return result;
        }

        result = pollInner(timeoutMillis);
    }
}

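// Single poll iteration: adjusts the timeout for the next due message, blocks in
// epoll_wait(), converts any ready fds into Response entries, dispatches messages whose
// uptime has arrived, then invokes the callbacks of the collected responses. Returns an
// aggregate POLL_* result that pollOnce() hands back to the caller.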
int Looper::pollInner(int timeoutMillis) {
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ pollOnce - waiting: timeoutMillis=%d", this, timeoutMillis);
#endif

    // Adjust the timeout based on when the next message is due.
    if (timeoutMillis != 0 && mNextMessageUptime != LLONG_MAX) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        int messageTimeoutMillis = toMillisecondTimeoutDelay(now, mNextMessageUptime);
        if (messageTimeoutMillis >= 0
                && (timeoutMillis < 0 || messageTimeoutMillis < timeoutMillis)) {
            timeoutMillis = messageTimeoutMillis;
        }
#if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - next message in %" PRId64 "ns, adjusted timeout: timeoutMillis=%d",
                this, mNextMessageUptime - now, timeoutMillis);
#endif
    }

    // Poll.
    int result = POLL_WAKE;
    mResponses.clear();
    mResponseIndex = 0;

    // We are about to idle.
    mPolling = true;

    struct epoll_event eventItems[EPOLL_MAX_EVENTS];
    int eventCount = epoll_wait(mEpollFd, eventItems, EPOLL_MAX_EVENTS, timeoutMillis);

    // No longer idling.
    mPolling = false;

    // Acquire lock.
    mLock.lock();

    // Rebuild epoll set if needed.
    if (mEpollRebuildRequired) {
        mEpollRebuildRequired = false;
        rebuildEpollLocked();
        goto Done;
    }

    // Check for poll error.
    if (eventCount < 0) {
        if (errno == EINTR) {
            goto Done;
        }
        ALOGW("Poll failed with an unexpected error: %s", strerror(errno));
        result = POLL_ERROR;
        goto Done;
    }

    // Check for poll timeout.
    if (eventCount == 0) {
#if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - timeout", this);
#endif
        result = POLL_TIMEOUT;
        goto Done;
    }

    // Handle all events.
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ pollOnce - handling events from %d fds", this, eventCount);
#endif

    for (int i = 0; i < eventCount; i++) {
        int fd = eventItems[i].data.fd;
        uint32_t epollEvents = eventItems[i].events;
        if (fd == mWakeEventFd) {
            if (epollEvents & EPOLLIN) {
                awoken();
            } else {
                ALOGW("Ignoring unexpected epoll events 0x%x on wake event fd.", epollEvents);
            }
        } else {
            ssize_t requestIndex = mRequests.indexOfKey(fd);
            if (requestIndex >= 0) {
                int events = 0;
                if (epollEvents & EPOLLIN) events |= EVENT_INPUT;
                if (epollEvents & EPOLLOUT) events |= EVENT_OUTPUT;
                if (epollEvents & EPOLLERR) events |= EVENT_ERROR;
                if (epollEvents & EPOLLHUP) events |= EVENT_HANGUP;
                pushResponse(events, mRequests.valueAt(requestIndex));
            } else {
                ALOGW("Ignoring unexpected epoll events 0x%x on fd %d that is "
                        "no longer registered.", epollEvents, fd);
            }
        }
    }
Done: ;

    // Invoke pending message callbacks.
    mNextMessageUptime = LLONG_MAX;
    while (mMessageEnvelopes.size() != 0) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(0);
        if (messageEnvelope.uptime <= now) {
            // Remove the envelope from the list.
            // We keep a strong reference to the handler until the call to handleMessage
            // finishes. Then we drop it so that the handler can be deleted *before*
            // we reacquire our lock.
            { // obtain handler
                sp<MessageHandler> handler = messageEnvelope.handler;
                Message message = messageEnvelope.message;
                mMessageEnvelopes.removeAt(0);
                mSendingMessage = true;
                mLock.unlock();

#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
                ALOGD("%p ~ pollOnce - sending message: handler=%p, what=%d",
                        this, handler.get(), message.what);
#endif
                handler->handleMessage(message);
            } // release handler

            mLock.lock();
            mSendingMessage = false;
            result = POLL_CALLBACK;
        } else {
            // The last message left at the head of the queue determines the next wakeup time.
            mNextMessageUptime = messageEnvelope.uptime;
            break;
        }
    }

    // Release lock.
    mLock.unlock();

    // Invoke all response callbacks.
    for (size_t i = 0; i < mResponses.size(); i++) {
        Response& response = mResponses.editItemAt(i);
        if (response.request.ident == POLL_CALLBACK) {
            int fd = response.request.fd;
            int events = response.events;
            void* data = response.request.data;
#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
            ALOGD("%p ~ pollOnce - invoking fd event callback %p: fd=%d, events=0x%x, data=%p",
                    this, response.request.callback.get(), fd, events, data);
#endif
            // Invoke the callback. Note that the file descriptor may be closed by
            // the callback (and potentially even reused) before the function returns so
            // we need to be a little careful when removing the file descriptor afterwards.
            int callbackResult = response.request.callback->handleEvent(fd, events, data);
            if (callbackResult == 0) {
                removeFd(fd, response.request.seq);
            }

            // Clear the callback reference in the response structure promptly because we
            // will not clear the response vector itself until the next poll.
            response.request.callback.clear();
            result = POLL_CALLBACK;
        }
    }
    return result;
}


int Looper::pollAll(int timeoutMillis, int* outFd, int* outEvents, void** outData) {
    if (timeoutMillis <= 0) {
        int result;
        do {
            result = pollOnce(timeoutMillis, outFd, outEvents, outData);
        } while (result == POLL_CALLBACK);
        return result;
    } else {
        nsecs_t endTime = systemTime(SYSTEM_TIME_MONOTONIC)
                + milliseconds_to_nanoseconds(timeoutMillis);

        for (;;) {
            int result = pollOnce(timeoutMillis, outFd, outEvents, outData);
            if (result != POLL_CALLBACK) {
                return result;
            }

            nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = toMillisecondTimeoutDelay(now, endTime);
            if (timeoutMillis == 0) {
                return POLL_TIMEOUT;
            }
        }
    }
}

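// Wakes the poll loop by writing to the eventfd; epoll_wait() in pollInner() then sees
// EPOLLIN on mWakeEventFd and returns. EAGAIN is ignored because it only happens when the
// eventfd counter has saturated, in which case a wake is already pending anyway.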
void Looper::wake() {
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ wake", this);
#endif

    uint64_t inc = 1;
    ssize_t nWrite = TEMP_FAILURE_RETRY(write(mWakeEventFd, &inc, sizeof(uint64_t)));
    if (nWrite != sizeof(uint64_t)) {
        if (errno != EAGAIN) {
            ALOGW("Could not write wake signal: %s", strerror(errno));
        }
    }
}

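// Drains the eventfd after a wake; reading the counter resets it to zero so the fd does
// not stay readable and spin the poll loop.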
void Looper::awoken() {
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ awoken", this);
#endif

    uint64_t counter;
    TEMP_FAILURE_RETRY(read(mWakeEventFd, &counter, sizeof(uint64_t)));
}

void Looper::pushResponse(int events, const Request& request) {
    Response response;
    response.events = events;
    response.request = request;
    mResponses.push(response);
}

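// Registers a file descriptor with the looper. When a callback is supplied, the ident is
// ignored (forced to POLL_CALLBACK) and the callback is invoked from pollOnce()/pollAll();
// with a NULL callback (only allowed when the looper permits non-callbacks) the ident,
// which must be >= 0, is returned directly from pollOnce(). Returns 1 on success, -1 on
// error. The first overload wraps a plain function pointer in a SimpleLooperCallback.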
int Looper::addFd(int fd, int ident, int events, Looper_callbackFunc callback, void* data) {
    return addFd(fd, ident, events, callback ? new SimpleLooperCallback(callback) : NULL, data);
}

int Looper::addFd(int fd, int ident, int events, const sp<LooperCallback>& callback, void* data) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ addFd - fd=%d, ident=%d, events=0x%x, callback=%p, data=%p", this, fd, ident,
            events, callback.get(), data);
#endif

    if (!callback.get()) {
        if (! mAllowNonCallbacks) {
            ALOGE("Invalid attempt to set NULL callback but not allowed for this looper.");
            return -1;
        }

        if (ident < 0) {
            ALOGE("Invalid attempt to set NULL callback with ident < 0.");
            return -1;
        }
    } else {
        ident = POLL_CALLBACK;
    }

    { // acquire lock
        AutoMutex _l(mLock);

        Request request;
        request.fd = fd;
        request.ident = ident;
        request.events = events;
        request.seq = mNextRequestSeq++;
        request.callback = callback;
        request.data = data;
        if (mNextRequestSeq == -1) mNextRequestSeq = 0; // reserve sequence number -1

        struct epoll_event eventItem;
        request.initEventItem(&eventItem);

        ssize_t requestIndex = mRequests.indexOfKey(fd);
        if (requestIndex < 0) {
            int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, fd, & eventItem);
            if (epollResult < 0) {
                ALOGE("Error adding epoll events for fd %d: %s", fd, strerror(errno));
                return -1;
            }
            mRequests.add(fd, request);
        } else {
            int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_MOD, fd, & eventItem);
            if (epollResult < 0) {
                if (errno == ENOENT) {
                    // Tolerate ENOENT because it means that an older file descriptor was
                    // closed before its callback was unregistered and meanwhile a new
                    // file descriptor with the same number has been created and is now
                    // being registered for the first time. This error may occur naturally
                    // when a callback has the side-effect of closing the file descriptor
                    // before returning and unregistering itself. Callback sequence number
                    // checks further ensure that the race is benign.
                    //
                    // Unfortunately due to kernel limitations we need to rebuild the epoll
                    // set from scratch because it may contain an old file handle that we are
                    // now unable to remove since its file descriptor is no longer valid.
                    // No such problem would have occurred if we were using the poll system
                    // call instead, but that approach carries other disadvantages.
#if DEBUG_CALLBACKS
                    ALOGD("%p ~ addFd - EPOLL_CTL_MOD failed due to file descriptor "
                            "being recycled, falling back on EPOLL_CTL_ADD: %s",
                            this, strerror(errno));
#endif
                    epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, fd, & eventItem);
                    if (epollResult < 0) {
                        ALOGE("Error modifying or adding epoll events for fd %d: %s",
                                fd, strerror(errno));
                        return -1;
                    }
                    scheduleEpollRebuildLocked();
                } else {
                    ALOGE("Error modifying epoll events for fd %d: %s", fd, strerror(errno));
                    return -1;
                }
            }
            mRequests.replaceValueAt(requestIndex, request);
        }
    } // release lock
    return 1;
}

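// Unregisters a file descriptor. The seq variant is used internally after a callback
// returns 0: the sequence number guards against removing a newer registration that reused
// the same fd number. Returns 1 if the fd was removed, 0 if it was not registered (or the
// sequence number did not match), and -1 on error.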
int Looper::removeFd(int fd) {
    return removeFd(fd, -1);
}

int Looper::removeFd(int fd, int seq) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ removeFd - fd=%d, seq=%d", this, fd, seq);
#endif

    { // acquire lock
        AutoMutex _l(mLock);
        ssize_t requestIndex = mRequests.indexOfKey(fd);
        if (requestIndex < 0) {
            return 0;
        }

        // Check the sequence number if one was given.
        if (seq != -1 && mRequests.valueAt(requestIndex).seq != seq) {
#if DEBUG_CALLBACKS
            ALOGD("%p ~ removeFd - sequence number mismatch, oldSeq=%d",
                    this, mRequests.valueAt(requestIndex).seq);
#endif
            return 0;
        }

        // Always remove the FD from the request map even if an error occurs while
        // updating the epoll set so that we avoid accidentally leaking callbacks.
        mRequests.removeItemsAt(requestIndex);

        int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_DEL, fd, NULL);
        if (epollResult < 0) {
            if (seq != -1 && (errno == EBADF || errno == ENOENT)) {
                // Tolerate EBADF or ENOENT when the sequence number is known because it
                // means that the file descriptor was closed before its callback was
                // unregistered. This error may occur naturally when a callback has the
                // side-effect of closing the file descriptor before returning and
                // unregistering itself.
                //
                // Unfortunately due to kernel limitations we need to rebuild the epoll
                // set from scratch because it may contain an old file handle that we are
                // now unable to remove since its file descriptor is no longer valid.
                // No such problem would have occurred if we were using the poll system
                // call instead, but that approach carries other disadvantages.
#if DEBUG_CALLBACKS
                ALOGD("%p ~ removeFd - EPOLL_CTL_DEL failed due to file descriptor "
                        "being closed: %s", this, strerror(errno));
#endif
                scheduleEpollRebuildLocked();
            } else {
                // Some other error occurred. This is really weird because it means
                // our list of callbacks got out of sync with the epoll set somehow.
                // We defensively rebuild the epoll set to avoid getting spurious
                // notifications with nowhere to go.
                ALOGE("Error removing epoll events for fd %d: %s", fd, strerror(errno));
                scheduleEpollRebuildLocked();
                return -1;
            }
        }
    } // release lock
    return 1;
}

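// Messages are kept in mMessageEnvelopes sorted by uptime. sendMessage() and
// sendMessageDelayed() are thin wrappers over sendMessageAtTime(), which only wakes the
// poll loop when the new message lands at the head of the queue and the looper is not
// already dispatching messages.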
void Looper::sendMessage(const sp<MessageHandler>& handler, const Message& message) {
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    sendMessageAtTime(now, handler, message);
}

void Looper::sendMessageDelayed(nsecs_t uptimeDelay, const sp<MessageHandler>& handler,
        const Message& message) {
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    sendMessageAtTime(now + uptimeDelay, handler, message);
}

void Looper::sendMessageAtTime(nsecs_t uptime, const sp<MessageHandler>& handler,
        const Message& message) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ sendMessageAtTime - uptime=%" PRId64 ", handler=%p, what=%d",
            this, uptime, handler.get(), message.what);
#endif

    size_t i = 0;
    { // acquire lock
        AutoMutex _l(mLock);

        size_t messageCount = mMessageEnvelopes.size();
        while (i < messageCount && uptime >= mMessageEnvelopes.itemAt(i).uptime) {
            i += 1;
        }

        MessageEnvelope messageEnvelope(uptime, handler, message);
        mMessageEnvelopes.insertAt(messageEnvelope, i, 1);

        // Optimization: If the Looper is currently sending a message, then we can skip
        // the call to wake() because the next thing the Looper will do after processing
        // messages is to decide when the next wakeup time should be. In fact, it does
        // not even matter whether this code is running on the Looper thread.
        if (mSendingMessage) {
            return;
        }
    } // release lock

    // Wake the poll loop only when we enqueue a new message at the head.
    if (i == 0) {
        wake();
    }
}

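// Removes all pending (not yet dispatched) messages posted to the given handler; the
// second overload additionally matches on Message::what.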
void Looper::removeMessages(const sp<MessageHandler>& handler) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ removeMessages - handler=%p", this, handler.get());
#endif

    { // acquire lock
        AutoMutex _l(mLock);

        for (size_t i = mMessageEnvelopes.size(); i != 0; ) {
            const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(--i);
            if (messageEnvelope.handler == handler) {
                mMessageEnvelopes.removeAt(i);
            }
        }
    } // release lock
}

void Looper::removeMessages(const sp<MessageHandler>& handler, int what) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ removeMessages - handler=%p, what=%d", this, handler.get(), what);
#endif

    { // acquire lock
        AutoMutex _l(mLock);

        for (size_t i = mMessageEnvelopes.size(); i != 0; ) {
            const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(--i);
            if (messageEnvelope.handler == handler
                    && messageEnvelope.message.what == what) {
                mMessageEnvelopes.removeAt(i);
            }
        }
    } // release lock
}

bool Looper::isPolling() const {
    return mPolling;
}

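// Translates this request's Looper event flags into an epoll_event for its fd. EPOLLERR
// and EPOLLHUP need no explicit registration; epoll always reports them.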
void Looper::Request::initEventItem(struct epoll_event* eventItem) const {
    int epollEvents = 0;
    if (events & EVENT_INPUT) epollEvents |= EPOLLIN;
    if (events & EVENT_OUTPUT) epollEvents |= EPOLLOUT;

    memset(eventItem, 0, sizeof(epoll_event)); // zero out unused members of data field union
    eventItem->events = epollEvents;
    eventItem->data.fd = fd;
}

} // namespace android