1 /*
2 * Event loop based on select() loop
3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
4 *
5 * This software may be distributed under the terms of the BSD license.
6 * See README for more details.
7 */
8
9 #include "includes.h"
10 #include <assert.h>
11
12 #include "common.h"
13 #include "trace.h"
14 #include "list.h"
15 #include "eloop.h"
16
17 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
18 #error Do not define both of poll and epoll
19 #endif
20
21 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
22 #error Do not define both of poll and kqueue
23 #endif
24
25 #if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
26 !defined(CONFIG_ELOOP_KQUEUE)
27 #define CONFIG_ELOOP_SELECT
28 #endif
29
30 #ifdef CONFIG_ELOOP_POLL
31 #include <poll.h>
32 #endif /* CONFIG_ELOOP_POLL */
33
34 #ifdef CONFIG_ELOOP_EPOLL
35 #include <sys/epoll.h>
36 #endif /* CONFIG_ELOOP_EPOLL */
37
38 #ifdef CONFIG_ELOOP_KQUEUE
39 #include <sys/event.h>
40 #endif /* CONFIG_ELOOP_KQUEUE */
41
/* A registered socket and the callback context invoked when it is ready */
struct eloop_sock {
	int sock; /* monitored file descriptor */
	void *eloop_data; /* first context pointer passed to handler */
	void *user_data; /* second context pointer passed to handler */
	eloop_sock_handler handler; /* callback run when sock is ready */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
51
/* A registered timeout; kept in eloop.timeout sorted by expiry time */
struct eloop_timeout {
	struct dl_list list; /* linkage in eloop.timeout */
	struct os_reltime time; /* absolute (relative-clock) expiry time */
	void *eloop_data; /* first context pointer passed to handler */
	void *user_data; /* second context pointer passed to handler */
	eloop_timeout_handler handler; /* callback run at expiry */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
62
/* A registered signal handler; callbacks are deferred to loop context */
struct eloop_signal {
	int sig; /* signal number (e.g., SIGINT) */
	void *user_data; /* context pointer passed to handler */
	eloop_signal_handler handler; /* callback run from eloop context */
	int signaled; /* count of deliveries not yet processed */
};
69
/* Dynamic array of registered sockets for one event type */
struct eloop_sock_table {
	size_t count; /* number of entries in table */
	struct eloop_sock *table; /* reallocated as entries come and go */
	eloop_event_type type; /* READ/WRITE/EXCEPTION for this table */
	int changed; /* set when table is modified; dispatch loops check it */
};
76
/*
 * Global state for the event loop. Which members exist depends on the
 * selected backend (poll/epoll/kqueue/select).
 */
struct eloop_data {
	int max_sock; /* highest registered socket number */

	size_t count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	size_t max_pollfd_map; /* number of pollfds_map currently allocated */
	size_t max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds; /* array handed to poll() */
	struct pollfd **pollfds_map; /* fd-indexed lookup into pollfds */
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd; /* number of fd_table entries allocated */
	struct eloop_sock *fd_table; /* fd-indexed copy of registrations */
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd; /* descriptor from epoll_create1() */
	size_t epoll_max_event_num; /* allocated size of epoll_events */
	struct epoll_event *epoll_events; /* buffer for epoll_wait() results */
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd; /* descriptor from kqueue() */
	size_t kqueue_nevents; /* allocated size of kqueue_events */
	struct kevent *kqueue_events; /* buffer for kevent() results */
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers; /* sockets waiting for readability */
	struct eloop_sock_table writers; /* sockets waiting for writability */
	struct eloop_sock_table exceptions; /* sockets waiting for exceptions */

	struct dl_list timeout; /* eloop_timeout list sorted by expiry */

	size_t signal_count; /* number of entries in signals */
	struct eloop_signal *signals; /* registered signal handlers */
	int signaled; /* signals received since last processing pass */
	int pending_terminate; /* SIGINT/SIGTERM seen; shutdown pending */

	int terminate; /* when set, eloop_run() exits its main loop */
};
114
115 static struct eloop_data eloop;
116
117
118 #ifdef WPA_TRACE
119
/* SIGSEGV handler for tracing builds: dump a backtrace, then abort */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
125
eloop_trace_sock_add_ref(struct eloop_sock_table * table)126 static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
127 {
128 size_t i;
129
130 if (table == NULL || table->table == NULL)
131 return;
132 for (i = 0; i < table->count; i++) {
133 wpa_trace_add_ref(&table->table[i], eloop,
134 table->table[i].eloop_data);
135 wpa_trace_add_ref(&table->table[i], user,
136 table->table[i].user_data);
137 }
138 }
139
140
eloop_trace_sock_remove_ref(struct eloop_sock_table * table)141 static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
142 {
143 size_t i;
144
145 if (table == NULL || table->table == NULL)
146 return;
147 for (i = 0; i < table->count; i++) {
148 wpa_trace_remove_ref(&table->table[i], eloop,
149 table->table[i].eloop_data);
150 wpa_trace_remove_ref(&table->table[i], user,
151 table->table[i].user_data);
152 }
153 }
154
155 #else /* WPA_TRACE */
156
157 #define eloop_trace_sock_add_ref(table) do { } while (0)
158 #define eloop_trace_sock_remove_ref(table) do { } while (0)
159
160 #endif /* WPA_TRACE */
161
162
/*
 * eloop_init - Initialize global event loop data
 * Returns: 0 on success, -1 on failure (epoll/kqueue descriptor creation)
 *
 * This function must be called before any other eloop_*() function.
 */
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* Tag each table with its event type for kernel queue registration */
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	/* Dump a backtrace on SIGSEGV in tracing builds */
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
193
194
195 #ifdef CONFIG_ELOOP_EPOLL
/* Register sock with the epoll instance for the given event type.
 * Returns 0 on success, -1 on epoll_ctl() failure. */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	if (type == EVENT_TYPE_READ) {
		ev.events = EPOLLIN;
	} else if (type == EVENT_TYPE_WRITE) {
		ev.events = EPOLLOUT;
	} else if (type == EVENT_TYPE_EXCEPTION) {
		/*
		 * Exceptions are always checked when using epoll, but I
		 * suppose it's possible that someone registered a socket
		 * *only* for exception handling.
		 */
		ev.events = EPOLLERR | EPOLLHUP;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
225 #endif /* CONFIG_ELOOP_EPOLL */
226
227
228 #ifdef CONFIG_ELOOP_KQUEUE
229
/* Map an eloop event type to the corresponding kqueue filter (0 if none) */
static short event_type_kevent_filter(eloop_event_type type)
{
	if (type == EVENT_TYPE_READ)
		return EVFILT_READ;
	if (type == EVENT_TYPE_WRITE)
		return EVFILT_WRITE;
	return 0;
}
241
242
/* Register sock with the kqueue instance for the given event type.
 * Returns 0 on success, -1 on kevent() failure. */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent kev;

	EV_SET(&kev, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &kev, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}

	return 0;
}
255
256 #endif /* CONFIG_ELOOP_KQUEUE */
257
258
/*
 * Add a socket registration to the given table, growing backend-specific
 * data structures (pollfd arrays, fd lookup tables, kernel event buffers)
 * as needed.
 * Returns: 0 on success, -1 on failure (allocation or kernel queueing)
 */
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	size_t next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	/* Grow the fd -> pollfd lookup map (with headroom) if needed */
	if ((size_t) new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	/* Grow the pollfd array handed to poll() if needed */
	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		size_t nmax = eloop.count + 1 + 50;

		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* Grow the fd-indexed registration lookup table if needed */
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	/* Double the epoll_wait() result buffer if it is now too small */
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	/* Double the kevent() result buffer if it is now too small; the old
	 * contents are scratch output only, so plain malloc + free is fine */
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	/* Append the new entry to the table itself */
	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1; /* tell dispatch loops the table was modified */
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* Register with the kernel queue and mirror the entry into the
	 * fd-indexed table used by the dispatch code */
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}
383
384
/*
 * Remove a socket registration from the given table (and, for epoll and
 * kqueue, from the kernel event queue). No-op if sock is not found.
 */
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	size_t i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	/* Locate the entry for sock */
	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		/* Shift later entries down to keep the table compact */
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1; /* tell dispatch loops the table was modified */
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	/* Clear the fd-indexed mirror entry so dispatch skips it */
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	/* Clear the fd-indexed mirror entry so dispatch skips it */
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}
431
432
433 #ifdef CONFIG_ELOOP_POLL
434
/* Return the pollfd registered for fd, or NULL if fd is out of range or
 * has no entry in the map. */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < 0 || fd >= mx)
		return NULL;
	return pollfds_map[fd];
}
441
442
eloop_sock_table_set_fds(struct eloop_sock_table * readers,struct eloop_sock_table * writers,struct eloop_sock_table * exceptions,struct pollfd * pollfds,struct pollfd ** pollfds_map,int max_pollfd_map)443 static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
444 struct eloop_sock_table *writers,
445 struct eloop_sock_table *exceptions,
446 struct pollfd *pollfds,
447 struct pollfd **pollfds_map,
448 int max_pollfd_map)
449 {
450 size_t i;
451 int nxt = 0;
452 int fd;
453 struct pollfd *pfd;
454
455 /* Clear pollfd lookup map. It will be re-populated below. */
456 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
457
458 if (readers && readers->table) {
459 for (i = 0; i < readers->count; i++) {
460 fd = readers->table[i].sock;
461 assert(fd >= 0 && fd < max_pollfd_map);
462 pollfds[nxt].fd = fd;
463 pollfds[nxt].events = POLLIN;
464 pollfds[nxt].revents = 0;
465 pollfds_map[fd] = &(pollfds[nxt]);
466 nxt++;
467 }
468 }
469
470 if (writers && writers->table) {
471 for (i = 0; i < writers->count; i++) {
472 /*
473 * See if we already added this descriptor, update it
474 * if so.
475 */
476 fd = writers->table[i].sock;
477 assert(fd >= 0 && fd < max_pollfd_map);
478 pfd = pollfds_map[fd];
479 if (!pfd) {
480 pfd = &(pollfds[nxt]);
481 pfd->events = 0;
482 pfd->fd = fd;
483 pollfds[i].revents = 0;
484 pollfds_map[fd] = pfd;
485 nxt++;
486 }
487 pfd->events |= POLLOUT;
488 }
489 }
490
491 /*
492 * Exceptions are always checked when using poll, but I suppose it's
493 * possible that someone registered a socket *only* for exception
494 * handling. Set the POLLIN bit in this case.
495 */
496 if (exceptions && exceptions->table) {
497 for (i = 0; i < exceptions->count; i++) {
498 /*
499 * See if we already added this descriptor, just use it
500 * if so.
501 */
502 fd = exceptions->table[i].sock;
503 assert(fd >= 0 && fd < max_pollfd_map);
504 pfd = pollfds_map[fd];
505 if (!pfd) {
506 pfd = &(pollfds[nxt]);
507 pfd->events = POLLIN;
508 pfd->fd = fd;
509 pollfds[i].revents = 0;
510 pollfds_map[fd] = pfd;
511 nxt++;
512 }
513 }
514 }
515
516 return nxt;
517 }
518
519
eloop_sock_table_dispatch_table(struct eloop_sock_table * table,struct pollfd ** pollfds_map,int max_pollfd_map,short int revents)520 static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
521 struct pollfd **pollfds_map,
522 int max_pollfd_map,
523 short int revents)
524 {
525 size_t i;
526 struct pollfd *pfd;
527
528 if (!table || !table->table)
529 return 0;
530
531 table->changed = 0;
532 for (i = 0; i < table->count; i++) {
533 pfd = find_pollfd(pollfds_map, table->table[i].sock,
534 max_pollfd_map);
535 if (!pfd)
536 continue;
537
538 if (!(pfd->revents & revents))
539 continue;
540
541 table->table[i].handler(table->table[i].sock,
542 table->table[i].eloop_data,
543 table->table[i].user_data);
544 if (table->changed)
545 return 1;
546 }
547
548 return 0;
549 }
550
551
/*
 * Dispatch ready sockets across all three tables. Readers are notified on
 * POLLIN/POLLERR/POLLHUP, writers on POLLOUT, exceptions on
 * POLLERR/POLLHUP. Stops as soon as any handler modifies a table, since
 * the pollfds/pollfds_map arrays may then be invalid.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	struct eloop_sock_table *tables[3];
	short int events[3];
	int k;

	tables[0] = readers;
	events[0] = POLLIN | POLLERR | POLLHUP;
	tables[1] = writers;
	events[1] = POLLOUT;
	tables[2] = exceptions;
	events[2] = POLLERR | POLLHUP;

	for (k = 0; k < 3; k++) {
		if (eloop_sock_table_dispatch_table(tables[k], pollfds_map,
						    max_pollfd_map,
						    events[k]))
			return; /* pollfds may be invalid at this point */
	}
}
570
571 #endif /* CONFIG_ELOOP_POLL */
572
573 #ifdef CONFIG_ELOOP_SELECT
574
eloop_sock_table_set_fds(struct eloop_sock_table * table,fd_set * fds)575 static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
576 fd_set *fds)
577 {
578 size_t i;
579
580 FD_ZERO(fds);
581
582 if (table->table == NULL)
583 return;
584
585 for (i = 0; i < table->count; i++) {
586 assert(table->table[i].sock >= 0);
587 FD_SET(table->table[i].sock, fds);
588 }
589 }
590
591
eloop_sock_table_dispatch(struct eloop_sock_table * table,fd_set * fds)592 static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
593 fd_set *fds)
594 {
595 size_t i;
596
597 if (table == NULL || table->table == NULL)
598 return;
599
600 table->changed = 0;
601 for (i = 0; i < table->count; i++) {
602 if (FD_ISSET(table->table[i].sock, fds)) {
603 table->table[i].handler(table->table[i].sock,
604 table->table[i].eloop_data,
605 table->table[i].user_data);
606 if (table->changed)
607 break;
608 }
609 }
610 }
611
612 #endif /* CONFIG_ELOOP_SELECT */
613
614
615 #ifdef CONFIG_ELOOP_EPOLL
eloop_sock_table_dispatch(struct epoll_event * events,int nfds)616 static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
617 {
618 struct eloop_sock *table;
619 int i;
620
621 for (i = 0; i < nfds; i++) {
622 table = &eloop.fd_table[events[i].data.fd];
623 if (table->handler == NULL)
624 continue;
625 table->handler(table->sock, table->eloop_data,
626 table->user_data);
627 if (eloop.readers.changed ||
628 eloop.writers.changed ||
629 eloop.exceptions.changed)
630 break;
631 }
632 }
633 #endif /* CONFIG_ELOOP_EPOLL */
634
635
636 #ifdef CONFIG_ELOOP_KQUEUE
637
eloop_sock_table_dispatch(struct kevent * events,int nfds)638 static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
639 {
640 struct eloop_sock *table;
641 int i;
642
643 for (i = 0; i < nfds; i++) {
644 table = &eloop.fd_table[events[i].ident];
645 if (table->handler == NULL)
646 continue;
647 table->handler(table->sock, table->eloop_data,
648 table->user_data);
649 if (eloop.readers.changed ||
650 eloop.writers.changed ||
651 eloop.exceptions.changed)
652 break;
653 }
654 }
655
656
eloop_sock_table_requeue(struct eloop_sock_table * table)657 static int eloop_sock_table_requeue(struct eloop_sock_table *table)
658 {
659 size_t i;
660 int r;
661
662 r = 0;
663 for (i = 0; i < table->count && table->table; i++) {
664 if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
665 r = -1;
666 }
667 return r;
668 }
669
670 #endif /* CONFIG_ELOOP_KQUEUE */
671
672
/*
 * eloop_sock_requeue - Rebuild the kernel event queue registrations
 * Returns: 0 on success, -1 on failure
 *
 * With kqueue, a fresh kqueue descriptor is created and all registered
 * sockets are queued again; with other backends this is a no-op.
 * NOTE(review): presumably needed after fork(), since kqueue descriptors
 * are not shared with the child process - confirm against callers.
 */
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}
696
697
eloop_sock_table_destroy(struct eloop_sock_table * table)698 static void eloop_sock_table_destroy(struct eloop_sock_table *table)
699 {
700 if (table) {
701 size_t i;
702
703 for (i = 0; i < table->count && table->table; i++) {
704 wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
705 "sock=%d eloop_data=%p user_data=%p "
706 "handler=%p",
707 table->table[i].sock,
708 table->table[i].eloop_data,
709 table->table[i].user_data,
710 table->table[i].handler);
711 wpa_trace_dump_funcname("eloop unregistered socket "
712 "handler",
713 table->table[i].handler);
714 wpa_trace_dump("eloop sock", &table->table[i]);
715 }
716 os_free(table->table);
717 }
718 }
719
720
/* Convenience wrapper: register sock for read events */
int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}
727
728
/* Convenience wrapper: unregister a read-event registration for sock */
void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
733
734
/* Map an event type to its global socket table (NULL for unknown type) */
static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	if (type == EVENT_TYPE_READ)
		return &eloop.readers;
	if (type == EVENT_TYPE_WRITE)
		return &eloop.writers;
	if (type == EVENT_TYPE_EXCEPTION)
		return &eloop.exceptions;

	return NULL;
}
748
749
/*
 * eloop_register_sock - Register a handler for a socket event type
 * Returns: 0 on success, -1 on failure (invalid type or allocation)
 */
int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}
761
762
/* Remove a previously registered socket handler for the given type */
void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}
770
771
/*
 * eloop_register_timeout - Register a timeout callback
 * @secs: Seconds until the timeout fires
 * @usecs: Microseconds until the timeout fires
 * @handler: Callback to run when the timeout expires
 * @eloop_data: First context pointer passed to the handler
 * @user_data: Second context pointer passed to the handler
 * Returns: 0 on success (including the overflow "never fires" case),
 * -1 on allocation or clock failure
 *
 * The timeout list is kept sorted by expiry so the main loop only needs
 * to inspect the head of the list.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume long enough timeout to be assumed
		 * to be infinite, i.e., the timeout would never happen.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		/* Normalize so usec stays below one second */
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
821
822
/* Unlink a timeout from the list, drop its trace references, and free it */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}
830
831
/*
 * eloop_cancel_timeout - Cancel all matching registered timeouts
 * @handler: Handler the timeout was registered with
 * @eloop_data: Matching context pointer, or ELOOP_ALL_CTX to match any
 * @user_data: Matching context pointer, or ELOOP_ALL_CTX to match any
 * Returns: Number of timeouts removed
 */
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}
852
853
/*
 * eloop_cancel_timeout_one - Cancel one matching timeout and report the
 * time that remained before it would have fired
 * @remaining: Set to the remaining time (zeroed if none was found or the
 *	timeout had already expired)
 * Returns: 1 if a timeout was removed, 0 otherwise
 *
 * Unlike eloop_cancel_timeout(), the context pointers must match exactly
 * (no ELOOP_ALL_CTX wildcard) and at most one timeout is removed.
 */
int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			/* Only report a remaining time that is still in the
			 * future */
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}
879
880
/*
 * eloop_is_timeout_registered - Check whether a matching timeout exists
 * Returns: 1 if a timeout with the same handler and context pointers is
 * currently registered, 0 otherwise
 */
int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *entry;

	dl_list_for_each(entry, &eloop.timeout, struct eloop_timeout, list) {
		if (entry->handler != handler)
			continue;
		if (entry->eloop_data == eloop_data &&
		    entry->user_data == user_data)
			return 1;
	}

	return 0;
}
895
896
/*
 * eloop_deplete_timeout - Shorten a matching timeout if needed
 * @req_secs: Requested number of seconds
 * @req_usecs: Requested number of microseconds
 * Returns: 1 if the timeout was replaced with the shorter requested time,
 * 0 if the existing timeout was already sooner, -1 if no matching timeout
 * was found
 */
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			/* Replace only if the requested time is sooner */
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
927
928
/*
 * eloop_replenish_timeout - Extend a matching timeout if needed
 * @req_secs: Requested number of seconds
 * @req_usecs: Requested number of microseconds
 * Returns: 1 if the timeout was replaced with the later requested time,
 * 0 if the existing timeout was already later, -1 if no matching timeout
 * was found
 */
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			/* Replace only if the requested time is later */
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
959
960
961 #ifndef CONFIG_NATIVE_WINDOWS
/*
 * SIGALRM watchdog armed by eloop_handle_signal(): if a pending
 * SIGINT/SIGTERM has not been processed within two seconds, assume the
 * main loop is stuck and force-exit the process.
 */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
971 #endif /* CONFIG_NATIVE_WINDOWS */
972
973
/*
 * OS signal handler: only records that the signal was received. The
 * registered user callback is run later, from the main loop, by
 * eloop_process_pending_signals().
 */
static void eloop_handle_signal(int sig)
{
	size_t i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}
996
997
eloop_process_pending_signals(void)998 static void eloop_process_pending_signals(void)
999 {
1000 size_t i;
1001
1002 if (eloop.signaled == 0)
1003 return;
1004 eloop.signaled = 0;
1005
1006 if (eloop.pending_terminate) {
1007 #ifndef CONFIG_NATIVE_WINDOWS
1008 alarm(0);
1009 #endif /* CONFIG_NATIVE_WINDOWS */
1010 eloop.pending_terminate = 0;
1011 }
1012
1013 for (i = 0; i < eloop.signal_count; i++) {
1014 if (eloop.signals[i].signaled) {
1015 eloop.signals[i].signaled = 0;
1016 eloop.signals[i].handler(eloop.signals[i].sig,
1017 eloop.signals[i].user_data);
1018 }
1019 }
1020 }
1021
1022
/*
 * eloop_register_signal - Register a callback for a specific signal
 * @sig: Signal number
 * @handler: Callback invoked from eloop context after the signal arrives
 * @user_data: Context pointer passed to the handler
 * Returns: 0 on success, -1 on memory allocation failure
 *
 * The OS-level handler only records the delivery; the callback runs later
 * from eloop_process_pending_signals().
 */
int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}
1043
1044
/*
 * eloop_register_signal_terminate - Register a handler for SIGINT and
 * SIGTERM
 * Returns: 0 on success, -1 on failure (first failing registration)
 */
int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret;

	ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret != 0)
		return ret;

	return eloop_register_signal(SIGTERM, handler, user_data);
}
1053
1054
/*
 * eloop_register_signal_reconfig - Register a handler for SIGHUP
 * Returns: 0 on success, -1 on failure (always 0 on native Windows,
 * which has no SIGHUP)
 */
int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}
1064
1065
/*
 * eloop_run - Run the event loop until it is terminated
 *
 * Dispatches registered timeout, signal, and socket handlers until
 * eloop_terminate() is called or nothing remains registered (no pending
 * timeouts and empty reader/writer/exception tables). Exactly one I/O
 * multiplexing backend (poll/select/epoll/kqueue) is compiled in via the
 * CONFIG_ELOOP_* defines.
 */
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	/* -1 = block indefinitely when no timeout is registered */
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	/* fd_set objects can be large; allocate from the heap to keep the
	 * stack footprint small. */
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		/* Convert the nearest registered timeout (if any) into the
		 * backend's wait-time format; zero wait if already expired. */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

		/* Wait for socket activity or timeout; a NULL/negative
		 * timeout argument means block indefinitely. */
#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			/* No sockets registered - nothing to wait on */
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			/* No sockets registered - nothing to wait on */
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		/* EINTR (e.g., a caught signal interrupted the wait) is not
		 * an error; the signal is handled below and we iterate. */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */

				   , strerror(errno));
			goto out;
		}

		/* Clear change markers before running handlers so that any
		 * register/unregister done inside a handler is detected. */
		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();


		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				/* Copy handler info before removal, since
				 * eloop_remove_timeout() frees the entry. */
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	/* Reset the flag so that eloop_run() can be called again later */
	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}
1249
1250
/*
 * eloop_terminate - Request termination of the event loop
 *
 * Sets the terminate flag; eloop_run() tests it at the top of every
 * iteration and exits its main loop once it is set.
 */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}
1255
1256
/*
 * eloop_destroy - Free all resources allocated for the event loop
 *
 * Logs and removes any timeouts that are still registered at shutdown
 * (these indicate handlers that were never cancelled), destroys the
 * reader/writer/exception socket tables, frees the signal table, and
 * releases the backend-specific state for the compiled-in backend.
 */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	/* Safe iteration: eloop_remove_timeout() unlinks the entry */
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		/* Remaining time until expiry, with a manual borrow from
		 * the seconds field when the usec subtraction underflows */
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1302
1303
eloop_terminated(void)1304 int eloop_terminated(void)
1305 {
1306 return eloop.terminate || eloop.pending_terminate;
1307 }
1308
1309
/*
 * eloop_wait_for_read_sock - Block until the given socket is readable
 * @sock: File descriptor to wait on; negative values are ignored
 *
 * Performs a one-shot blocking wait for readability on @sock without
 * touching the registered eloop socket tables. The backend in use is
 * selected by the same CONFIG_ELOOP_* defines as the main loop.
 *
 * Fix: the kqueue branch previously lacked the negative-descriptor guard
 * that the poll and select branches have, so a caller passing -1 would
 * register an invalid fd with kevent(); all branches now bail out early.
 */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll() here. But epoll() requires 4 system calls:
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and close() for
	 * the epoll fd. So select() is better for performance here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	/* Match the other backends: ignore invalid descriptors instead of
	 * registering them with kqueue. */
	if (sock < 0)
		return;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1351
1352 #ifdef CONFIG_ELOOP_SELECT
1353 #undef CONFIG_ELOOP_SELECT
1354 #endif /* CONFIG_ELOOP_SELECT */
1355