/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both CONFIG_ELOOP_POLL and CONFIG_ELOOP_EPOLL
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both CONFIG_ELOOP_POLL and CONFIG_ELOOP_KQUEUE
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
#include <sys/event.h>
#endif /* CONFIG_ELOOP_KQUEUE */

enum eloop_ctrl_fd_index {
	ELOOP_CTRL_FD_READ = 0,
	ELOOP_CTRL_FD_WRITE,

	ELOOP_CTRL_FD_BUTT
};

struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	int kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int ctrl_fd[ELOOP_CTRL_FD_BUTT];

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

static struct eloop_data eloop;

#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */

static void eloop_ctrl_read_handler(void *eloop_ctx, void *sock_ctx)
{
	int8_t buf;

	(void)eloop_ctx;
	(void)sock_ctx;

	if (eloop.ctrl_fd[ELOOP_CTRL_FD_READ] != -1) {
		/* Drain the wakeup byte written by eloop_wakeup() */
		if (read(eloop.ctrl_fd[ELOOP_CTRL_FD_READ], &buf, 1) < 0)
			wpa_printf(MSG_ERROR, "%s: read failed: %s",
				   __func__, strerror(errno));
	} else {
		wpa_printf(MSG_ERROR, "%s: pipe read end was closed",
			   __func__);
	}
}


static void eloop_ctrl_init(void)
{
	int ret;

	ret = pipe(eloop.ctrl_fd);
	if (ret != 0) {
		wpa_printf(MSG_ERROR, "%s: pipe failed: %s", __func__,
			   strerror(errno));
		/* Mark both ends invalid so later checks do not mistake
		 * fd 0 (from the initial memset) for a valid pipe end. */
		eloop.ctrl_fd[ELOOP_CTRL_FD_READ] = -1;
		eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] = -1;
		return;
	}
	eloop_register_read_sock(eloop.ctrl_fd[ELOOP_CTRL_FD_READ],
				 eloop_ctrl_read_handler, NULL, NULL);

	wpa_printf(MSG_INFO, "eloop_ctrl_init: %d", ret);
}


static void eloop_ctrl_deinit(void)
{
	if (eloop.ctrl_fd[ELOOP_CTRL_FD_READ] != -1) {
		eloop_unregister_read_sock(eloop.ctrl_fd[ELOOP_CTRL_FD_READ]);
		close(eloop.ctrl_fd[ELOOP_CTRL_FD_READ]);
	}
	if (eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] != -1)
		close(eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE]);
	eloop.ctrl_fd[ELOOP_CTRL_FD_READ] = -1;
	eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] = -1;

	wpa_printf(MSG_INFO, "eloop_ctrl_deinit done");
}


static int eloop_wakeup(void)
{
	int ret = -1;
	uint8_t buf = '0'; /* dummy byte; only the write itself matters */

	if (eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] != -1) {
		ret = write(eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE], &buf, 1);
	} else {
		wpa_printf(MSG_ERROR, "%s: pipe write end was closed",
			   __func__);
	}
	return ret;
}
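

/*
 * Illustrative sketch (not part of this file's API): the control pipe above
 * is the classic self-pipe wakeup. Writing one byte from another context
 * makes a blocking select()/poll()/epoll_wait()/kevent() call return so the
 * loop re-reads its timeout and socket tables. The helpers below are
 * hypothetical and assume the caller provides its own locking around the
 * eloop API when calling from another thread.
 */
#if 0
static void example_deferred_work(void *eloop_data, void *user_data)
{
	/* Runs in the eloop thread once the loop has woken up */
}

static void example_wake_from_other_thread(void)
{
	/* eloop_register_timeout() ends with eloop_wakeup(), so a loop
	 * blocked in its wait call notices the new timeout immediately. */
	eloop_register_timeout(0, 0, example_deferred_work, NULL, NULL);
}
#endif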

int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */

	eloop_ctrl_init();
	return 0;
}
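

/*
 * Minimal usage sketch, assuming the normal eloop lifecycle (hypothetical
 * handler and entry point; not part of this file):
 */
#if 0
static void example_tick(void *eloop_data, void *user_data)
{
	wpa_printf(MSG_INFO, "tick");
	eloop_terminate();
}

int example_main(void)
{
	if (eloop_init())
		return -1;
	eloop_register_timeout(1, 0, example_tick, NULL, NULL);
	eloop_run();	/* returns once eloop_terminate() has been called */
	eloop_destroy();
	return 0;
}
#endif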


#ifdef CONFIG_ELOOP_EPOLL
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but it is
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static short event_type_kevent_filter(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return EVFILT_READ;
	case EVENT_TYPE_WRITE:
		return EVFILT_WRITE;
	default:
		return 0;
	}
}


static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent ke;

	EV_SET(&ke, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}

#endif /* CONFIG_ELOOP_KQUEUE */


static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	int next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor; update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but it is possible
	 * that someone registered a socket *only* for exception handling.
	 * Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor; just use
			 * it if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}


static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
	int i, r;

	r = 0;
	for (i = 0; i < table->count && table->table; i++) {
		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
			r = -1;
	}
	return r;
}

#endif /* CONFIG_ELOOP_KQUEUE */


int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}
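

/*
 * Illustrative note: kqueue descriptors are not inherited across fork(), so
 * a forked child that keeps using eloop must rebuild its event queue. A
 * hedged sketch of that call pattern (hypothetical function, not part of
 * this file):
 */
#if 0
static int example_after_fork_in_child(void)
{
	/* Recreates the kqueue fd and re-adds all registered sockets;
	 * effectively a no-op (returns 0) for the other backends. */
	return eloop_sock_requeue();
}
#endif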


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;

		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume a timeout this long to be
		 * effectively infinite, i.e., one that would never trigger.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			(void)eloop_wakeup();
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);
	(void)eloop_wakeup();

	return 0;
}
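

/*
 * Illustrative sketch: each registration fires once, so a periodic timer is
 * built by re-registering the timeout from within its own handler.
 * Hypothetical names; assumes a one-second period:
 */
#if 0
static void example_periodic(void *eloop_data, void *user_data)
{
	/* ... do periodic work ... */
	eloop_register_timeout(1, 0, example_periodic, eloop_data, user_data);
}

static void example_start_periodic(void)
{
	eloop_register_timeout(1, 0, example_periodic, NULL, NULL);
}
#endif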


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    timeout->eloop_data == eloop_data &&
		    timeout->user_data == user_data) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now,
					       remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
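

/*
 * Illustrative contrast of the two helpers above (hypothetical callback and
 * values): deplete shortens an existing timeout if the requested deadline is
 * sooner than the remaining time, while replenish extends it if the
 * requested deadline is later; both leave the timeout unchanged otherwise
 * (returning 0) and return -1 if no matching timeout exists.
 */
#if 0
static void example_cb(void *eloop_data, void *user_data);

static void example_adjust(void)
{
	/* Make sure the pending example_cb timeout fires within 2 s */
	eloop_deplete_timeout(2, 0, example_cb, NULL, NULL);
	/* Make sure it keeps at least 5 s of remaining time */
	eloop_replenish_timeout(5, 0, example_cb, NULL, NULL);
}
#endif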


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there is a bug that ends up in a "
		   "busy loop that prevents clean shutdown. Killing program "
		   "forcefully.");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
	(void)eloop_wakeup();
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);

	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}
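

/*
 * Illustrative sketch: the usual terminate handler simply asks the loop to
 * exit. The raw signal is only recorded in eloop_handle_signal() above and
 * the registered handler is invoked safely from
 * eloop_process_pending_signals() inside the loop. Hypothetical names:
 */
#if 0
static void example_terminate(int sig, void *user_data)
{
	wpa_printf(MSG_INFO, "Signal %d received - terminating", sig);
	eloop_terminate();
}

static int example_setup_signals(void)
{
	return eloop_register_signal_terminate(example_terminate, NULL);
}
#endif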


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */
				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
	/* Wake the loop so a blocking wait call notices the flag */
	(void)eloop_wakeup();
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;

		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_ctrl_deinit();
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}


int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll() here, but waiting on a single descriptor with
	 * epoll() requires four system calls: epoll_create1(), epoll_ctl()
	 * for ADD, epoll_wait(), and close() for the epoll fd. So select()
	 * performs better here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */