1 /*
2 * Event loop based on select() loop
3 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
4 *
5 * This software may be distributed under the terms of the BSD license.
6 * See README for more details.
7 */
8
9 #include "includes.h"
10 #include <assert.h>
11 #ifdef CONFIG_DRIVER_HDF
12 #include <pthread.h>
13 #endif
14
15 #include "common.h"
16 #include "trace.h"
17 #include "list.h"
18 #include "eloop.h"
19
20 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
21 #error Do not define both of poll and epoll
22 #endif
23
24 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
25 #error Do not define both of poll and kqueue
26 #endif
27
28 #if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
29 !defined(CONFIG_ELOOP_KQUEUE)
30 #define CONFIG_ELOOP_SELECT
31 #endif
32
33 #ifdef CONFIG_ELOOP_POLL
34 #include <poll.h>
35 #endif /* CONFIG_ELOOP_POLL */
36
37 #ifdef CONFIG_ELOOP_EPOLL
38 #include <sys/epoll.h>
39 #endif /* CONFIG_ELOOP_EPOLL */
40
41 #ifdef CONFIG_ELOOP_KQUEUE
42 #include <sys/event.h>
43 #endif /* CONFIG_ELOOP_KQUEUE */
44
#ifdef CONFIG_DRIVER_HDF
/* Indices into eloop.ctrl_fd for the self-pipe used to wake up the loop */
enum eloop_ctrl_fd_index {
	ELOOP_CTRL_FD_READ = 0,	/* read end of the wakeup pipe */
	ELOOP_CTRL_FD_WRITE,	/* write end of the wakeup pipe */

	ELOOP_CTRL_FD_BUTT	/* number of pipe descriptors */
};
#endif
53
/* One registered socket handler */
struct eloop_sock {
	int sock;		/* monitored file descriptor */
	void *eloop_data;	/* first context pointer passed to handler */
	void *user_data;	/* second context pointer passed to handler */
	eloop_sock_handler handler;	/* callback run when sock is ready */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
63
/* One pending timeout, linked into eloop.timeout in expiry order */
struct eloop_timeout {
	struct dl_list list;	/* list node in eloop.timeout */
	struct os_reltime time;	/* absolute (relative-clock) expiry time */
	void *eloop_data;	/* first context pointer passed to handler */
	void *user_data;	/* second context pointer passed to handler */
	eloop_timeout_handler handler;	/* callback run at expiry */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
74
/* One registered signal handler */
struct eloop_signal {
	int sig;		/* signal number (e.g. SIGINT) */
	void *user_data;	/* context pointer passed to handler */
	eloop_signal_handler handler;	/* callback run from the main loop */
	int signaled;		/* count of deliveries not yet dispatched */
};
81
/* Dynamic array of socket handlers for one event type */
struct eloop_sock_table {
	size_t count;		/* number of valid entries in table */
	struct eloop_sock *table;	/* reallocated array of handlers */
	eloop_event_type type;	/* read/write/exception (epoll/kqueue only) */
	int changed;		/* set when table was modified during dispatch */
};
88
/*
 * Global event loop state: registered sockets, pending timeouts and signal
 * handlers, plus backend-specific (poll/epoll/kqueue) bookkeeping.
 */
struct eloop_data {
	int max_sock;		/* highest fd registered so far */

	size_t count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	size_t max_pollfd_map; /* number of pollfds_map currently allocated */
	size_t max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;		/* array handed to poll() */
	struct pollfd **pollfds_map;	/* fd-indexed lookup into pollfds */
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;		/* number of fd_table slots allocated */
	struct eloop_sock *fd_table;	/* fd-indexed copies of handler entries */
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;		/* descriptor from epoll_create1() */
	size_t epoll_max_event_num;	/* capacity of epoll_events */
	struct epoll_event *epoll_events;	/* epoll_wait() result buffer */
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;		/* descriptor from kqueue() */
	size_t kqueue_nevents;	/* capacity of kqueue_events */
	struct kevent *kqueue_events;	/* kevent() result buffer */
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;	/* read event handlers */
	struct eloop_sock_table writers;	/* write event handlers */
	struct eloop_sock_table exceptions;	/* exception event handlers */

	struct dl_list timeout;	/* pending timeouts in increasing expiry order */
#ifdef CONFIG_DRIVER_HDF
	int ctrl_fd[ELOOP_CTRL_FD_BUTT];	/* self-pipe for cross-thread wakeup */
#endif
	size_t signal_count;	/* number of entries in signals */
	struct eloop_signal *signals;	/* registered signal handlers */
	int signaled;		/* bumped from signal handler context */
	int pending_terminate;	/* SIGINT/SIGTERM seen; SIGALRM watchdog armed */

	int terminate;		/* request to exit the run loop */
};
128
129 static struct eloop_data eloop;
130 #ifdef CONFIG_DRIVER_HDF
131 static pthread_mutex_t lock;
132 #endif
133 #ifdef WPA_TRACE
134
/* SIGSEGV handler (WPA_TRACE builds only): dump a backtrace, then abort */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
140
eloop_trace_sock_add_ref(struct eloop_sock_table * table)141 static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
142 {
143 size_t i;
144
145 if (table == NULL || table->table == NULL)
146 return;
147 for (i = 0; i < table->count; i++) {
148 wpa_trace_add_ref(&table->table[i], eloop,
149 table->table[i].eloop_data);
150 wpa_trace_add_ref(&table->table[i], user,
151 table->table[i].user_data);
152 }
153 }
154
155
eloop_trace_sock_remove_ref(struct eloop_sock_table * table)156 static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
157 {
158 size_t i;
159
160 if (table == NULL || table->table == NULL)
161 return;
162 for (i = 0; i < table->count; i++) {
163 wpa_trace_remove_ref(&table->table[i], eloop,
164 table->table[i].eloop_data);
165 wpa_trace_remove_ref(&table->table[i], user,
166 table->table[i].user_data);
167 }
168 }
169
170 #else /* WPA_TRACE */
171
172 #define eloop_trace_sock_add_ref(table) do { } while (0)
173 #define eloop_trace_sock_remove_ref(table) do { } while (0)
174
175 #endif /* WPA_TRACE */
176 #ifdef CONFIG_DRIVER_HDF
eloop_ctrl_read_handler(int sock,void * eloop_ctx,void * sock_ctx)177 static void eloop_ctrl_read_handler(int sock, void *eloop_ctx, void *sock_ctx)
178 {
179 int8_t buf;
180
181 (void)eloop_ctx;
182 (void)sock_ctx;
183 if (sock != eloop.ctrl_fd[ELOOP_CTRL_FD_READ]) {
184 wpa_printf(MSG_ERROR, "%s: socket is mismatched.", __func__);
185 return;
186 }
187
188 if (eloop.ctrl_fd[ELOOP_CTRL_FD_READ] != -1) {
189 read(eloop.ctrl_fd[ELOOP_CTRL_FD_READ], &buf, 1);
190 } else {
191 wpa_printf(MSG_ERROR, "%s: pipe read end was closed", __func__);
192 }
193 }
194
eloop_ctrl_init()195 static void eloop_ctrl_init()
196 {
197 int ret;
198
199 ret = pipe(eloop.ctrl_fd);
200 if (ret != 0) {
201 wpa_printf(MSG_ERROR, "%s: pipe failed: %s", __func__, strerror(errno));
202 return;
203 }
204 eloop_register_read_sock(eloop.ctrl_fd[ELOOP_CTRL_FD_READ],
205 eloop_ctrl_read_handler, NULL, NULL);
206
207 wpa_printf(MSG_INFO, "eloop_ctrl_init: %d", ret);
208 }
209
eloop_ctrl_deinit()210 static void eloop_ctrl_deinit()
211 {
212 if (eloop.ctrl_fd[ELOOP_CTRL_FD_READ] != -1) {
213 eloop_unregister_read_sock(eloop.ctrl_fd[ELOOP_CTRL_FD_READ]);
214 }
215 close(eloop.ctrl_fd[ELOOP_CTRL_FD_READ]);
216 close(eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE]);
217 eloop.ctrl_fd[ELOOP_CTRL_FD_READ] = -1;
218 eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] = -1;
219
220 wpa_printf(MSG_INFO, "eloop_ctrl_deinit done");
221 }
222
eloop_wakeup()223 static int eloop_wakeup()
224 {
225 int ret = -1;
226 uint8_t buf = '0'; // no meaning
227
228 if (eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] != -1) {
229 ret = write(eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE], &buf, 1);
230 } else {
231 wpa_printf(MSG_ERROR, "%s: pipe write end was closed", __func__);
232 }
233 return ret;
234 }
235 #endif // CONFIG_DRIVER_HDF
236
/*
 * Initialize the global event loop state. Must be called before any other
 * eloop_*() function. Returns 0 on success, -1 if the epoll/kqueue backend
 * descriptor cannot be created.
 */
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* Tables must know their event type for (re)queueing */
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
#ifdef CONFIG_DRIVER_HDF
	eloop_ctrl_init();	/* set up the self-pipe used for wakeups */
#endif
	return 0;
}
270
271
272 #ifdef CONFIG_ELOOP_EPOLL
/*
 * Add a socket to the epoll set with the event mask matching the eloop
 * event type. Returns 0 on success, -1 on epoll_ctl() failure.
 */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	if (type == EVENT_TYPE_READ) {
		ev.events = EPOLLIN;
	} else if (type == EVENT_TYPE_WRITE) {
		ev.events = EPOLLOUT;
	} else if (type == EVENT_TYPE_EXCEPTION) {
		/*
		 * Exceptions are always checked when using epoll, but I
		 * suppose it's possible that someone registered a socket
		 * *only* for exception handling.
		 */
		ev.events = EPOLLERR | EPOLLHUP;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
302 #endif /* CONFIG_ELOOP_EPOLL */
303
304
305 #ifdef CONFIG_ELOOP_KQUEUE
306
/* Map an eloop event type to the corresponding kqueue filter (0 if none) */
static short event_type_kevent_filter(eloop_event_type type)
{
	if (type == EVENT_TYPE_READ)
		return EVFILT_READ;
	if (type == EVENT_TYPE_WRITE)
		return EVFILT_WRITE;
	return 0;
}
318
319
/*
 * Add a socket to the kqueue with the filter matching the eloop event
 * type. Returns 0 on success, -1 on kevent() failure.
 */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent kev;
	short filter = event_type_kevent_filter(type);

	EV_SET(&kev, sock, filter, EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &kev, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
332
333 #endif /* CONFIG_ELOOP_KQUEUE */
334
335
/*
 * Register a socket handler in the given table, growing the table and all
 * backend-specific arrays (pollfd map, epoll/kqueue event buffers,
 * fd_table) as needed. Returns 0 on success, -1 on allocation or backend
 * queueing failure.
 */
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	size_t next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	/* Grow the fd-indexed pollfd lookup map (with headroom of 50) */
	if ((size_t) new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	/* Grow the pollfd array handed to poll() */
	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		size_t nmax = eloop.count + 1 + 50;

		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* Grow the fd-indexed handler table */
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	/* Grow the epoll_wait() result buffer (doubling, min 8) */
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	/* Grow the kevent() result buffer; old contents need not be kept,
	 * so plain malloc + free of the previous buffer is sufficient */
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	/* Append the new entry to the handler table */
	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* NOTE(review): if eloop_sock_queue() fails, -1 is returned but the
	 * entry added above stays in the table - confirm callers tolerate
	 * this partial registration. */
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}
460
461
/*
 * Unregister sock from the given table, if present, and detach it from the
 * active backend (epoll/kqueue). No-op when the socket is not in the table.
 */
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	size_t i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	/* Locate the entry for sock */
	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	/* Compact the array over the removed slot */
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}
508
509
510 #ifdef CONFIG_ELOOP_POLL
511
/* Look up the pollfd for fd in the map; NULL when fd is out of range */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < 0 || fd >= mx)
		return NULL;
	return pollfds_map[fd];
}
518
519
eloop_sock_table_set_fds(struct eloop_sock_table * readers,struct eloop_sock_table * writers,struct eloop_sock_table * exceptions,struct pollfd * pollfds,struct pollfd ** pollfds_map,int max_pollfd_map)520 static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
521 struct eloop_sock_table *writers,
522 struct eloop_sock_table *exceptions,
523 struct pollfd *pollfds,
524 struct pollfd **pollfds_map,
525 int max_pollfd_map)
526 {
527 size_t i;
528 int nxt = 0;
529 int fd;
530 struct pollfd *pfd;
531
532 /* Clear pollfd lookup map. It will be re-populated below. */
533 os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
534
535 if (readers && readers->table) {
536 for (i = 0; i < readers->count; i++) {
537 fd = readers->table[i].sock;
538 assert(fd >= 0 && fd < max_pollfd_map);
539 pollfds[nxt].fd = fd;
540 pollfds[nxt].events = POLLIN;
541 pollfds[nxt].revents = 0;
542 pollfds_map[fd] = &(pollfds[nxt]);
543 nxt++;
544 }
545 }
546
547 if (writers && writers->table) {
548 for (i = 0; i < writers->count; i++) {
549 /*
550 * See if we already added this descriptor, update it
551 * if so.
552 */
553 fd = writers->table[i].sock;
554 assert(fd >= 0 && fd < max_pollfd_map);
555 pfd = pollfds_map[fd];
556 if (!pfd) {
557 pfd = &(pollfds[nxt]);
558 pfd->events = 0;
559 pfd->fd = fd;
560 pollfds[i].revents = 0;
561 pollfds_map[fd] = pfd;
562 nxt++;
563 }
564 pfd->events |= POLLOUT;
565 }
566 }
567
568 /*
569 * Exceptions are always checked when using poll, but I suppose it's
570 * possible that someone registered a socket *only* for exception
571 * handling. Set the POLLIN bit in this case.
572 */
573 if (exceptions && exceptions->table) {
574 for (i = 0; i < exceptions->count; i++) {
575 /*
576 * See if we already added this descriptor, just use it
577 * if so.
578 */
579 fd = exceptions->table[i].sock;
580 assert(fd >= 0 && fd < max_pollfd_map);
581 pfd = pollfds_map[fd];
582 if (!pfd) {
583 pfd = &(pollfds[nxt]);
584 pfd->events = POLLIN;
585 pfd->fd = fd;
586 pollfds[i].revents = 0;
587 pollfds_map[fd] = pfd;
588 nxt++;
589 }
590 }
591 }
592
593 return nxt;
594 }
595
596
eloop_sock_table_dispatch_table(struct eloop_sock_table * table,struct pollfd ** pollfds_map,int max_pollfd_map,short int revents)597 static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
598 struct pollfd **pollfds_map,
599 int max_pollfd_map,
600 short int revents)
601 {
602 size_t i;
603 struct pollfd *pfd;
604
605 if (!table || !table->table)
606 return 0;
607
608 table->changed = 0;
609 for (i = 0; i < table->count; i++) {
610 pfd = find_pollfd(pollfds_map, table->table[i].sock,
611 max_pollfd_map);
612 if (!pfd)
613 continue;
614
615 if (!(pfd->revents & revents))
616 continue;
617
618 table->table[i].handler(table->table[i].sock,
619 table->table[i].eloop_data,
620 table->table[i].user_data);
621 if (table->changed)
622 return 1;
623 }
624
625 return 0;
626 }
627
628
/*
 * Dispatch poll() results: readers first, then writers, then exception-only
 * sockets. Stops early if a handler modified a table, because the pollfds
 * array may no longer match the tables.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
647
648 #endif /* CONFIG_ELOOP_POLL */
649
650 #ifdef CONFIG_ELOOP_SELECT
651
eloop_sock_table_set_fds(struct eloop_sock_table * table,fd_set * fds)652 static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
653 fd_set *fds)
654 {
655 size_t i;
656
657 FD_ZERO(fds);
658
659 if (table->table == NULL)
660 return;
661
662 for (i = 0; i < table->count; i++) {
663 assert(table->table[i].sock >= 0);
664 FD_SET(table->table[i].sock, fds);
665 }
666 }
667
668
eloop_sock_table_dispatch(struct eloop_sock_table * table,fd_set * fds)669 static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
670 fd_set *fds)
671 {
672 size_t i;
673
674 if (table == NULL || table->table == NULL)
675 return;
676
677 table->changed = 0;
678 for (i = 0; i < table->count; i++) {
679 if (FD_ISSET(table->table[i].sock, fds)) {
680 table->table[i].handler(table->table[i].sock,
681 table->table[i].eloop_data,
682 table->table[i].user_data);
683 if (table->changed)
684 break;
685 }
686 }
687 }
688
689 #endif /* CONFIG_ELOOP_SELECT */
690
691
692 #ifdef CONFIG_ELOOP_EPOLL
eloop_sock_table_dispatch(struct epoll_event * events,int nfds)693 static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
694 {
695 struct eloop_sock *table;
696 int i;
697
698 for (i = 0; i < nfds; i++) {
699 table = &eloop.fd_table[events[i].data.fd];
700 if (table->handler == NULL)
701 continue;
702 table->handler(table->sock, table->eloop_data,
703 table->user_data);
704 if (eloop.readers.changed ||
705 eloop.writers.changed ||
706 eloop.exceptions.changed)
707 break;
708 }
709 }
710 #endif /* CONFIG_ELOOP_EPOLL */
711
712
713 #ifdef CONFIG_ELOOP_KQUEUE
714
/*
 * Invoke handlers for the fds kevent() reported, using the fd-indexed
 * fd_table. Stops early if any handler table changed, since the event
 * array may then reference unregistered sockets.
 */
static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	int idx;

	for (idx = 0; idx < nfds; idx++) {
		struct eloop_sock *entry = &eloop.fd_table[events[idx].ident];

		if (entry->handler == NULL)
			continue;
		entry->handler(entry->sock, entry->eloop_data,
			       entry->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
732
733
eloop_sock_table_requeue(struct eloop_sock_table * table)734 static int eloop_sock_table_requeue(struct eloop_sock_table *table)
735 {
736 size_t i;
737 int r;
738
739 r = 0;
740 for (i = 0; i < table->count && table->table; i++) {
741 if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
742 r = -1;
743 }
744 return r;
745 }
746
747 #endif /* CONFIG_ELOOP_KQUEUE */
748
749
/*
 * Recreate the kqueue descriptor and re-register all sockets with it.
 * NOTE(review): presumably needed after fork(), since kqueue descriptors
 * are not inherited by the child - confirm against callers. A no-op
 * returning 0 for the other backends. Returns -1 on any failure.
 */
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}
773
774
eloop_sock_table_destroy(struct eloop_sock_table * table)775 static void eloop_sock_table_destroy(struct eloop_sock_table *table)
776 {
777 if (table) {
778 size_t i;
779
780 for (i = 0; i < table->count && table->table; i++) {
781 wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
782 "sock=%d eloop_data=%p user_data=%p "
783 "handler=%p",
784 table->table[i].sock,
785 table->table[i].eloop_data,
786 table->table[i].user_data,
787 table->table[i].handler);
788 wpa_trace_dump_funcname("eloop unregistered socket "
789 "handler",
790 table->table[i].handler);
791 wpa_trace_dump("eloop sock", &table->table[i]);
792 }
793 os_free(table->table);
794 }
795 }
796
797
/*
 * Convenience wrapper: register a handler for read events on sock.
 * Returns 0 on success, -1 on failure.
 */
int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}
804
805
/* Convenience wrapper: unregister the read handler for sock */
void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
810
811
/* Map an event type to its handler table; NULL for an unknown type */
static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	if (type == EVENT_TYPE_READ)
		return &eloop.readers;
	if (type == EVENT_TYPE_WRITE)
		return &eloop.writers;
	if (type == EVENT_TYPE_EXCEPTION)
		return &eloop.exceptions;
	return NULL;
}
825
826
/*
 * Register a handler for the given event type on sock. sock must be a
 * valid (non-negative) descriptor. Returns 0 on success, -1 on failure
 * (unknown event type or allocation error).
 */
int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}
838
839
/* Remove the handler registered for the given event type on sock */
void eloop_unregister_sock(int sock, eloop_event_type type)
{
	eloop_sock_table_remove_sock(eloop_get_sock_table(type), sock);
}
847
848
/*
 * Register a one-shot timeout callback to fire after secs/usecs.
 * Returns 0 on success (including the overflow case, where a timeout too
 * far in the future is treated as "never" and silently dropped), -1 on
 * allocation or clock failure.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec)
		goto overflow;
	timeout->time.usec += usecs;
	/* Normalize usec into [0, 1000000) */
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	if (timeout->time.sec < now_sec)
		goto overflow;
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);
#ifdef CONFIG_DRIVER_HDF
	/* The timeout list may be touched from other threads under HDF */
	pthread_mutex_lock(&lock);
#endif
	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
#ifdef CONFIG_DRIVER_HDF
			/* Wake the loop so it recomputes its wait time */
			(void)eloop_wakeup();
			pthread_mutex_unlock(&lock);
#endif
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_unlock(&lock);
	(void)eloop_wakeup();
#endif

	return 0;

overflow:
	/*
	 * Integer overflow - assume long enough timeout to be assumed
	 * to be infinite, i.e., the timeout would never happen.
	 */
	wpa_printf(MSG_DEBUG,
		   "ELOOP: Too long timeout (secs=%u usecs=%u) to ever happen - ignore it",
		   secs,usecs);
	os_free(timeout);
	return 0;
}
913
914
/*
 * Unlink a timeout from the pending list and free it. Takes the HDF list
 * lock around the unlink; callers must not already hold it.
 */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_lock(&lock);
#endif
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_unlock(&lock);
#endif
}
928
929
/*
 * Cancel all timeouts matching handler; ELOOP_ALL_CTX acts as a wildcard
 * for eloop_data/user_data. Returns the number of timeouts removed.
 * NOTE(review): the list is traversed here without taking the HDF lock
 * (unlike eloop_register_timeout()) - confirm all callers run on the
 * eloop thread.
 */
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}
950
951
/*
 * Cancel at most one timeout matching handler/eloop_data/user_data exactly.
 * On removal, *remaining is set to the time left until the timeout would
 * have fired (0/0 if it was already due). Returns 1 if a timeout was
 * removed, 0 otherwise.
 */
int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}
977
978
/*
 * Return 1 if a timeout with the exact handler/eloop_data/user_data triple
 * is currently pending, 0 otherwise. Holds the HDF list lock while
 * scanning.
 */
int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *entry;
	int found = 0;
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_lock(&lock);
#endif
	dl_list_for_each(entry, &eloop.timeout, struct eloop_timeout, list) {
		if (entry->handler == handler &&
		    entry->eloop_data == eloop_data &&
		    entry->user_data == user_data) {
			found = 1;
			break;
		}
	}
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_unlock(&lock);
#endif
	return found;
}
1001
1002
/*
 * Shorten an existing matching timeout to req_secs/req_usecs if that is
 * sooner than its current remaining time. Returns 1 if the timeout was
 * rescheduled, 0 if it was left unchanged, -1 if no matching timeout
 * exists.
 */
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *entry;

	dl_list_for_each(entry, &eloop.timeout, struct eloop_timeout, list) {
		if (entry->handler != handler ||
		    entry->eloop_data != eloop_data ||
		    entry->user_data != user_data)
			continue;

		requested.sec = req_secs;
		requested.usec = req_usecs;
		os_get_reltime(&now);
		os_reltime_sub(&entry->time, &now, &remaining);
		if (!os_reltime_before(&requested, &remaining))
			return 0;

		eloop_cancel_timeout(handler, eloop_data, user_data);
		eloop_register_timeout(requested.sec, requested.usec,
				       handler, eloop_data, user_data);
		return 1;
	}

	return -1;
}
1033
1034
/*
 * Extend an existing matching timeout to req_secs/req_usecs if that is
 * later than its current remaining time. Returns 1 if the timeout was
 * rescheduled, 0 if it was left unchanged, -1 if no matching timeout
 * exists.
 */
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *entry;

	dl_list_for_each(entry, &eloop.timeout, struct eloop_timeout, list) {
		if (entry->handler != handler ||
		    entry->eloop_data != eloop_data ||
		    entry->user_data != user_data)
			continue;

		requested.sec = req_secs;
		requested.usec = req_usecs;
		os_get_reltime(&now);
		os_reltime_sub(&entry->time, &now, &remaining);
		if (!os_reltime_before(&remaining, &requested))
			return 0;

		eloop_cancel_timeout(handler, eloop_data, user_data);
		eloop_register_timeout(requested.sec, requested.usec,
				       handler, eloop_data, user_data);
		return 1;
	}

	return -1;
}
1065
1066
1067 #ifndef CONFIG_NATIVE_WINDOWS
/*
 * SIGALRM watchdog armed by eloop_handle_signal(): if a SIGINT/SIGTERM
 * could not be processed within two seconds, assume a busy loop is
 * blocking shutdown and terminate the process forcefully.
 */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
1077 #endif /* CONFIG_NATIVE_WINDOWS */
1078
1079
/*
 * Process-wide signal handler: records which signal fired so that
 * eloop_process_pending_signals() can run the registered callbacks later
 * from the main loop. On the first SIGINT/SIGTERM it arms a 2-second
 * SIGALRM watchdog (eloop_handle_alarm) that force-exits if shutdown
 * stalls.
 */
static void eloop_handle_signal(int sig)
{
	size_t i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
#ifdef CONFIG_DRIVER_HDF
	/* Wake the loop so the pending signal is handled promptly */
	(void)eloop_wakeup();
#endif
}
1105
1106
eloop_process_pending_signals(void)1107 static void eloop_process_pending_signals(void)
1108 {
1109 size_t i;
1110
1111 if (eloop.signaled == 0)
1112 return;
1113 eloop.signaled = 0;
1114
1115 if (eloop.pending_terminate) {
1116 #ifndef CONFIG_NATIVE_WINDOWS
1117 alarm(0);
1118 #endif /* CONFIG_NATIVE_WINDOWS */
1119 eloop.pending_terminate = 0;
1120 }
1121
1122 for (i = 0; i < eloop.signal_count; i++) {
1123 if (eloop.signals[i].signaled) {
1124 eloop.signals[i].signaled = 0;
1125 eloop.signals[i].handler(eloop.signals[i].sig,
1126 eloop.signals[i].user_data);
1127 }
1128 }
1129 }
1130
1131
/*
 * Register a handler for signal 'sig'. The handler is invoked from the main
 * event loop (not from signal context) with the given user_data.
 * Returns 0 on success, -1 on memory allocation failure.
 */
int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *resized;
	struct eloop_signal *slot;

	resized = os_realloc_array(eloop.signals, eloop.signal_count + 1,
				   sizeof(struct eloop_signal));
	if (resized == NULL)
		return -1;

	/* Fill the new entry before publishing the grown table, keeping the
	 * original store ordering (entry, count, table pointer). */
	slot = &resized[eloop.signal_count];
	slot->sig = sig;
	slot->user_data = user_data;
	slot->handler = handler;
	slot->signaled = 0;
	eloop.signal_count++;
	eloop.signals = resized;
	signal(sig, eloop_handle_signal);

	return 0;
}
1152
1153
/*
 * Register the same handler for both termination signals (SIGINT and
 * SIGTERM). Returns 0 on success, -1 if either registration fails.
 */
int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	if (eloop_register_signal(SIGINT, handler, user_data) != 0)
		return -1;
	return eloop_register_signal(SIGTERM, handler, user_data);
}
1162
1163
/*
 * Register a handler for the reconfiguration signal (SIGHUP). On native
 * Windows builds, where SIGHUP does not exist, this is a successful no-op.
 * Returns 0 on success, -1 on failure.
 */
int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifndef CONFIG_NATIVE_WINDOWS
	return eloop_register_signal(SIGHUP, handler, user_data);
#else /* CONFIG_NATIVE_WINDOWS */
	return 0;
#endif /* CONFIG_NATIVE_WINDOWS */
}
1173
1174
/*
 * Run the event loop until eloop_terminate() is called or there is nothing
 * left to wait for (no timeouts and no registered sockets). Each iteration:
 * compute the wait time from the nearest timeout, block in the configured
 * backend (poll/select/epoll/kqueue), then process pending signals, at most
 * one due timeout, and finally any ready socket handlers.
 */
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	/* fd_set can be large; allocate on the heap instead of the stack. */
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		/* Convert the time until the nearest timeout (list head) into
		 * the format required by the active backend. */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

		/* Block in the backend; with no timeout registered, wait
		 * indefinitely (NULL / -1 timeout argument). */
#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		/* EINTR (interrupted by a signal) is expected and handled by
		 * the signal-processing path below; anything else is fatal. */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */

				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();


		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				/* Copy out the callback data before removing
				 * the entry, since removal frees it. */
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}
1358
1359
/*
 * Request the event loop to stop: eloop_run() checks this flag at the top of
 * each iteration and returns once it is set.
 */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}
1364
1365
/*
 * Free all resources held by the event loop: log and remove any timeouts
 * still registered (helps catch leaked timers), destroy the socket tables,
 * free the signal table, and release backend-specific state (pollfd arrays,
 * epoll fd, kqueue fd). HDF builds also tear down the control fd pair.
 */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		/* Normalize the remaining time (borrow from sec when the
		 * usec difference would be negative) for the printout. */
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
#ifdef CONFIG_DRIVER_HDF
	eloop_ctrl_deinit();
#endif
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1414
1415
/*
 * Return non-zero if loop termination has been requested or is pending
 * (a SIGINT/SIGTERM was received but not yet processed).
 */
int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}
1420
1421
/*
 * Block (with no timeout) until the given socket becomes readable, outside
 * the main event loop. Invalid descriptors (sock < 0) are ignored in all
 * backend variants. Fix: the kqueue branch previously lacked the sock < 0
 * guard that the poll and select/epoll branches have, so a negative fd was
 * registered with kevent().
 */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We can use epoll() here. But epoll() requires 4 system calls.
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait, and close() for
	 * epoll fd. So select() is better for performance here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	/* Match the other backends: do not register an invalid fd. */
	if (sock < 0)
		return;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1463
#ifdef CONFIG_ELOOP_SELECT
/* CONFIG_ELOOP_SELECT may have been defined locally at the top of this file
 * (when no poll/epoll/kqueue backend was configured); undefine it here so
 * the macro does not leak beyond this translation unit's source text. */
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */
1467