/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>
#ifdef CONFIG_DRIVER_HDF
#include <pthread.h>
#endif

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both of poll and epoll
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both of poll and kqueue
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
#include <sys/event.h>
#endif /* CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_DRIVER_HDF
enum eloop_ctrl_fd_index {
	ELOOP_CTRL_FD_READ = 0,
	ELOOP_CTRL_FD_WRITE,

	ELOOP_CTRL_FD_BUTT
};
#endif

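/*
 * One registered socket: the descriptor, the callback invoked when the
 * socket becomes ready, and the two opaque context pointers handed back
 * to that callback.
 */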
struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	int kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;
#ifdef CONFIG_DRIVER_HDF
	int ctrl_fd[ELOOP_CTRL_FD_BUTT];
#endif
	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

static struct eloop_data eloop;
#ifdef CONFIG_DRIVER_HDF
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
#endif
#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */
#ifdef CONFIG_DRIVER_HDF
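/*
 * The HDF build can register timeouts from threads other than the one
 * running eloop_run(). A control pipe (the classic self-pipe trick) is
 * registered as a read socket, so writing one byte from eloop_wakeup()
 * knocks a sleeping wait out of the backend and makes it re-evaluate the
 * timeout list.
 */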
static void eloop_ctrl_read_handler(int sock, void *eloop_ctx, void *sock_ctx)
{
	int8_t buf;

	(void)eloop_ctx;
	(void)sock_ctx;
	if (sock != eloop.ctrl_fd[ELOOP_CTRL_FD_READ]) {
		wpa_printf(MSG_ERROR, "%s: socket is mismatched.", __func__);
		return;
	}

	if (eloop.ctrl_fd[ELOOP_CTRL_FD_READ] != -1) {
		/* Drain the wakeup byte; its value carries no meaning. */
		if (read(eloop.ctrl_fd[ELOOP_CTRL_FD_READ], &buf, 1) < 0)
			wpa_printf(MSG_ERROR, "%s: pipe read failed: %s",
				   __func__, strerror(errno));
	} else {
		wpa_printf(MSG_ERROR, "%s: pipe read end was closed", __func__);
	}
}

static void eloop_ctrl_init(void)
{
	int ret;

	ret = pipe(eloop.ctrl_fd);
	if (ret != 0) {
		wpa_printf(MSG_ERROR, "%s: pipe failed: %s", __func__, strerror(errno));
		/* Mark both ends invalid so deinit/wakeup do not touch fd 0 */
		eloop.ctrl_fd[ELOOP_CTRL_FD_READ] = -1;
		eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] = -1;
		return;
	}
	eloop_register_read_sock(eloop.ctrl_fd[ELOOP_CTRL_FD_READ],
				 eloop_ctrl_read_handler, NULL, NULL);

	wpa_printf(MSG_INFO, "eloop_ctrl_init: %d", ret);
}

static void eloop_ctrl_deinit(void)
{
	if (eloop.ctrl_fd[ELOOP_CTRL_FD_READ] != -1) {
		eloop_unregister_read_sock(eloop.ctrl_fd[ELOOP_CTRL_FD_READ]);
		close(eloop.ctrl_fd[ELOOP_CTRL_FD_READ]);
	}
	if (eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] != -1)
		close(eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE]);
	eloop.ctrl_fd[ELOOP_CTRL_FD_READ] = -1;
	eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] = -1;

	wpa_printf(MSG_INFO, "eloop_ctrl_deinit done");
}

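/*
 * Wake up the event loop thread by writing a single byte to the control
 * pipe; the byte itself carries no information and is drained by
 * eloop_ctrl_read_handler().
 */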
static int eloop_wakeup(void)
{
	int ret = -1;
	uint8_t buf = '0'; // no meaning

	if (eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] != -1) {
		ret = write(eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE], &buf, 1);
	} else {
		wpa_printf(MSG_ERROR, "%s: pipe write end was closed", __func__);
	}
	return ret;
}
#endif // CONFIG_DRIVER_HDF

int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
#ifdef CONFIG_DRIVER_HDF
	eloop_ctrl_init();
#endif
	return 0;
}


#ifdef CONFIG_ELOOP_EPOLL
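/* Map the eloop event type to an epoll event mask and register the fd. */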
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static short event_type_kevent_filter(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return EVFILT_READ;
	case EVENT_TYPE_WRITE:
		return EVFILT_WRITE;
	default:
		return 0;
	}
}


static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent ke;

	EV_SET(&ke, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}

#endif /* CONFIG_ELOOP_KQUEUE */


static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	int next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}


static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
	int i, r;

	r = 0;
	for (i = 0; i < table->count && table->table; i++) {
		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
			r = -1;
	}
	return r;
}

#endif /* CONFIG_ELOOP_KQUEUE */

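/*
 * Rebuild the kqueue and re-register every socket from the three tables.
 * A kqueue descriptor is not inherited across fork(), so a forked child
 * needs this before it can receive events; for the other backends this is
 * effectively a no-op that returns 0.
 */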
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}

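/*
 * Register a one-shot timeout. The timeout list is kept sorted by expiry
 * so eloop_run() only needs to inspect the head. In the HDF build the
 * list is mutex-protected and the loop is woken up so that a timeout
 * registered from another thread takes effect immediately.
 */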
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume a timeout this long is effectively
		 * infinite, i.e., it would never trigger.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_lock(&lock);
#endif
	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
#ifdef CONFIG_DRIVER_HDF
			(void)eloop_wakeup();
			pthread_mutex_unlock(&lock);
#endif
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_unlock(&lock);
	(void)eloop_wakeup();
#endif

	return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_lock(&lock);
#endif
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_unlock(&lock);
#endif
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_lock(&lock);
#endif
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
#ifdef CONFIG_DRIVER_HDF
			pthread_mutex_unlock(&lock);
#endif
			return 1;
		}
	}
#ifdef CONFIG_DRIVER_HDF
	pthread_mutex_unlock(&lock);
#endif
	return 0;
}

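/*
 * Shorten an existing matching timeout: if the requested interval would
 * expire sooner than the time remaining, the timeout is re-registered
 * with the shorter value. Returns 1 if the timeout was replaced, 0 if the
 * existing one was already sooner, and -1 if no match was found.
 */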
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}

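/*
 * Counterpart of eloop_deplete_timeout(): extend an existing matching
 * timeout when the time remaining is shorter than the requested interval.
 * Same return convention.
 */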
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
#ifdef CONFIG_DRIVER_HDF
	(void)eloop_wakeup();
#endif
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}

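/*
 * Main loop: each iteration computes the delay until the nearest timeout,
 * waits in the configured backend (poll/select/epoll/kqueue), processes
 * pending signals, fires expired timeouts and finally dispatches socket
 * handlers. The per-table 'changed' flags prevent dispatching stale wait
 * results after a handler has modified the socket tables.
 */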
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */

				   , strerror(errno));
			goto out;
		}
1281
1282 eloop.readers.changed = 0;
1283 eloop.writers.changed = 0;
1284 eloop.exceptions.changed = 0;
1285
1286 eloop_process_pending_signals();
1287
1288
1289 /* check if some registered timeouts have occurred */
1290 timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
1291 list);
1292 if (timeout) {
1293 os_get_reltime(&now);
1294 if (!os_reltime_before(&now, &timeout->time)) {
1295 void *eloop_data = timeout->eloop_data;
1296 void *user_data = timeout->user_data;
1297 eloop_timeout_handler handler =
1298 timeout->handler;
1299 eloop_remove_timeout(timeout);
1300 handler(eloop_data, user_data);
1301 }
1302
1303 }
1304
1305 if (res <= 0)
1306 continue;
1307
1308 if (eloop.readers.changed ||
1309 eloop.writers.changed ||
1310 eloop.exceptions.changed) {
1311 /*
1312 * Sockets may have been closed and reopened with the
1313 * same FD in the signal or timeout handlers, so we
1314 * must skip the previous results and check again
1315 * whether any of the currently registered sockets have
1316 * events.
1317 */
1318 continue;
1319 }
1320
1321 #ifdef CONFIG_ELOOP_POLL
1322 eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
1323 &eloop.exceptions, eloop.pollfds_map,
1324 eloop.max_pollfd_map);
1325 #endif /* CONFIG_ELOOP_POLL */
1326 #ifdef CONFIG_ELOOP_SELECT
1327 eloop_sock_table_dispatch(&eloop.readers, rfds);
1328 eloop_sock_table_dispatch(&eloop.writers, wfds);
1329 eloop_sock_table_dispatch(&eloop.exceptions, efds);
1330 #endif /* CONFIG_ELOOP_SELECT */
1331 #ifdef CONFIG_ELOOP_EPOLL
1332 eloop_sock_table_dispatch(eloop.epoll_events, res);
1333 #endif /* CONFIG_ELOOP_EPOLL */
1334 #ifdef CONFIG_ELOOP_KQUEUE
1335 eloop_sock_table_dispatch(eloop.kqueue_events, res);
1336 #endif /* CONFIG_ELOOP_KQUEUE */
1337 }
1338
1339 eloop.terminate = 0;
1340 out:
1341 #ifdef CONFIG_ELOOP_SELECT
1342 os_free(rfds);
1343 os_free(wfds);
1344 os_free(efds);
1345 #endif /* CONFIG_ELOOP_SELECT */
1346 return;
1347 }
1348
1349
eloop_terminate(void)1350 void eloop_terminate(void)
1351 {
1352 eloop.terminate = 1;
1353 }
1354
1355
eloop_destroy(void)1356 void eloop_destroy(void)
1357 {
1358 struct eloop_timeout *timeout, *prev;
1359 struct os_reltime now;
1360
1361 os_get_reltime(&now);
1362 dl_list_for_each_safe(timeout, prev, &eloop.timeout,
1363 struct eloop_timeout, list) {
1364 int sec, usec;
1365 sec = timeout->time.sec - now.sec;
1366 usec = timeout->time.usec - now.usec;
1367 if (timeout->time.usec < now.usec) {
1368 sec--;
1369 usec += 1000000;
1370 }
1371 wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
1372 "eloop_data=%p user_data=%p handler=%p",
1373 sec, usec, timeout->eloop_data, timeout->user_data,
1374 timeout->handler);
1375 wpa_trace_dump_funcname("eloop unregistered timeout handler",
1376 timeout->handler);
1377 wpa_trace_dump("eloop timeout", timeout);
1378 eloop_remove_timeout(timeout);
1379 }
1380 #ifdef CONFIG_DRIVER_HDF
1381 eloop_ctrl_deinit();
1382 #endif
1383 eloop_sock_table_destroy(&eloop.readers);
1384 eloop_sock_table_destroy(&eloop.writers);
1385 eloop_sock_table_destroy(&eloop.exceptions);
1386 os_free(eloop.signals);
1387
1388 #ifdef CONFIG_ELOOP_POLL
1389 os_free(eloop.pollfds);
1390 os_free(eloop.pollfds_map);
1391 #endif /* CONFIG_ELOOP_POLL */
1392 #if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
1393 os_free(eloop.fd_table);
1394 #endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
1395 #ifdef CONFIG_ELOOP_EPOLL
1396 os_free(eloop.epoll_events);
1397 close(eloop.epollfd);
1398 #endif /* CONFIG_ELOOP_EPOLL */
1399 #ifdef CONFIG_ELOOP_KQUEUE
1400 os_free(eloop.kqueue_events);
1401 close(eloop.kqueuefd);
1402 #endif /* CONFIG_ELOOP_KQUEUE */
1403 }
1404
1405
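/* Nonzero once termination has been requested, including the window
 * between signal delivery and its processing in the loop. */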
int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}

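/*
 * Block until the given descriptor is readable, bypassing the registered
 * socket tables; intended for short one-off synchronous waits outside the
 * main loop.
 */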
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll here, but that would require four system calls
	 * (epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and close()
	 * on the epoll fd), so select() performs better for this one-shot
	 * wait.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */
1457