/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both poll and epoll
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both poll and kqueue
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif
29 
30 #ifdef CONFIG_ELOOP_POLL
31 #include <poll.h>
32 #endif /* CONFIG_ELOOP_POLL */
33 
34 #ifdef CONFIG_ELOOP_EPOLL
35 #include <sys/epoll.h>
36 #endif /* CONFIG_ELOOP_EPOLL */
37 
38 #ifdef CONFIG_ELOOP_KQUEUE
39 #include <sys/event.h>
40 #endif /* CONFIG_ELOOP_KQUEUE */
41 
struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	int kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

static struct eloop_data eloop;

#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
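

/*
 * Usage sketch (illustrative, not compiled; handler and function names
 * below are hypothetical): the expected lifecycle is eloop_init() once,
 * handler registration, eloop_run() until terminated, eloop_destroy().
 */
#if 0
static void example_timeout(void *eloop_ctx, void *user_ctx)
{
	/* Stop the loop once the first timeout fires */
	eloop_terminate();
}

static int example_main(void)
{
	if (eloop_init() < 0)
		return -1;
	eloop_register_timeout(1, 0, example_timeout, NULL, NULL);
	eloop_run(); /* blocks until eloop_terminate() is called */
	eloop_destroy();
	return 0;
}
#endif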


#ifdef CONFIG_ELOOP_EPOLL
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */
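

/*
 * Minimal standalone sketch of the epoll add-then-wait pattern used
 * above (illustrative only; fd is any readable descriptor):
 */
#if 0
#include <sys/epoll.h>
#include <unistd.h>

static int example_epoll_wait_readable(int fd)
{
	struct epoll_event ev, out;
	int epfd = epoll_create1(0);
	int n;

	if (epfd < 0)
		return -1;
	os_memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;
	ev.data.fd = fd;
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
		close(epfd);
		return -1;
	}
	n = epoll_wait(epfd, &out, 1, -1); /* block until fd is readable */
	close(epfd);
	return n == 1 ? 0 : -1;
}
#endif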


#ifdef CONFIG_ELOOP_KQUEUE
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	int filter;
	struct kevent ke;

	switch (type) {
	case EVENT_TYPE_READ:
		filter = EVFILT_READ;
		break;
	case EVENT_TYPE_WRITE:
		filter = EVFILT_WRITE;
		break;
	default:
		filter = 0;
	}
	EV_SET(&ke, sock, filter, EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_KQUEUE */
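

/*
 * Equivalent kqueue sketch (BSD targets; illustrative only): a one-shot
 * wait for readability, mirroring the EV_SET()/kevent() registration
 * above.
 */
#if 0
#include <sys/event.h>
#include <unistd.h>

static int example_kqueue_wait_readable(int fd)
{
	struct kevent ke, out;
	int kfd = kqueue();
	int n;

	if (kfd < 0)
		return -1;
	EV_SET(&ke, fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	n = kevent(kfd, &ke, 1, &out, 1, NULL); /* register and wait */
	close(kfd);
	return n == 1 ? 0 : -1;
}
#endif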


static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                                     int sock, eloop_sock_handler handler,
                                     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	int next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (new_max_sock >= eloop.max_fd) {
		next = eloop.max_fd == 0 ? 16 : eloop.max_fd * 2;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                                         int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, 0, EV_DELETE, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */
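

/*
 * Minimal select() sketch of the FD_SET()/FD_ISSET() pattern above
 * (illustrative only; fd is any readable descriptor):
 */
#if 0
#include <sys/select.h>

static int example_select_readable(int fd)
{
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);
	/* Block until fd becomes readable; NULL timeout means no limit */
	if (select(fd + 1, &rfds, NULL, NULL, NULL) < 0)
		return -1;
	return FD_ISSET(fd, &rfds) ? 1 : 0;
}
#endif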


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}


static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
	int i, r;

	r = 0;
	for (i = 0; i < table->count && table->table; i++) {
		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
			r = -1;
	}
	return r;
}

#endif /* CONFIG_ELOOP_KQUEUE */


int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}
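

/*
 * Kqueue descriptors are not inherited across fork(), so a child
 * process that keeps using the event loop must rebuild the kernel-side
 * state. Hedged sketch of the assumed call pattern (function name
 * hypothetical):
 */
#if 0
#include <unistd.h>

static void example_after_fork(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		/* Child: recreate the kqueue and re-register all sockets.
		 * For the select/poll/epoll backends this is a no-op. */
		eloop_sock_requeue();
	}
}
#endif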


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
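

/*
 * Usage sketch for the read-socket convenience wrappers (illustrative;
 * the handler and socket setup below are hypothetical):
 */
#if 0
#include <sys/socket.h>

static void example_receive(int sock, void *eloop_ctx, void *sock_ctx)
{
	char buf[256];

	/* Called from eloop_run() whenever sock is readable */
	recv(sock, buf, sizeof(buf), 0);
}

static int example_register(int sock)
{
	return eloop_register_read_sock(sock, example_receive, NULL, NULL);
}
#endif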


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume the timeout is long enough to be
		 * effectively infinite, i.e., it would never trigger.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}
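

/*
 * Usage sketch: timeouts are one-shot, so a periodic timer re-arms
 * itself from its own handler (handler name hypothetical):
 */
#if 0
static void example_tick(void *eloop_ctx, void *user_ctx)
{
	/* ... periodic work ... */

	/* Re-register for another 10 second interval */
	eloop_register_timeout(10, 0, example_tick, eloop_ctx, user_ctx);
}
#endif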


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}
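

/*
 * ELOOP_ALL_CTX acts as a wildcard for either context pointer, e.g. to
 * drop every pending example_tick timeout regardless of its contexts
 * (handler name hypothetical):
 */
#if 0
static void example_cancel_all_ticks(void)
{
	/* Wildcard match on both eloop_data and user_data */
	eloop_cancel_timeout(example_tick, ELOOP_ALL_CTX, ELOOP_ALL_CTX);
}
#endif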


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
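

/*
 * The two helpers above are mirror images: eloop_deplete_timeout()
 * shortens an existing timeout if the requested interval would fire
 * sooner, while eloop_replenish_timeout() extends it if the requested
 * interval would fire later. Illustrative sketch (handler and function
 * names hypothetical):
 */
#if 0
static void example_adjust(void)
{
	/* Pending example_tick fires in 30 s; make it fire in 5 s
	 * instead (returns 1) */
	eloop_deplete_timeout(5, 0, example_tick, NULL, NULL);

	/* Now push the 5 s timeout out to 60 s (returns 1) */
	eloop_replenish_timeout(60, 0, example_tick, NULL, NULL);
}
#endif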


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}
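

/*
 * Signal handlers registered here are invoked from eloop_run(), not
 * from signal context (eloop_handle_signal() only sets flags), so they
 * may safely use non-async-signal-safe calls. Usage sketch (handler
 * names hypothetical):
 */
#if 0
static void example_terminate(int sig, void *signal_ctx)
{
	wpa_printf(MSG_INFO, "Signal %d received - terminating", sig);
	eloop_terminate();
}

static int example_setup_signals(void)
{
	/* Registers the handler for both SIGINT and SIGTERM */
	return eloop_register_signal_terminate(example_terminate, NULL);
}
#endif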


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */

				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}


int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * epoll() could be used here, but it would require four system
	 * calls: epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and
	 * close() for the epoll fd. select() performs better for this
	 * one-shot wait.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */