• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Event loop based on select() loop
3  * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
4  *
5  * This software may be distributed under the terms of the BSD license.
6  * See README for more details.
7  */
8 
9 #include "includes.h"
10 #include <assert.h>
11 
12 #include "common.h"
13 #include "trace.h"
14 #include "list.h"
15 #include "eloop.h"
16 
17 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
18 #error Do not define both of poll and epoll
19 #endif
20 
21 #if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
22 #error Do not define both of poll and kqueue
23 #endif
24 
25 #if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
26     !defined(CONFIG_ELOOP_KQUEUE)
27 #define CONFIG_ELOOP_SELECT
28 #endif
29 
30 #ifdef CONFIG_ELOOP_POLL
31 #include <poll.h>
32 #endif /* CONFIG_ELOOP_POLL */
33 
34 #ifdef CONFIG_ELOOP_EPOLL
35 #include <sys/epoll.h>
36 #endif /* CONFIG_ELOOP_EPOLL */
37 
38 #ifdef CONFIG_ELOOP_KQUEUE
39 #include <sys/event.h>
40 #endif /* CONFIG_ELOOP_KQUEUE */
41 
/* Indexes into eloop_data.ctrl_fd for the internal wakeup pipe ends. */
enum eloop_ctrl_fd_index {
	ELOOP_CTRL_FD_READ = 0,	/* read end, registered as an eloop reader */
	ELOOP_CTRL_FD_WRITE,	/* write end, used by eloop_wakeup() */

	ELOOP_CTRL_FD_BUTT	/* number of pipe fds; array size sentinel */
};
48 
/* One registered socket: fd plus handler callback and its two context
 * pointers. The WPA_TRACE_* fields exist only when trace support is built. */
struct eloop_sock {
	int sock;			/* file descriptor being monitored */
	void *eloop_data;		/* first context pointer for handler */
	void *user_data;		/* second context pointer for handler */
	eloop_sock_handler handler;	/* called when the socket is ready */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
58 
/* One registered timeout, kept in eloop_data.timeout sorted by
 * increasing absolute expiry time. */
struct eloop_timeout {
	struct dl_list list;		/* linkage in eloop.timeout */
	struct os_reltime time;		/* absolute (relative-clock) expiry */
	void *eloop_data;		/* first context pointer for handler */
	void *user_data;		/* second context pointer for handler */
	eloop_timeout_handler handler;	/* called once when timeout expires */
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};
69 
/* One registered signal handler. 'signaled' is incremented from the raw
 * signal handler and consumed later from the main loop context. */
struct eloop_signal {
	int sig;			/* signal number (e.g., SIGINT) */
	void *user_data;		/* context pointer for handler */
	eloop_signal_handler handler;	/* called from main loop, not signal ctx */
	int signaled;			/* pending-delivery counter */
};
76 
/* Dynamic array of registered sockets for one event type. */
struct eloop_sock_table {
	int count;			/* number of valid entries in table */
	struct eloop_sock *table;	/* realloc'd array of registrations */
	eloop_event_type type;		/* READ/WRITE/EXCEPTION for this table */
	int changed;			/* set on add/remove; dispatch loops use
					 * it to bail out of a stale iteration */
};
83 
/* Global state for the event loop (single instance: 'eloop' below). */
struct eloop_data {
	int max_sock;		/* highest registered fd value */

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;		/* array handed to poll() */
	struct pollfd **pollfds_map;	/* fd -> pollfd slot lookup */
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;			/* allocated size of fd_table */
	struct eloop_sock *fd_table;	/* fd-indexed copy of registrations */
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;			/* epoll instance fd */
	int epoll_max_event_num;	/* allocated size of epoll_events */
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;			/* kqueue instance fd */
	int kqueue_nevents;		/* allocated size of kqueue_events */
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;		/* sorted list of eloop_timeout */

	/* self-pipe used to wake the loop from other threads/signal context */
	int ctrl_fd[ELOOP_CTRL_FD_BUTT];

	int signal_count;		/* number of entries in signals */
	struct eloop_signal *signals;
	int signaled;			/* any signal pending? */
	int pending_terminate;		/* SIGINT/SIGTERM seen, alarm armed */

	int terminate;			/* main loop should exit */
};
123 
124 static struct eloop_data eloop;
125 
126 
127 #ifdef WPA_TRACE
128 
/* SIGSEGV handler (trace builds only): dump a backtrace, then abort so a
 * core file is still produced. */
static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}
134 
eloop_trace_sock_add_ref(struct eloop_sock_table * table)135 static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
136 {
137 	int i;
138 	if (table == NULL || table->table == NULL)
139 		return;
140 	for (i = 0; i < table->count; i++) {
141 		wpa_trace_add_ref(&table->table[i], eloop,
142 				  table->table[i].eloop_data);
143 		wpa_trace_add_ref(&table->table[i], user,
144 				  table->table[i].user_data);
145 	}
146 }
147 
148 
eloop_trace_sock_remove_ref(struct eloop_sock_table * table)149 static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
150 {
151 	int i;
152 	if (table == NULL || table->table == NULL)
153 		return;
154 	for (i = 0; i < table->count; i++) {
155 		wpa_trace_remove_ref(&table->table[i], eloop,
156 				     table->table[i].eloop_data);
157 		wpa_trace_remove_ref(&table->table[i], user,
158 				     table->table[i].user_data);
159 	}
160 }
161 
162 #else /* WPA_TRACE */
163 
164 #define eloop_trace_sock_add_ref(table) do { } while (0)
165 #define eloop_trace_sock_remove_ref(table) do { } while (0)
166 
167 #endif /* WPA_TRACE */
168 
eloop_ctrl_read_handler(int sock,void * eloop_ctx,void * sock_ctx)169 static void eloop_ctrl_read_handler(int sock, void *eloop_ctx, void *sock_ctx)
170 {
171 	int8_t buf;
172 
173 	(void)eloop_ctx;
174 	(void)sock_ctx;
175 	if (sock != eloop.ctrl_fd[ELOOP_CTRL_FD_READ]) {
176 		wpa_printf(MSG_ERROR, "%s: socket is mismatched.", __func__);
177 		return;
178 	}
179 
180 	if (eloop.ctrl_fd[ELOOP_CTRL_FD_READ] != -1) {
181 		read(eloop.ctrl_fd[ELOOP_CTRL_FD_READ], &buf, 1);
182 	} else {
183 		wpa_printf(MSG_ERROR, "%s: pipe read end was closed", __func__);
184 	}
185 }
186 
eloop_ctrl_init()187 static void eloop_ctrl_init()
188 {
189 	int ret;
190 
191 	ret = pipe(eloop.ctrl_fd);
192 	if (ret != 0) {
193 		wpa_printf(MSG_ERROR, "%s: pipe failed: %s", __func__, strerror(errno));
194 		return;
195 	}
196 	eloop_register_read_sock(eloop.ctrl_fd[ELOOP_CTRL_FD_READ],
197 				 eloop_ctrl_read_handler, NULL, NULL);
198 
199 	wpa_printf(MSG_INFO, "eloop_ctrl_init: %d", ret);
200 }
201 
eloop_ctrl_deinit()202 static void eloop_ctrl_deinit()
203 {
204 	if (eloop.ctrl_fd[ELOOP_CTRL_FD_READ] != -1) {
205 		eloop_unregister_read_sock(eloop.ctrl_fd[ELOOP_CTRL_FD_READ]);
206 	}
207 	close(eloop.ctrl_fd[ELOOP_CTRL_FD_READ]);
208 	close(eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE]);
209 	eloop.ctrl_fd[ELOOP_CTRL_FD_READ] = -1;
210 	eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] = -1;
211 
212 	wpa_printf(MSG_INFO, "eloop_ctrl_deinit done");
213 }
214 
eloop_wakeup()215 static int eloop_wakeup()
216 {
217 	int ret = -1;
218 	uint8_t buf = '0'; // no meaning
219 
220 	if (eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE] != -1) {
221 		ret = write(eloop.ctrl_fd[ELOOP_CTRL_FD_WRITE], &buf, 1);
222 	} else {
223 		wpa_printf(MSG_ERROR, "%s: pipe write end was closed", __func__);
224 	}
225 	return ret;
226 }
227 
/*
 * Initialize the global event loop state. Creates the backend event
 * instance (epoll/kqueue when configured), the timeout list, and the
 * wakeup pipe. Returns 0 on success, -1 on backend setup failure.
 */
int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* fd_table dispatch needs each table to know its event type */
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */

	eloop_ctrl_init();
	return 0;
}
260 
261 
262 #ifdef CONFIG_ELOOP_EPOLL
/*
 * Add a socket to the epoll instance with the event mask matching the
 * eloop event type. Returns 0 on success, -1 on epoll_ctl failure.
 */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
292 #endif /* CONFIG_ELOOP_EPOLL */
293 
294 
295 #ifdef CONFIG_ELOOP_KQUEUE
296 
/* Map an eloop event type to the corresponding kqueue filter.
 * Exception-type (and any unknown) events have no kqueue filter: return 0. */
static short event_type_kevent_filter(eloop_event_type type)
{
	if (type == EVENT_TYPE_READ)
		return EVFILT_READ;
	if (type == EVENT_TYPE_WRITE)
		return EVFILT_WRITE;
	return 0;
}
308 
309 
/*
 * Add a socket to the kqueue instance with the filter matching the eloop
 * event type. Returns 0 on success, -1 on kevent failure.
 */
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent ke;

	EV_SET(&ke, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
322 
323 #endif /* CONFIG_ELOOP_KQUEUE */
324 
325 
/*
 * Register a socket handler in the given table. Grows all backend-specific
 * auxiliary arrays (poll fd map, epoll/kqueue event buffers, fd_table)
 * before touching the table itself, then appends the entry and queues the
 * fd with the backend. Returns 0 on success, -1 on allocation or backend
 * failure. NOTE: on an eloop_sock_queue() failure the entry has already
 * been added to the table, so the caller sees -1 but the table still holds
 * the (unqueued) entry.
 */
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                                     int sock, eloop_sock_handler handler,
                                     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	int next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	/* Grow the fd -> pollfd lookup map with headroom (+50). */
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	/* Grow the pollfd array handed to poll(). */
	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	/* Grow the fd-indexed dispatch table with headroom (+16). */
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	/* Grow the epoll_wait() result buffer geometrically (8, 16, ...). */
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	/* Grow the kevent() result buffer; old contents need not survive,
	 * so plain malloc + free of the old buffer is sufficient. */
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	/* Realloc moves entries, so trace refs must be dropped/re-added
	 * around it. */
	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	/* Mirror the new entry into the fd-indexed dispatch table. */
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}
449 
450 
/*
 * Unregister a socket from the given table. Silently returns if the fd is
 * not found. Compacts the table array, marks the table changed so any
 * in-progress dispatch restarts, and removes the fd from the backend
 * (epoll/kqueue) and the fd-indexed dispatch table.
 */
static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                                         int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	/* Linear search for the entry with this fd. */
	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	/* Trace refs must be dropped before entries are moved in memory. */
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}
497 
498 
499 #ifdef CONFIG_ELOOP_POLL
500 
/* Look up the pollfd slot for fd in the fd-indexed map; NULL when fd is
 * out of range [0, mx) or has no slot assigned. */
static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < 0 || fd >= mx)
		return NULL;
	return pollfds_map[fd];
}
507 
508 
eloop_sock_table_set_fds(struct eloop_sock_table * readers,struct eloop_sock_table * writers,struct eloop_sock_table * exceptions,struct pollfd * pollfds,struct pollfd ** pollfds_map,int max_pollfd_map)509 static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
510 				    struct eloop_sock_table *writers,
511 				    struct eloop_sock_table *exceptions,
512 				    struct pollfd *pollfds,
513 				    struct pollfd **pollfds_map,
514 				    int max_pollfd_map)
515 {
516 	int i;
517 	int nxt = 0;
518 	int fd;
519 	struct pollfd *pfd;
520 
521 	/* Clear pollfd lookup map. It will be re-populated below. */
522 	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);
523 
524 	if (readers && readers->table) {
525 		for (i = 0; i < readers->count; i++) {
526 			fd = readers->table[i].sock;
527 			assert(fd >= 0 && fd < max_pollfd_map);
528 			pollfds[nxt].fd = fd;
529 			pollfds[nxt].events = POLLIN;
530 			pollfds[nxt].revents = 0;
531 			pollfds_map[fd] = &(pollfds[nxt]);
532 			nxt++;
533 		}
534 	}
535 
536 	if (writers && writers->table) {
537 		for (i = 0; i < writers->count; i++) {
538 			/*
539 			 * See if we already added this descriptor, update it
540 			 * if so.
541 			 */
542 			fd = writers->table[i].sock;
543 			assert(fd >= 0 && fd < max_pollfd_map);
544 			pfd = pollfds_map[fd];
545 			if (!pfd) {
546 				pfd = &(pollfds[nxt]);
547 				pfd->events = 0;
548 				pfd->fd = fd;
549 				pollfds[i].revents = 0;
550 				pollfds_map[fd] = pfd;
551 				nxt++;
552 			}
553 			pfd->events |= POLLOUT;
554 		}
555 	}
556 
557 	/*
558 	 * Exceptions are always checked when using poll, but I suppose it's
559 	 * possible that someone registered a socket *only* for exception
560 	 * handling. Set the POLLIN bit in this case.
561 	 */
562 	if (exceptions && exceptions->table) {
563 		for (i = 0; i < exceptions->count; i++) {
564 			/*
565 			 * See if we already added this descriptor, just use it
566 			 * if so.
567 			 */
568 			fd = exceptions->table[i].sock;
569 			assert(fd >= 0 && fd < max_pollfd_map);
570 			pfd = pollfds_map[fd];
571 			if (!pfd) {
572 				pfd = &(pollfds[nxt]);
573 				pfd->events = POLLIN;
574 				pfd->fd = fd;
575 				pollfds[i].revents = 0;
576 				pollfds_map[fd] = pfd;
577 				nxt++;
578 			}
579 		}
580 	}
581 
582 	return nxt;
583 }
584 
585 
/*
 * Dispatch handlers for all sockets in one table whose pollfd has any of
 * the given revents bits set. Returns 1 if the table was modified by a
 * handler (caller must stop using the now-stale pollfds), otherwise 0.
 */
static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		/* A handler may have (un)registered sockets; bail out. */
		if (table->changed)
			return 1;
	}

	return 0;
}
616 
617 
/*
 * Dispatch poll() results: readers first (including error/hangup), then
 * writers, then exception-only registrations. Stops early if any handler
 * modifies a socket table, since pollfds may then be stale.
 */
static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}
636 
637 #endif /* CONFIG_ELOOP_POLL */
638 
639 #ifdef CONFIG_ELOOP_SELECT
640 
eloop_sock_table_set_fds(struct eloop_sock_table * table,fd_set * fds)641 static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
642 				     fd_set *fds)
643 {
644 	int i;
645 
646 	FD_ZERO(fds);
647 
648 	if (table->table == NULL)
649 		return;
650 
651 	for (i = 0; i < table->count; i++) {
652 		assert(table->table[i].sock >= 0);
653 		FD_SET(table->table[i].sock, fds);
654 	}
655 }
656 
657 
eloop_sock_table_dispatch(struct eloop_sock_table * table,fd_set * fds)658 static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
659 				      fd_set *fds)
660 {
661 	int i;
662 
663 	if (table == NULL || table->table == NULL)
664 		return;
665 
666 	table->changed = 0;
667 	for (i = 0; i < table->count; i++) {
668 		if (FD_ISSET(table->table[i].sock, fds)) {
669 			table->table[i].handler(table->table[i].sock,
670 						table->table[i].eloop_data,
671 						table->table[i].user_data);
672 			if (table->changed)
673 				break;
674 		}
675 	}
676 }
677 
678 #endif /* CONFIG_ELOOP_SELECT */
679 
680 
681 #ifdef CONFIG_ELOOP_EPOLL
/*
 * Dispatch epoll_wait() results via the fd-indexed dispatch table.
 * Entries with a NULL handler (already unregistered) are skipped, and the
 * loop stops once any handler changes a socket table, since remaining
 * events may reference stale registrations.
 */
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
699 #endif /* CONFIG_ELOOP_EPOLL */
700 
701 
702 #ifdef CONFIG_ELOOP_KQUEUE
703 
/*
 * Dispatch kevent() results via the fd-indexed dispatch table (kevent
 * ident is the fd). Same stale-entry protections as the epoll variant.
 */
static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
721 
722 
eloop_sock_table_requeue(struct eloop_sock_table * table)723 static int eloop_sock_table_requeue(struct eloop_sock_table *table)
724 {
725 	int i, r;
726 
727 	r = 0;
728 	for (i = 0; i < table->count && table->table; i++) {
729 		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
730 			r = -1;
731 	}
732 	return r;
733 }
734 
735 #endif /* CONFIG_ELOOP_KQUEUE */
736 
737 
/*
 * Re-create the backend event instance and re-register all sockets.
 * Needed after fork() on kqueue systems, where the kqueue fd is not
 * inherited by the child. No-op (returns 0) for other backends.
 */
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}
761 
762 
eloop_sock_table_destroy(struct eloop_sock_table * table)763 static void eloop_sock_table_destroy(struct eloop_sock_table *table)
764 {
765 	if (table) {
766 		int i;
767 		for (i = 0; i < table->count && table->table; i++) {
768 			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
769 				   "sock=%d eloop_data=%p user_data=%p "
770 				   "handler=%p",
771 				   table->table[i].sock,
772 				   table->table[i].eloop_data,
773 				   table->table[i].user_data,
774 				   table->table[i].handler);
775 			wpa_trace_dump_funcname("eloop unregistered socket "
776 						"handler",
777 						table->table[i].handler);
778 			wpa_trace_dump("eloop sock", &table->table[i]);
779 		}
780 		os_free(table->table);
781 	}
782 }
783 
784 
/*
 * Convenience wrapper: register a socket for read events.
 * Returns 0 on success, -1 on failure.
 */
int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}
791 
792 
/* Convenience wrapper: unregister a socket's read-event handler. */
void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}
797 
798 
/* Map an event type to its global socket table; NULL for unknown types. */
static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	struct eloop_sock_table *table = NULL;

	if (type == EVENT_TYPE_READ)
		table = &eloop.readers;
	else if (type == EVENT_TYPE_WRITE)
		table = &eloop.writers;
	else if (type == EVENT_TYPE_EXCEPTION)
		table = &eloop.exceptions;

	return table;
}
812 
813 
/*
 * Register a handler for the given socket and event type.
 * The fd must be non-negative. Returns 0 on success, -1 on failure
 * (unknown event type or allocation/backend error).
 */
int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}
825 
826 
/* Unregister the handler for the given socket and event type; no-op if
 * the socket was not registered. */
void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}
834 
835 
/*
 * Register a one-shot timeout secs/usecs from now. The timeout is inserted
 * into the time-sorted list and the loop is woken so a sleeping select/
 * poll recomputes its wait interval. Returns 0 on success (including the
 * overflow case where the timeout is treated as never firing) and -1 on
 * allocation/clock failure.
 */
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume long enough timeout to be assumed
		 * to be infinite, i.e., the timeout would never happen.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	/* Normalize so usec stays within [0, 1000000). */
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			(void)eloop_wakeup();
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);
	(void)eloop_wakeup();

	return 0;
}
887 
888 
/* Unlink a timeout from the list, drop its trace references, and free it. */
static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}
896 
897 
/*
 * Cancel all timeouts that match the handler and context pointers.
 * ELOOP_ALL_CTX acts as a wildcard for either context argument.
 * Returns the number of timeouts removed.
 */
int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *entry, *next;
	int removed = 0;

	dl_list_for_each_safe(entry, next, &eloop.timeout,
			      struct eloop_timeout, list) {
		int data_match = entry->eloop_data == eloop_data ||
			eloop_data == ELOOP_ALL_CTX;
		int user_match = entry->user_data == user_data ||
			user_data == ELOOP_ALL_CTX;

		if (entry->handler == handler && data_match && user_match) {
			eloop_remove_timeout(entry);
			removed++;
		}
	}

	return removed;
}
918 
919 
/*
 * Cancel the first timeout matching handler and both context pointers
 * (exact match; no wildcard). On removal, *remaining is set to the time
 * left before it would have fired (0 if already due). Returns 1 if a
 * timeout was removed, 0 otherwise.
 */
int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}
945 
946 
/* Return 1 if a timeout with exactly this handler and both context
 * pointers is currently registered, 0 otherwise. */
int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *entry;

	dl_list_for_each(entry, &eloop.timeout, struct eloop_timeout, list) {
		if (entry->handler != handler ||
		    entry->eloop_data != eloop_data ||
		    entry->user_data != user_data)
			continue;
		return 1;
	}

	return 0;
}
961 
962 
/*
 * Shorten an existing matching timeout to the requested duration if the
 * requested time is earlier than what remains. Returns 1 if the timeout
 * was rescheduled, 0 if the existing timeout was already sooner, -1 if no
 * matching timeout exists.
 */
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				/* Re-register rather than edit in place so
				 * the sorted list order stays correct. */
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
993 
994 
/*
 * Extend an existing matching timeout to the requested duration if the
 * requested time is later than what remains (mirror of
 * eloop_deplete_timeout). Returns 1 if rescheduled, 0 if the existing
 * timeout was already later, -1 if no matching timeout exists.
 */
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				/* Re-register rather than edit in place so
				 * the sorted list order stays correct. */
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}
1025 
1026 
1027 #ifndef CONFIG_NATIVE_WINDOWS
/* SIGALRM handler: fires two seconds after SIGINT/SIGTERM if the loop has
 * not shut down cleanly; force-kills the process to escape busy loops. */
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there\n"
		   "is a bug that ends up in a busy loop that "
		   "prevents clean shutdown.\n"
		   "Killing program forcefully.\n");
	exit(1);
}
1037 #endif /* CONFIG_NATIVE_WINDOWS */
1038 
1039 
/*
 * Raw signal handler for all registered signals: records the pending
 * signal (the user callback runs later from the main loop via
 * eloop_process_pending_signals()), arms a 2-second SIGALRM watchdog on
 * first SIGINT/SIGTERM, and wakes the loop via the control pipe.
 */
static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
	/* write() to the ctrl pipe is async-signal-safe and wakes the loop */
	(void)eloop_wakeup();
}
1063 
1064 
eloop_process_pending_signals(void)1065 static void eloop_process_pending_signals(void)
1066 {
1067 	int i;
1068 
1069 	if (eloop.signaled == 0)
1070 		return;
1071 	eloop.signaled = 0;
1072 
1073 	if (eloop.pending_terminate) {
1074 #ifndef CONFIG_NATIVE_WINDOWS
1075 		alarm(0);
1076 #endif /* CONFIG_NATIVE_WINDOWS */
1077 		eloop.pending_terminate = 0;
1078 	}
1079 
1080 	for (i = 0; i < eloop.signal_count; i++) {
1081 		if (eloop.signals[i].signaled) {
1082 			eloop.signals[i].signaled = 0;
1083 			eloop.signals[i].handler(eloop.signals[i].sig,
1084 						 eloop.signals[i].user_data);
1085 		}
1086 	}
1087 }
1088 
1089 
/*
 * Register a handler for a signal. The handler is invoked from the main
 * event loop (not from signal context). Returns 0 on success, -1 on
 * allocation failure.
 */
int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *resized;
	struct eloop_signal *slot;

	resized = os_realloc_array(eloop.signals, eloop.signal_count + 1,
				   sizeof(struct eloop_signal));
	if (resized == NULL)
		return -1;

	slot = &resized[eloop.signal_count];
	slot->sig = sig;
	slot->user_data = user_data;
	slot->handler = handler;
	slot->signaled = 0;
	eloop.signal_count++;
	eloop.signals = resized;
	signal(sig, eloop_handle_signal);

	return 0;
}
1110 
1111 
/*
 * Register a handler for the termination signals (SIGINT and SIGTERM).
 * Returns 0 on success, -1 if either registration failed.
 */
int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret;

	ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret != 0)
		return ret;
	return eloop_register_signal(SIGTERM, handler, user_data);
}
1120 
1121 
/*
 * Register a handler for the reconfiguration signal (SIGHUP). On native
 * Windows builds there is no SIGHUP, so this is a no-op that reports
 * success. Returns 0 on success, -1 on failure.
 */
int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifndef CONFIG_NATIVE_WINDOWS
	return eloop_register_signal(SIGHUP, handler, user_data);
#else /* CONFIG_NATIVE_WINDOWS */
	return 0;
#endif /* CONFIG_NATIVE_WINDOWS */
}
1131 
1132 
/*
 * eloop_run - Process registered events until the loop is terminated
 *
 * Blocks in the build-time selected I/O multiplexer (poll/select/epoll/
 * kqueue) and dispatches timeout handlers, pending signal handlers and
 * socket handlers. Returns when eloop_terminate() has been called or when
 * no timeouts or registered sockets remain.
 */
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	/* fd_set can be large; allocate from the heap rather than the stack */
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	/* Keep running while anything could still generate an event */
	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		/* Convert the nearest pending timeout (head of the sorted
		 * list) into the wait limit for the multiplexer call below */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

		/* Wait for socket events, up to the timeout (or forever if
		 * no timeout is pending) */
#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		/* EINTR (and errno left at 0 by some wrappers) is not an
		 * error; anything else aborts the loop */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */

				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();


		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				/* Copy handler data before removing the
				 * entry: the handler may re-register or
				 * otherwise modify the timeout list */
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}

		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			 /*
			  * Sockets may have been closed and reopened with the
			  * same FD in the signal or timeout handlers, so we
			  * must skip the previous results and check again
			  * whether any of the currently registered sockets have
			  * events.
			  */
			continue;
		}

		/* Dispatch socket handlers for the reported events */
#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	/* Reset so a later eloop_run() call can be made after a terminate */
	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}
1316 
1317 
/*
 * Request that eloop_run() exit at the top of its next iteration. Safe to
 * call from event/timeout handlers.
 */
void eloop_terminate(void)
{
	eloop.terminate = 1;
}
1322 
1323 
/*
 * eloop_destroy - Free all resources allocated for the event loop
 *
 * Logs and removes any timeouts still registered (these indicate handlers
 * that were never canceled), then releases the socket tables, the signal
 * table and the backend-specific state. Call after eloop_run() returns.
 */
void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	/* _safe iteration: entries are removed while walking the list */
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		/* Compute remaining time with manual borrow so it prints as
		 * sec.usec */
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_ctrl_deinit();
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

	/* Backend-specific cleanup */
#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1370 
1371 
eloop_terminated(void)1372 int eloop_terminated(void)
1373 {
1374 	return eloop.terminate || eloop.pending_terminate;
1375 }
1376 
1377 
/*
 * eloop_wait_for_read_sock - Block until a socket becomes readable
 * @sock: File descriptor to wait on; negative values are ignored
 *
 * One-shot synchronous wait outside the main event loop. The return values
 * of the underlying wait calls are intentionally ignored; callers only need
 * "probably readable now" semantics.
 *
 * Fix: the kqueue path previously skipped the sock < 0 check that the poll
 * and select/epoll paths perform, so a negative fd would be handed to
 * EV_SET()/kevent(). Add the same guard for consistency.
 */
void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We can use epoll() here. But epoll() requires 4 system calls.
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait, and close() for
	 * epoll fd. So select() is better for performance here.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	/* Match the other backends: do not register a negative fd */
	if (sock < 0)
		return;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}
1419 
1420 #ifdef CONFIG_ELOOP_SELECT
1421 #undef CONFIG_ELOOP_SELECT
1422 #endif /* CONFIG_ELOOP_SELECT */
1423