/*
 * dhcpcd - DHCP client daemon
 * Copyright (c) 2006-2015 Roy Marples <roy@marples.name>
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/time.h>

#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "config.h"
#include "common.h"
#include "dhcpcd.h"
#include "eloop.h"

#if defined(HAVE_KQUEUE)
#include <sys/event.h>
#include <fcntl.h>
#ifdef __NetBSD__
/* udata is void * except on NetBSD
 * lengths are int except on NetBSD */
#define	UPTR(x)	((intptr_t)(x))
#define	LENC(x)	(x)
#else
#define	UPTR(x)	(x)
#define	LENC(x)	((int)(x))
#endif
#define eloop_event_setup_fds(ctx)
#elif defined(HAVE_EPOLL)
#include <sys/epoll.h>
#define eloop_event_setup_fds(ctx)
#else
#include <poll.h>
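
/* Without a kernel event queue we fall back to poll(2), so the pollfd
 * array must be rebuilt to mirror the event list whenever an event is
 * added or removed. */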
static void
eloop_event_setup_fds(struct eloop_ctx *ctx)
{
	struct eloop_event *e;
	size_t i;

	i = 0;
	TAILQ_FOREACH(e, &ctx->events, next) {
		ctx->fds[i].fd = e->fd;
		ctx->fds[i].events = 0;
		if (e->read_cb)
			ctx->fds[i].events |= POLLIN;
		if (e->write_cb)
			ctx->fds[i].events |= POLLOUT;
		ctx->fds[i].revents = 0;
		e->pollfd = &ctx->fds[i];
		i++;
	}
}
#endif

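/* Register read and/or write callbacks for a file descriptor.
 * Each fd has at most one event: if fd is already monitored, the
 * kernel filters are adjusted and any non-NULL callbacks given here
 * replace the existing ones. Returns 0 on success, -1 on error. */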
int
eloop_event_add(struct eloop_ctx *ctx, int fd,
    void (*read_cb)(void *), void *read_cb_arg,
    void (*write_cb)(void *), void *write_cb_arg)
{
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
	struct kevent ke[2];
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#else
	struct pollfd *nfds;
#endif

#ifdef HAVE_EPOLL
	memset(&epe, 0, sizeof(epe));
	epe.data.fd = fd;
	epe.events = EPOLLIN;
	if (write_cb)
		epe.events |= EPOLLOUT;
#endif

	/* We should only have one callback monitoring the fd. */
	TAILQ_FOREACH(e, &ctx->events, next) {
		if (e->fd == fd) {
			int error;

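			/* Adjust the kernel state in place: read is
			 * always monitored; write monitoring is enabled
			 * or disabled to match whether a write callback
			 * is wanted. */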
#if defined(HAVE_KQUEUE)
			EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ, EV_ADD,
			    0, 0, UPTR(e));
			if (write_cb)
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_ADD, 0, 0, UPTR(e));
			else if (e->write_cb)
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_DELETE, 0, 0, UPTR(e));
			error = kevent(ctx->poll_fd, ke,
			    e->write_cb || write_cb ? 2 : 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
			epe.data.ptr = e;
			error = epoll_ctl(ctx->poll_fd, EPOLL_CTL_MOD,
			    fd, &epe);
#else
			error = 0;
#endif
			if (read_cb) {
				e->read_cb = read_cb;
				e->read_cb_arg = read_cb_arg;
			}
			if (write_cb) {
				e->write_cb = write_cb;
				e->write_cb_arg = write_cb_arg;
			}
			eloop_event_setup_fds(ctx);
			return error;
		}
	}

	/* Allocate a new event if no free ones already allocated. */
	if ((e = TAILQ_FIRST(&ctx->free_events))) {
		TAILQ_REMOVE(&ctx->free_events, e, next);
	} else {
		e = malloc(sizeof(*e));
		if (e == NULL)
			goto err;
	}

	/* Ensure we can actually listen to it. */
	ctx->events_len++;
#if !defined(HAVE_KQUEUE) && !defined(HAVE_EPOLL)
	if (ctx->events_len > ctx->fds_len) {
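		/* Grow the pollfd array in increments of five so we
		 * do not realloc(3) for every event added. */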
		nfds = realloc(ctx->fds, sizeof(*ctx->fds) * (ctx->fds_len + 5));
		if (nfds == NULL)
			goto err;
		ctx->fds_len += 5;
		ctx->fds = nfds;
	}
#endif

	/* Now populate the structure and add it to the list. */
	e->fd = fd;
	e->read_cb = read_cb;
	e->read_cb_arg = read_cb_arg;
	e->write_cb = write_cb;
	e->write_cb_arg = write_cb_arg;

#if defined(HAVE_KQUEUE)
	EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ, EV_ADD, 0, 0, UPTR(e));
	if (write_cb)
		EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
		    EV_ADD, 0, 0, UPTR(e));
	if (kevent(ctx->poll_fd, ke, write_cb ? 2 : 1, NULL, 0, NULL) == -1)
		goto err;
#elif defined(HAVE_EPOLL)
	epe.data.ptr = e;
	if (epoll_ctl(ctx->poll_fd, EPOLL_CTL_ADD, fd, &epe) == -1)
		goto err;
#endif

	/* The order of events should not matter.
	 * However, some PPP servers love to close the link right after
	 * sending their final message. So to ensure dhcpcd processes this
	 * message (which is likely to be that the DHCP addresses are wrong)
	 * we insert new events at the queue head as the link fd will be
	 * the first event added. */
	TAILQ_INSERT_HEAD(&ctx->events, e, next);
	eloop_event_setup_fds(ctx);
	return 0;

err:
	logger(ctx->ctx, LOG_ERR, "%s: %m", __func__);
	if (e) {
		ctx->events_len--;
		TAILQ_INSERT_TAIL(&ctx->free_events, e, next);
	}
	return -1;
}

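/* Stop monitoring fd. With write_only set only the write callback is
 * removed and read monitoring continues; otherwise the whole event is
 * deleted and recycled onto the free list. */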
void
eloop_event_delete(struct eloop_ctx *ctx, int fd, int write_only)
{
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
	struct kevent ke[2];
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	TAILQ_FOREACH(e, &ctx->events, next) {
		if (e->fd == fd) {
			if (write_only) {
				if (e->write_cb) {
					e->write_cb = NULL;
					e->write_cb_arg = NULL;
#if defined(HAVE_KQUEUE)
					EV_SET(&ke[0], (uintptr_t)fd,
					    EVFILT_WRITE, EV_DELETE,
					    0, 0, UPTR(NULL));
					kevent(ctx->poll_fd, ke, 1, NULL, 0,
					    NULL);
#elif defined(HAVE_EPOLL)
					memset(&epe, 0, sizeof(epe));
					epe.data.fd = e->fd;
					epe.data.ptr = e;
					epe.events = EPOLLIN;
					epoll_ctl(ctx->poll_fd, EPOLL_CTL_MOD,
					    fd, &epe);
#endif
				}
			} else {
				TAILQ_REMOVE(&ctx->events, e, next);
#if defined(HAVE_KQUEUE)
				EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
				    EV_DELETE, 0, 0, UPTR(NULL));
				if (e->write_cb)
					EV_SET(&ke[1], (uintptr_t)fd,
					    EVFILT_WRITE, EV_DELETE,
					    0, 0, UPTR(NULL));
				kevent(ctx->poll_fd, ke, e->write_cb ? 2 : 1,
				    NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
				/* A NULL event is safe here because we
				 * rely on epoll_pwait(2), which was added
				 * after the kernel bug requiring a non-NULL
				 * event for EPOLL_CTL_DEL was fixed. */
				epoll_ctl(ctx->poll_fd, EPOLL_CTL_DEL,
				    fd, NULL);
#endif
				TAILQ_INSERT_TAIL(&ctx->free_events, e, next);
				ctx->events_len--;
			}
			eloop_event_setup_fds(ctx);
			break;
		}
	}
}

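/* Arm, or re-arm, a timeout of "when" from now on the given queue.
 * Timeouts are keyed by callback and argument: adding one that is
 * already pending simply reschedules it. The list is kept sorted,
 * soonest first, against the monotonic clock. */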
int
eloop_q_timeout_add_tv(struct eloop_ctx *ctx, int queue,
    const struct timespec *when, void (*callback)(void *), void *arg)
{
	struct timespec now, w;
	struct eloop_timeout *t, *tt = NULL;

	get_monotonic(&now);
	timespecadd(&now, when, &w);
	/* Check for time_t overflow. */
	if (timespeccmp(&w, &now, <)) {
		errno = ERANGE;
		return -1;
	}

	/* Remove existing timeout if present. */
	TAILQ_FOREACH(t, &ctx->timeouts, next) {
		if (t->callback == callback && t->arg == arg) {
			TAILQ_REMOVE(&ctx->timeouts, t, next);
			break;
		}
	}

	if (t == NULL) {
		/* No existing, so allocate or grab one from the free pool. */
		if ((t = TAILQ_FIRST(&ctx->free_timeouts))) {
			TAILQ_REMOVE(&ctx->free_timeouts, t, next);
		} else {
			t = malloc(sizeof(*t));
			if (t == NULL) {
				logger(ctx->ctx, LOG_ERR, "%s: %m", __func__);
				return -1;
			}
		}
	}

	t->when = w;
	t->callback = callback;
	t->arg = arg;
	t->queue = queue;

	/* The timeout list should be in chronological order,
	 * soonest first. */
	TAILQ_FOREACH(tt, &ctx->timeouts, next) {
		if (timespeccmp(&t->when, &tt->when, <)) {
			TAILQ_INSERT_BEFORE(tt, t, next);
			return 0;
		}
	}
	TAILQ_INSERT_TAIL(&ctx->timeouts, t, next);
	return 0;
}

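/* Convenience wrapper for a timeout expressed in whole seconds. */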
int
eloop_q_timeout_add_sec(struct eloop_ctx *ctx, int queue, time_t when,
    void (*callback)(void *), void *arg)
{
	struct timespec tv;

	tv.tv_sec = when;
	tv.tv_nsec = 0;
	return eloop_q_timeout_add_tv(ctx, queue, &tv, callback, arg);
}

#if !defined(HAVE_KQUEUE)
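/* Run a callback on the next pass around the loop without arming a
 * real timer. Only one such zero timeout may be pending at a time; a
 * second one falls back to an ordinary zero-second timeout with a
 * warning. */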
int
eloop_timeout_add_now(struct eloop_ctx *ctx,
    void (*callback)(void *), void *arg)
{

	if (ctx->timeout0 != NULL) {
		logger(ctx->ctx, LOG_WARNING,
		    "%s: timeout0 already set", __func__);
		return eloop_q_timeout_add_sec(ctx, 0, 0, callback, arg);
	}

	ctx->timeout0 = callback;
	ctx->timeout0_arg = arg;
	return 0;
}
#endif

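/* Delete pending timeouts. A queue of 0 matches any queue and a NULL
 * callback matches any callback, so both act as wildcards; arg must
 * always match. */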
void
eloop_q_timeout_delete(struct eloop_ctx *ctx, int queue,
    void (*callback)(void *), void *arg)
{
	struct eloop_timeout *t, *tt;

	TAILQ_FOREACH_SAFE(t, &ctx->timeouts, next, tt) {
		if ((queue == 0 || t->queue == queue) &&
		    t->arg == arg &&
		    (!callback || t->callback == callback))
		{
			TAILQ_REMOVE(&ctx->timeouts, t, next);
			TAILQ_INSERT_TAIL(&ctx->free_timeouts, t, next);
		}
	}
}

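/* Ask eloop_start() to return with the given exit code at the top of
 * its next iteration. */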
void
eloop_exit(struct eloop_ctx *ctx, int code)
{

	ctx->exitcode = code;
	ctx->exitnow = 1;
}

#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
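/* Create the kernel event queue descriptor, marked close-on-exec so
 * it is not leaked to spawned processes. */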
static int
eloop_open(struct eloop_ctx *ctx)
{
#if defined(HAVE_KQUEUE1)
	return (ctx->poll_fd = kqueue1(O_CLOEXEC));
#elif defined(HAVE_KQUEUE)
	int i;

	if ((ctx->poll_fd = kqueue()) == -1)
		return -1;
	if ((i = fcntl(ctx->poll_fd, F_GETFD, 0)) == -1 ||
	    fcntl(ctx->poll_fd, F_SETFD, i | FD_CLOEXEC) == -1)
	{
		close(ctx->poll_fd);
		ctx->poll_fd = -1;
		return -1;
	}

	return ctx->poll_fd;
#elif defined(HAVE_EPOLL)
	return (ctx->poll_fd = epoll_create1(EPOLL_CLOEXEC));
#endif
}

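/* (Re)create the kernel event queue and re-register all events (and,
 * for kqueue, the handled signals). Called at init time and typically
 * again after fork(2), since kqueue descriptors are not inherited by
 * the child process. */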
int
eloop_requeue(struct eloop_ctx *ctx)
{
	struct eloop_event *e;
	int error;
#if defined(HAVE_KQUEUE)
	size_t i;
	struct kevent *ke;
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	if (ctx->poll_fd != -1)
		close(ctx->poll_fd);
	if (eloop_open(ctx) == -1)
		return -1;
#if defined(HAVE_KQUEUE)
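	/* First count the signals and events so the whole set can be
	 * registered with a single kevent(2) call. */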
	i = 0;
	while (dhcpcd_handlesigs[i])
		i++;
	TAILQ_FOREACH(e, &ctx->events, next) {
		i++;
		if (e->write_cb)
			i++;
	}

	if ((ke = malloc(sizeof(*ke) * i)) == NULL)
		return -1;

	for (i = 0; dhcpcd_handlesigs[i]; i++)
		EV_SET(&ke[i], (uintptr_t)dhcpcd_handlesigs[i],
		    EVFILT_SIGNAL, EV_ADD, 0, 0, UPTR(NULL));

	TAILQ_FOREACH(e, &ctx->events, next) {
		EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_READ,
		    EV_ADD, 0, 0, UPTR(e));
		i++;
		if (e->write_cb) {
			EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_WRITE,
			    EV_ADD, 0, 0, UPTR(e));
			i++;
		}
	}

	error = kevent(ctx->poll_fd, ke, LENC(i), NULL, 0, NULL);
	free(ke);

#elif defined(HAVE_EPOLL)

	error = 0;
	TAILQ_FOREACH(e, &ctx->events, next) {
		memset(&epe, 0, sizeof(epe));
		epe.data.fd = e->fd;
		epe.events = EPOLLIN;
		if (e->write_cb)
			epe.events |= EPOLLOUT;
		epe.data.ptr = e;
		if (epoll_ctl(ctx->poll_fd, EPOLL_CTL_ADD, e->fd, &epe) == -1)
			error = -1;
	}
#endif

	return error;
}
#endif

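/* Allocate and initialise a new event loop bound to the given dhcpcd
 * context. Returns NULL on failure. */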
struct eloop_ctx *
eloop_init(struct dhcpcd_ctx *dctx)
{
	struct eloop_ctx *ctx;
	struct timespec now;

	/* Check we have a working monotonic clock. */
	if (get_monotonic(&now) == -1)
		return NULL;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx) {
		ctx->ctx = dctx;
		TAILQ_INIT(&ctx->events);
		TAILQ_INIT(&ctx->free_events);
		TAILQ_INIT(&ctx->timeouts);
		TAILQ_INIT(&ctx->free_timeouts);
		ctx->exitcode = EXIT_FAILURE;
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
		ctx->poll_fd = -1;
#endif
		if (eloop_requeue(ctx) == -1) {
			free(ctx);
			return NULL;
		}
	}

	return ctx;
}

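/* Free all events and timeouts, pending and pooled, and the loop
 * itself. Callback arguments are owned by the caller and are not
 * freed here. */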
void
eloop_free(struct eloop_ctx *ctx)
{
	struct eloop_event *e;
	struct eloop_timeout *t;

	if (ctx == NULL)
		return;

	while ((e = TAILQ_FIRST(&ctx->events))) {
		TAILQ_REMOVE(&ctx->events, e, next);
		free(e);
	}
	while ((e = TAILQ_FIRST(&ctx->free_events))) {
		TAILQ_REMOVE(&ctx->free_events, e, next);
		free(e);
	}
	while ((t = TAILQ_FIRST(&ctx->timeouts))) {
		TAILQ_REMOVE(&ctx->timeouts, t, next);
		free(t);
	}
	while ((t = TAILQ_FIRST(&ctx->free_timeouts))) {
		TAILQ_REMOVE(&ctx->free_timeouts, t, next);
		free(t);
	}
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
	close(ctx->poll_fd);
#else
	free(ctx->fds);
#endif
	free(ctx);
}

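/* Run the event loop: fire due timeouts first, then wait for the next
 * timeout (or forever if none) and dispatch one triggered event per
 * iteration, restarting the loop after every callback in case it
 * added or removed events. Returns the exit code set by eloop_exit(). */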
int
eloop_start(struct eloop_ctx *ctx)
{
	int n;
	struct eloop_event *e;
	struct eloop_timeout *t;
	struct timespec now, ts, *tsp;
	void (*t0)(void *);
#if defined(HAVE_EPOLL) || !defined(USE_SIGNALS)
	int timeout;
#endif
#if defined(HAVE_KQUEUE)
	struct kevent ke;
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	for (;;) {
		if (ctx->exitnow)
			break;

		/* Run all timeouts first. */
		if (ctx->timeout0) {
			t0 = ctx->timeout0;
			ctx->timeout0 = NULL;
			t0(ctx->timeout0_arg);
			continue;
		}
		if ((t = TAILQ_FIRST(&ctx->timeouts))) {
			get_monotonic(&now);
			if (timespeccmp(&now, &t->when, >)) {
				TAILQ_REMOVE(&ctx->timeouts, t, next);
				t->callback(t->arg);
				TAILQ_INSERT_TAIL(&ctx->free_timeouts, t, next);
				continue;
			}
			timespecsub(&t->when, &now, &ts);
			tsp = &ts;
		} else
			/* No timeouts, so wait forever. */
			tsp = NULL;

		if (tsp == NULL && ctx->events_len == 0) {
			logger(ctx->ctx, LOG_ERR, "nothing to do");
			break;
		}

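		/* Convert the wait to milliseconds for poll(2) and
		 * epoll_wait(2), rounding nanoseconds up and clamping
		 * at INT_MAX to avoid integer overflow. */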
#if defined(HAVE_EPOLL) || !defined(USE_SIGNALS)
		if (tsp == NULL)
			timeout = -1;
		else if (tsp->tv_sec > INT_MAX / 1000 ||
		    (tsp->tv_sec == INT_MAX / 1000 &&
		    (tsp->tv_nsec + 999999) / 1000000 > INT_MAX % 1000))
			timeout = INT_MAX;
		else
			timeout = (int)(tsp->tv_sec * 1000 +
			    (tsp->tv_nsec + 999999) / 1000000);
#endif

#if defined(HAVE_KQUEUE)
		n = kevent(ctx->poll_fd, NULL, 0, &ke, 1, tsp);
#elif defined(HAVE_EPOLL)
#ifdef USE_SIGNALS
		n = epoll_pwait(ctx->poll_fd, &epe, 1, timeout,
		    &ctx->ctx->sigset);
#else
		n = epoll_wait(ctx->poll_fd, &epe, 1, timeout);
#endif
#else
#ifdef USE_SIGNALS
		n = pollts(ctx->fds, (nfds_t)ctx->events_len, tsp,
		    &ctx->ctx->sigset);
#else
		n = poll(ctx->fds, (nfds_t)ctx->events_len, timeout);
#endif
#endif
		if (n == -1) {
			if (errno == EINTR)
				continue;
			logger(ctx->ctx, LOG_ERR, "poll: %m");
			break;
		}

		/* Process any triggered events.
		 * We go back to the start after calling each callback in
		 * case the current or the next event was removed. */
#if defined(HAVE_KQUEUE)
		if (n) {
			if (ke.filter == EVFILT_SIGNAL) {
				struct dhcpcd_siginfo si;

				si.signo = (int)ke.ident;
				dhcpcd_handle_signal(&si);
				continue;
			}
			e = (struct eloop_event *)ke.udata;
			if (ke.filter == EVFILT_WRITE) {
				e->write_cb(e->write_cb_arg);
				continue;
			} else if (ke.filter == EVFILT_READ) {
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#elif defined(HAVE_EPOLL)
		if (n) {
			e = (struct eloop_event *)epe.data.ptr;
			if (epe.events & EPOLLOUT && e->write_cb) {
				e->write_cb(e->write_cb_arg);
				continue;
			}
			if (epe.events &
			    (EPOLLIN | EPOLLERR | EPOLLHUP))
			{
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#else
		if (n > 0) {
			TAILQ_FOREACH(e, &ctx->events, next) {
				if (e->pollfd->revents & POLLOUT &&
				    e->write_cb)
				{
					e->write_cb(e->write_cb_arg);
					break;
				}
				if (e->pollfd->revents) {
					e->read_cb(e->read_cb_arg);
					break;
				}
			}
		}
#endif
	}

	return ctx->exitcode;
}