/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"

/** An entry for an evmap_io list: notes all the events that want to read or
    write on a given fd, and the number of each.
 */
struct evmap_io {
	struct event_dlist events;
	ev_uint16_t nread;
	ev_uint16_t nwrite;
	ev_uint16_t nclose;
};

/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_dlist events;
};

/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get reused. For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io. But on other platforms (Windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;
	evutil_socket_t fd;
	union { /* This is a union in case we need to make more things that can
		   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};

/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter. Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick. */
	unsigned h = (unsigned) e->fd;
	h += (h >> 2) | (h << 30);
	return h;
}

/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
	return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free)

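/* Set 'x' to the ent.type field of the hashtable entry in 'map' whose fd is
 * 'slot', or to NULL if there is no such entry. */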
#define GET_IO_SLOT(x, map, slot, type) \
	do { \
		struct event_map_entry key_, *ent_; \
		key_.fd = slot; \
		ent_ = HT_FIND(event_io_map, map, &key_); \
		(x) = ent_ ? &ent_->ent.type : NULL; \
	} while (0);

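/* As GET_IO_SLOT, but insert a new hashtable entry for 'slot' if none is
 * present, constructing it with 'ctor' and reserving 'fdinfo_len' extra
 * bytes after the entry.  Makes the enclosing function return -1 on
 * allocation failure. */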
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
	do { \
		struct event_map_entry key_, *ent_; \
		key_.fd = slot; \
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &key_, ptr, \
		    { \
			    ent_ = *ptr; \
		    }, \
		    { \
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(ent_ == NULL)) \
				    return (-1); \
			    ent_->fd = slot; \
			    (ctor)(&ent_->ent.type); \
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
		    }); \
	(x) = &ent_->ent.type; \
	} while (0)

void evmap_io_initmap_(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}

void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif

/* Set the variable 'x' to the entry of type 'struct type *' in the event_map
   'map' corresponding to the fd or signal 'slot'.  Set 'x' to NULL if there
   is no entry for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type) \
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not
   present, by allocating enough memory for a 'struct type' plus 'fdinfo_len'
   extra bytes, and initializing the new value by calling the function 'ctor'
   on it.  Makes the enclosing function return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
	do { \
		if ((map)->entries[slot] == NULL) { \
			(map)->entries[slot] = \
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1); \
			(ctor)((struct type *)(map)->entries[slot]); \
		} \
		(x) = (struct type *)((map)->entries[slot]); \
	} while (0)

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap_(struct event_io_map* ctx)
{
	evmap_signal_initmap_(ctx);
}
void
evmap_io_clear_(struct event_io_map* ctx)
{
	evmap_signal_clear_(ctx);
}
#endif


/** Expand 'map' with new entries of width 'msize' until it is big enough
    to store a value in 'slot'.
 */
static int
evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
	if (map->nentries <= slot) {
		int nentries = map->nentries ? map->nentries : 32;
		void **tmp;

		if (slot > INT_MAX / 2)
			return (-1);

		while (nentries <= slot)
			nentries <<= 1;

		if (nentries > INT_MAX / msize)
			return (-1);

		tmp = (void **)mm_realloc(map->entries, nentries * msize);
		if (tmp == NULL)
			return (-1);

		memset(&tmp[map->nentries], 0,
		    (nentries - map->nentries) * msize);

		map->nentries = nentries;
		map->entries = tmp;
	}

	return (0);
}

void
evmap_signal_initmap_(struct event_signal_map *ctx)
{
	ctx->nentries = 0;
	ctx->entries = NULL;
}

void
evmap_signal_clear_(struct event_signal_map *ctx)
{
	if (ctx->entries != NULL) {
		int i;
		for (i = 0; i < ctx->nentries; ++i) {
			if (ctx->entries[i] != NULL)
				mm_free(ctx->entries[i]);
		}
		mm_free(ctx->entries);
		ctx->entries = NULL;
	}
	ctx->nentries = 0;
}

/* code specific to file descriptors */

/** Constructor for struct evmap_io */
static void
evmap_io_init(struct evmap_io *entry)
{
	LIST_INIT(&entry->events);
	entry->nread = 0;
	entry->nwrite = 0;
	entry->nclose = 0;
}


/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
	    evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;

	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	if (ev->ev_events & EV_CLOSED) {
		if (++nclose == 1)
			res |= EV_CLOSED;
	}
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = LIST_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	ctx->nclose = (ev_uint16_t) nclose;
	LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);

	return (retval);
}

/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;

	if (fd < 0)
		return 0;

	EVUTIL_ASSERT(fd == ev->ev_fd);

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries)
		return (-1);
#endif

	GET_IO_SLOT(ctx, io, fd, evmap_io);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;

	if (ev->ev_events & EV_READ) {
		if (--nread == 0)
			res |= EV_READ;
		EVUTIL_ASSERT(nread >= 0);
	}
	if (ev->ev_events & EV_WRITE) {
		if (--nwrite == 0)
			res |= EV_WRITE;
		EVUTIL_ASSERT(nwrite >= 0);
	}
	if (ev->ev_events & EV_CLOSED) {
		if (--nclose == 0)
			res |= EV_CLOSED;
		EVUTIL_ASSERT(nclose >= 0);
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		if (evsel->del(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1) {
			retval = -1;
		} else {
			retval = 1;
		}
	}

	ctx->nread = nread;
	ctx->nwrite = nwrite;
	ctx->nclose = nclose;
	LIST_REMOVE(ev, ev_io_next);

	return (retval);
}

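/* Tell the evmap that we have activity of type 'events' on 'fd': activate
 * every event registered on 'fd' whose interests overlap with 'events'
 * (not counting EV_ET). */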
void
evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	if (fd < 0 || fd >= io->nentries)
		return;
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	if (NULL == ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_io_next) {
		if (ev->ev_events & (events & ~EV_ET))
			event_active_nolock_(ev, ev->ev_events & events, 1);
	}
}

/* code specific to signals */

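/** Constructor for struct evmap_signal */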
static void
evmap_signal_init(struct evmap_signal *entry)
{
	LIST_INIT(&entry->events);
}


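/* Add 'ev' to the list of events waiting on signal 'sig', and tell the
 * signal backend to start watching 'sig' if this is the first such event.
 * Returns -1 on error, 1 on success. */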
int
evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx = NULL;

	if (sig < 0 || sig >= NSIG)
		return (-1);

	if (sig >= map->nentries) {
		if (evmap_make_space(
			map, sig, sizeof(struct evmap_signal *)) == -1)
			return (-1);
	}
	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
	    base->evsigsel->fdinfo_len);

	if (LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);

	return (1);
}

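/* Remove 'ev' from the list of events waiting on signal 'sig', and tell the
 * signal backend to stop watching 'sig' if no events remain for it.
 * Returns -1 on error, 1 on success. */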
int
evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig < 0 || sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	LIST_REMOVE(ev, ev_signal_next);

	if (LIST_FIRST(&ctx->events) == NULL) {
		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
			return (-1);
	}

	return (1);
}

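/* Tell the evmap that signal 'sig' has fired: activate every event
 * registered for 'sig', making each pending with 'ncalls' calls. */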
void
evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	if (sig < 0 || sig >= map->nentries)
		return;
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	if (!ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active_nolock_(ev, EV_SIGNAL, ncalls);
}

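/* Return the per-fd data ("fdinfo") that the backend stores just after the
 * evmap_io for 'fd', or NULL if no evmap_io exists for 'fd'. */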
void *
evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
{
	struct evmap_io *ctx;
	GET_IO_SLOT(ctx, map, fd, evmap_io);
	if (ctx)
		return ((char*)ctx) + sizeof(struct evmap_io);
	else
		return NULL;
}

/* Callback type for evmap_io_foreach_fd */
typedef int (*evmap_io_foreach_fd_cb)(
	struct event_base *, evutil_socket_t, struct evmap_io *, void *);

/* Multipurpose helper function: Iterate over every file descriptor in the
 * event_base for which we could have EV_READ or EV_WRITE events.  For each
 * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided
 * function, base is the event_base, fd is the file descriptor, evmap_io
 * is an evmap_io structure containing a list of events pending on the
 * file descriptor, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next fd. Otherwise, return the same
 * value that fn returned.
 *
 * Note that there is no guarantee that the file descriptors will be processed
 * in any particular order.
 */
static int
evmap_io_foreach_fd(struct event_base *base,
    evmap_io_foreach_fd_cb fn,
    void *arg)
{
	evutil_socket_t fd;
	struct event_io_map *iomap = &base->io;
	int r = 0;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
	HT_FOREACH(mapent, event_io_map, iomap) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		fd = (*mapent)->fd;
#else
	for (fd = 0; fd < iomap->nentries; ++fd) {
		struct evmap_io *ctx = iomap->entries[fd];
		if (!ctx)
			continue;
#endif
		if ((r = fn(base, fd, ctx, arg)))
			break;
	}
	return r;
}


/* Callback type for evmap_signal_foreach_signal */
typedef int (*evmap_signal_foreach_signal_cb)(
	struct event_base *, int, struct evmap_signal *, void *);

/* Multipurpose helper function: Iterate over every signal number in the
 * event_base for which we could have signal events.  For each such signal,
 * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
 * function, base is the event_base, signum is the signal number, evmap_signal
 * is an evmap_signal structure containing a list of events pending on the
 * signal, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next signal. Otherwise, return the same
 * value that fn returned.
 */
static int
evmap_signal_foreach_signal(struct event_base *base,
    evmap_signal_foreach_signal_cb fn,
    void *arg)
{
	struct event_signal_map *sigmap = &base->sigmap;
	int r = 0;
	int signum;

	for (signum = 0; signum < sigmap->nentries; ++signum) {
		struct evmap_signal *ctx = sigmap->entries[signum];
		if (!ctx)
			continue;
		if ((r = fn(base, signum, ctx, arg)))
			break;
	}
	return r;
}


/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
 * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
 * EV_ET. */
static int
evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *ctx, void *arg)
{
	const struct eventop *evsel = base->evsel;
	void *extra;
	int *result = arg;
	short events = 0;
	struct event *ev;
	EVUTIL_ASSERT(ctx);

	extra = ((char*)ctx) + sizeof(struct evmap_io);
	if (ctx->nread)
		events |= EV_READ;
	if (ctx->nwrite)
		events |= EV_WRITE;
	if (ctx->nclose)
		events |= EV_CLOSED;
	if (evsel->fdinfo_len)
		memset(extra, 0, evsel->fdinfo_len);
	if (events &&
	    (ev = LIST_FIRST(&ctx->events)) &&
	    (ev->ev_events & EV_ET))
		events |= EV_ET;
	if (evsel->add(base, fd, 0, events, extra) == -1)
		*result = -1;

	return 0;
}

/* Helper for evmap_reinit_: tell the backend to add every signal for which we
 * have pending events. */
static int
evmap_signal_reinit_iter_fn(struct event_base *base,
    int signum, struct evmap_signal *ctx, void *arg)
{
	const struct eventop *evsel = base->evsigsel;
	int *result = arg;

	if (!LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
			*result = -1;
	}
	return 0;
}

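/* Tell the backend to re-add every fd and every signal for which we have
 * pending events.  Returns 0 on success, -1 if any add failed. */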
int
evmap_reinit_(struct event_base *base)
{
	int result = 0;

	evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	return 0;
}

/* Helper for evmap_delete_all_: delete every event in an event_dlist. */
static int
delete_all_in_dlist(struct event_dlist *dlist)
{
	struct event *ev;
	while ((ev = LIST_FIRST(dlist)))
		event_del(ev);
	return 0;
}

/* Helper for evmap_delete_all_: delete every event pending on an fd. */
static int
evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	return delete_all_in_dlist(&io_info->events);
}

/* Helper for evmap_delete_all_: delete every event pending on a signal. */
static int
evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	return delete_all_in_dlist(&sig_info->events);
}

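/* Delete every event registered in the event_base, for every signal and
 * every fd. */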
void
evmap_delete_all_(struct event_base *base)
{
	evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
	evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
}

/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};

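/** Initialize 'changelist' to an empty state with no pending changes. */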
void
event_changelist_init_(struct event_changelist *changelist)
{
	changelist->changes = NULL;
	changelist->changes_size = 0;
	changelist->n_changes = 0;
}

/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}

/** Callback helper for event_changelist_assert_ok */
static int
event_changelist_assert_ok_foreach_iter_fn(
	struct event_base *base,
	evutil_socket_t fd, struct evmap_io *io, void *arg)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *f;
	f = (void*)
	    ( ((char*)io) + sizeof(struct evmap_io) );
	if (f->idxplus1) {
		struct event_change *c = &changelist->changes[f->idxplus1 - 1];
		EVUTIL_ASSERT(c->fd == fd);
	}
	return 0;
}

/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_assert_ok(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	evmap_io_foreach_fd(base,
	    event_changelist_assert_ok_foreach_iter_fn,
	    NULL);
}

#ifdef DEBUG_CHANGELIST
#define event_changelist_check(base)  event_changelist_assert_ok((base))
#else
#define event_changelist_check(base)  ((void)0)
#endif

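/* Forget every pending change in 'changelist', clearing the idxplus1 back
 * pointer stored in each fd's or signal's fdinfo. */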
void
event_changelist_remove_all_(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}

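/* Free all memory held by 'changelist', and reset it to its initial empty
 * state. */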
void
event_changelist_freemem_(struct event_changelist *changelist)
{
	if (changelist->changes)
		mm_free(changelist->changes);
	event_changelist_init_(changelist); /* zero it all out. */
}

/** Increase the size of 'changelist' to hold more changes. */
static int
event_changelist_grow(struct event_changelist *changelist)
{
	int new_size;
	struct event_change *new_changes;
	if (changelist->changes_size < 64)
		new_size = 64;
	else
		new_size = changelist->changes_size * 2;

	new_changes = mm_realloc(changelist->changes,
	    new_size * sizeof(struct event_change));

	if (EVUTIL_UNLIKELY(new_changes == NULL))
		return (-1);

	changelist->changes = new_changes;
	changelist->changes_size = new_size;

	return (0);
}

/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}

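/* Backend "add" function for changelist-based backends: record that 'events'
 * should be added on 'fd', merging the request into any change already
 * pending for that fd. */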
int
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;
	ev_uint8_t evchange = EV_CHANGE_ADD | (events & (EV_ET|EV_PERSIST|EV_SIGNAL));

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance). */

	if (events & (EV_READ|EV_SIGNAL))
		change->read_change = evchange;
	if (events & EV_WRITE)
		change->write_change = evchange;
	if (events & EV_CLOSED)
		change->close_change = evchange;

	event_changelist_check(base);
	return (0);
}

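/* Backend "del" function for changelist-based backends: record that 'events'
 * should be deleted from 'fd', turning any matching uncommitted add into a
 * no-op where possible (see the comment in the body). */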
int
event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;
	ev_uint8_t del = EV_CHANGE_DEL | (events & EV_ET);

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete on an event set that doesn't contain the event to be
	   deleted produces a no-op.  This effectively removes any previous
	   uncommitted add, rather than replacing it: on those platforms where
	   "add, delete, dispatch" is not the same as "no-op, dispatch", we
	   want the no-op behavior.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)))
			change->read_change = 0;
		else
			change->read_change = del;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE))
			change->write_change = 0;
		else
			change->write_change = del;
	}
	if (events & EV_CLOSED) {
		if (!(change->old_events & EV_CLOSED))
			change->close_change = 0;
		else
			change->close_change = del;
	}

	event_changelist_check(base);
	return (0);
}

/* Helper for evmap_check_integrity_: verify that all of the events pending on
 * a given fd are set up correctly, and that the nread, nwrite, and nclose
 * counts on that fd are correct. */
static int
evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct event *ev;
	int n_read = 0, n_write = 0, n_close = 0;

	/* First, make sure the list itself isn't corrupt. Otherwise,
	 * running LIST_FOREACH could be an exciting adventure. */
	EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);

	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == fd);
		EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
		if (ev->ev_events & EV_READ)
			++n_read;
		if (ev->ev_events & EV_WRITE)
			++n_write;
		if (ev->ev_events & EV_CLOSED)
			++n_close;
	}

	EVUTIL_ASSERT(n_read == io_info->nread);
	EVUTIL_ASSERT(n_write == io_info->nwrite);
	EVUTIL_ASSERT(n_close == io_info->nclose);

	return 0;
}

/* Helper for evmap_check_integrity_: verify that all of the events pending
 * on a given signal are set up correctly. */
static int
evmap_signal_check_integrity_fn(struct event_base *base,
    int signum, struct evmap_signal *sig_info, void *arg)
{
	struct event *ev;
	/* First, make sure the list itself isn't corrupt. */
	EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);

	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == signum);
		EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
	}
	return 0;
}

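/* Verify that the fd and signal maps (and, if in use, the changelist) are
 * internally consistent, asserting on any mismatch. */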
void
evmap_check_integrity_(struct event_base *base)
{
	evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
	evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);

	if (base->evsel->add == event_changelist_add_)
		event_changelist_assert_ok(base);
}

/* Helper type for evmap_foreach_event_: Bundles a function to call on every
 * event, and the user-provided void* to use as its third argument. */
struct evmap_foreach_event_helper {
	event_base_foreach_event_cb fn;
	void *arg;
};

/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given fd. */
static int
evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct evmap_foreach_event_helper *h = arg;
	struct event *ev;
	int r;
	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}

/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given signal. */
static int
evmap_signal_foreach_event_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	struct event *ev;
	struct evmap_foreach_event_helper *h = arg;
	int r;
	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}

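/* Run 'fn' on every event registered in the event_base, for every fd and
 * every signal, passing 'arg' as its third argument.  If 'fn' returns
 * nonzero, stop and return that value; otherwise return 0. */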
int
evmap_foreach_event_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	struct evmap_foreach_event_helper h;
	int r;
	h.fn = fn;
	h.arg = arg;
	if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
		return r;
	return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
}