1 /*
2 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27 #include "event2/event-config.h"
28 #include "evconfig-private.h"
29
30 #ifdef _WIN32
31 #include <winsock2.h>
32 #define WIN32_LEAN_AND_MEAN
33 #include <windows.h>
34 #undef WIN32_LEAN_AND_MEAN
35 #endif
36 #include <sys/types.h>
37 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
38 #include <sys/time.h>
39 #endif
40 #include <sys/queue.h>
41 #ifdef EVENT__HAVE_SYS_SOCKET_H
42 #include <sys/socket.h>
43 #endif
44 #include <stdio.h>
45 #include <stdlib.h>
46 #ifdef EVENT__HAVE_UNISTD_H
47 #include <unistd.h>
48 #endif
49 #include <ctype.h>
50 #include <errno.h>
51 #include <signal.h>
52 #include <string.h>
53 #include <time.h>
54 #include <limits.h>
55 #ifdef EVENT__HAVE_FCNTL_H
56 #include <fcntl.h>
57 #endif
58
59 #include "event2/event.h"
60 #include "event2/event_struct.h"
61 #include "event2/event_compat.h"
62 #include "event-internal.h"
63 #include "defer-internal.h"
64 #include "evthread-internal.h"
65 #include "event2/thread.h"
66 #include "event2/util.h"
67 #include "log-internal.h"
68 #include "evmap-internal.h"
69 #include "iocp-internal.h"
70 #include "changelist-internal.h"
71 #define HT_NO_CACHE_HASH_VALUES
72 #include "ht-internal.h"
73 #include "util-internal.h"
74
75
76 #ifdef EVENT__HAVE_WORKING_KQUEUE
77 #include "kqueue-internal.h"
78 #endif
79
80 #ifdef EVENT__HAVE_EVENT_PORTS
81 extern const struct eventop evportops;
82 #endif
83 #ifdef EVENT__HAVE_SELECT
84 extern const struct eventop selectops;
85 #endif
86 #ifdef EVENT__HAVE_POLL
87 extern const struct eventop pollops;
88 #endif
89 #ifdef EVENT__HAVE_EPOLL
90 extern const struct eventop epollops;
91 #endif
92 #ifdef EVENT__HAVE_WORKING_KQUEUE
93 extern const struct eventop kqops;
94 #endif
95 #ifdef EVENT__HAVE_DEVPOLL
96 extern const struct eventop devpollops;
97 #endif
98 #ifdef _WIN32
99 extern const struct eventop win32ops;
100 #endif
101
102 /* Array of backends in order of preference. */
103 static const struct eventop *eventops[] = {
104 #ifdef EVENT__HAVE_EVENT_PORTS
105 &evportops,
106 #endif
107 #ifdef EVENT__HAVE_WORKING_KQUEUE
108 &kqops,
109 #endif
110 #ifdef EVENT__HAVE_EPOLL
111 &epollops,
112 #endif
113 #ifdef EVENT__HAVE_DEVPOLL
114 &devpollops,
115 #endif
116 #ifdef EVENT__HAVE_POLL
117 &pollops,
118 #endif
119 #ifdef EVENT__HAVE_SELECT
120 &selectops,
121 #endif
122 #ifdef _WIN32
123 &win32ops,
124 #endif
125 NULL
126 };
127
128 /* Global state; deprecated */
129 EVENT2_EXPORT_SYMBOL
130 struct event_base *event_global_current_base_ = NULL;
131 #define current_base event_global_current_base_
132
133 /* Global state */
134
135 static void *event_self_cbarg_ptr_ = NULL;
136
137 /* Prototypes */
138 static void event_queue_insert_active(struct event_base *, struct event_callback *);
139 static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
140 static void event_queue_insert_timeout(struct event_base *, struct event *);
141 static void event_queue_insert_inserted(struct event_base *, struct event *);
142 static void event_queue_remove_active(struct event_base *, struct event_callback *);
143 static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
144 static void event_queue_remove_timeout(struct event_base *, struct event *);
145 static void event_queue_remove_inserted(struct event_base *, struct event *);
146 static void event_queue_make_later_events_active(struct event_base *base);
147
148 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
149 static int event_del_(struct event *ev, int blocking);
150
151 #ifdef USE_REINSERT_TIMEOUT
152 /* This code seems buggy; only turn it on if we find out what the trouble is. */
153 static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
154 #endif
155
156 static int event_haveevents(struct event_base *);
157
158 static int event_process_active(struct event_base *);
159
160 static int timeout_next(struct event_base *, struct timeval **);
161 static void timeout_process(struct event_base *);
162
163 static inline void event_signal_closure(struct event_base *, struct event *ev);
164 static inline void event_persist_closure(struct event_base *, struct event *ev);
165
166 static int evthread_notify_base(struct event_base *base);
167
168 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
169 struct event *ev);
170
171 #ifndef EVENT__DISABLE_DEBUG_MODE
172 /* These functions implement a hashtable of which 'struct event *' structures
173 * have been setup or added. We don't want to trust the content of the struct
174 * event itself, since we're trying to work through cases where an event gets
175 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
176 */
177
178 struct event_debug_entry {
179 HT_ENTRY(event_debug_entry) node;
180 const struct event *ptr;
181 unsigned added : 1;
182 };
183
184 static inline unsigned
185 hash_debug_entry(const struct event_debug_entry *e)
186 {
187 /* We need to do this silliness to convince compilers that we
188 * honestly mean to cast e->ptr to an integer, and discard any
189 * part of it that doesn't fit in an unsigned.
190 */
191 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
192 /* Our hashtable implementation is pretty sensitive to low bits,
193 * and every struct event is over 64 bytes in size, so we can
194 * just say >>6. */
195 return (u >> 6);
196 }
197
198 static inline int
199 eq_debug_entry(const struct event_debug_entry *a,
200 const struct event_debug_entry *b)
201 {
202 return a->ptr == b->ptr;
203 }
204
205 int event_debug_mode_on_ = 0;
206
207
208 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
209 /**
210  * @brief debug mode flag, set whenever a structure that needs to be shared
211  * across threads is created (if thread support is enabled).
212 *
213 * When and if evthreads are initialized, this variable will be evaluated,
214 * and if set to something other than zero, this means the evthread setup
215 * functions were called out of order.
216 *
217 * See: "Locks and threading" in the documentation.
218 */
219 int event_debug_created_threadable_ctx_ = 0;
220 #endif
221
222 /* Set if it's too late to enable event_debug_mode. */
223 static int event_debug_mode_too_late = 0;
224 #ifndef EVENT__DISABLE_THREAD_SUPPORT
225 static void *event_debug_map_lock_ = NULL;
226 #endif
227 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
228 HT_INITIALIZER();
229
230 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
231 eq_debug_entry)
232 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
233 eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
234
235 /* record that ev is now setup (that is, ready for an add) */
236 static void event_debug_note_setup_(const struct event *ev)
237 {
238 struct event_debug_entry *dent, find;
239
240 if (!event_debug_mode_on_)
241 goto out;
242
243 find.ptr = ev;
244 EVLOCK_LOCK(event_debug_map_lock_, 0);
245 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
246 if (dent) {
247 dent->added = 0;
248 } else {
249 dent = mm_malloc(sizeof(*dent));
250 if (!dent)
251 event_err(1,
252 "Out of memory in debugging code");
253 dent->ptr = ev;
254 dent->added = 0;
255 HT_INSERT(event_debug_map, &global_debug_map, dent);
256 }
257 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
258
259 out:
260 event_debug_mode_too_late = 1;
261 }
262 /* record that ev is no longer setup */
263 static void event_debug_note_teardown_(const struct event *ev)
264 {
265 struct event_debug_entry *dent, find;
266
267 if (!event_debug_mode_on_)
268 goto out;
269
270 find.ptr = ev;
271 EVLOCK_LOCK(event_debug_map_lock_, 0);
272 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
273 if (dent)
274 mm_free(dent);
275 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
276
277 out:
278 event_debug_mode_too_late = 1;
279 }
280 /* record that ev is now added */
281 static void event_debug_note_add_(const struct event *ev)
282 {
283 struct event_debug_entry *dent,find;
284
285 if (!event_debug_mode_on_)
286 goto out;
287
288 find.ptr = ev;
289 EVLOCK_LOCK(event_debug_map_lock_, 0);
290 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
291 if (dent) {
292 dent->added = 1;
293 } else {
294 event_errx(EVENT_ERR_ABORT_,
295 "%s: noting an add on a non-setup event %p"
296 " (events: 0x%x, fd: "EV_SOCK_FMT
297 ", flags: 0x%x)",
298 __func__, ev, ev->ev_events,
299 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
300 }
301 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
302
303 out:
304 event_debug_mode_too_late = 1;
305 }
306 /* record that ev is no longer added */
307 static void event_debug_note_del_(const struct event *ev)
308 {
309 struct event_debug_entry *dent, find;
310
311 if (!event_debug_mode_on_)
312 goto out;
313
314 find.ptr = ev;
315 EVLOCK_LOCK(event_debug_map_lock_, 0);
316 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
317 if (dent) {
318 dent->added = 0;
319 } else {
320 event_errx(EVENT_ERR_ABORT_,
321 "%s: noting a del on a non-setup event %p"
322 " (events: 0x%x, fd: "EV_SOCK_FMT
323 ", flags: 0x%x)",
324 __func__, ev, ev->ev_events,
325 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
326 }
327 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
328
329 out:
330 event_debug_mode_too_late = 1;
331 }
332 /* assert that ev is setup (i.e., okay to add or inspect) */
333 static void event_debug_assert_is_setup_(const struct event *ev)
334 {
335 struct event_debug_entry *dent, find;
336
337 if (!event_debug_mode_on_)
338 return;
339
340 find.ptr = ev;
341 EVLOCK_LOCK(event_debug_map_lock_, 0);
342 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
343 if (!dent) {
344 event_errx(EVENT_ERR_ABORT_,
345 "%s called on a non-initialized event %p"
346 " (events: 0x%x, fd: "EV_SOCK_FMT
347 ", flags: 0x%x)",
348 __func__, ev, ev->ev_events,
349 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
350 }
351 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
352 }
353 /* assert that ev is not added (i.e., okay to tear down or set up again) */
354 static void event_debug_assert_not_added_(const struct event *ev)
355 {
356 struct event_debug_entry *dent, find;
357
358 if (!event_debug_mode_on_)
359 return;
360
361 find.ptr = ev;
362 EVLOCK_LOCK(event_debug_map_lock_, 0);
363 dent = HT_FIND(event_debug_map, &global_debug_map, &find);
364 if (dent && dent->added) {
365 event_errx(EVENT_ERR_ABORT_,
366 "%s called on an already added event %p"
367 " (events: 0x%x, fd: "EV_SOCK_FMT", "
368 "flags: 0x%x)",
369 __func__, ev, ev->ev_events,
370 EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
371 }
372 EVLOCK_UNLOCK(event_debug_map_lock_, 0);
373 }
374 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
375 {
376 if (!event_debug_mode_on_)
377 return;
378 if (fd < 0)
379 return;
380
381 #ifndef _WIN32
382 {
383 int flags;
384 if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
385 EVUTIL_ASSERT(flags & O_NONBLOCK);
386 }
387 }
388 #endif
389 }
390 #else
391 static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
392 static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
393 static void event_debug_note_add_(const struct event *ev) { (void)ev; }
394 static void event_debug_note_del_(const struct event *ev) { (void)ev; }
395 static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
396 static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
397 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
398 #endif
399
400 #define EVENT_BASE_ASSERT_LOCKED(base) \
401 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
402
403 /* How often (in seconds) do we check for changes in wall clock time relative
404 * to monotonic time? Set this to -1 for 'never.' */
405 #define CLOCK_SYNC_INTERVAL 5
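/* How the clock-sync machinery below fits together: gettime() records the
 * difference between wall-clock and monotonic time in base->tv_clock_diff at
 * most once every CLOCK_SYNC_INTERVAL seconds, and
 * event_base_gettimeofday_cached() adds that offset back onto the cached
 * monotonic time so callers get an approximate wall-clock time without an
 * extra gettimeofday() call. */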
406
407 /** Set 'tp' to the current time according to 'base'. We must hold the lock
408 * on 'base'. If there is a cached time, return it. Otherwise, use
409 * clock_gettime or gettimeofday as appropriate to find out the right time.
410 * Return 0 on success, -1 on failure.
411 */
412 static int
413 gettime(struct event_base *base, struct timeval *tp)
414 {
415 EVENT_BASE_ASSERT_LOCKED(base);
416
417 if (base->tv_cache.tv_sec) {
418 *tp = base->tv_cache;
419 return (0);
420 }
421
422 if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
423 return -1;
424 }
425
426 if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
427 < tp->tv_sec) {
428 struct timeval tv;
429 evutil_gettimeofday(&tv,NULL);
430 evutil_timersub(&tv, tp, &base->tv_clock_diff);
431 base->last_updated_clock_diff = tp->tv_sec;
432 }
433
434 return 0;
435 }
436
437 int
438 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
439 {
440 int r;
441 if (!base) {
442 base = current_base;
443 if (!current_base)
444 return evutil_gettimeofday(tv, NULL);
445 }
446
447 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
448 if (base->tv_cache.tv_sec == 0) {
449 r = evutil_gettimeofday(tv, NULL);
450 } else {
451 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
452 r = 0;
453 }
454 EVBASE_RELEASE_LOCK(base, th_base_lock);
455 return r;
456 }
457
458 /** Make 'base' have no current cached time. */
459 static inline void
460 clear_time_cache(struct event_base *base)
461 {
462 base->tv_cache.tv_sec = 0;
463 }
464
465 /** Replace the cached time in 'base' with the current time. */
466 static inline void
467 update_time_cache(struct event_base *base)
468 {
469 base->tv_cache.tv_sec = 0;
470 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
471 gettime(base, &base->tv_cache);
472 }
473
474 int
475 event_base_update_cache_time(struct event_base *base)
476 {
477
478 if (!base) {
479 base = current_base;
480 if (!current_base)
481 return -1;
482 }
483
484 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
485 if (base->running_loop)
486 update_time_cache(base);
487 EVBASE_RELEASE_LOCK(base, th_base_lock);
488 return 0;
489 }
490
491 static inline struct event *
492 event_callback_to_event(struct event_callback *evcb)
493 {
494 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
495 return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
496 }
497
498 static inline struct event_callback *
499 event_to_event_callback(struct event *ev)
500 {
501 return &ev->ev_evcallback;
502 }
503
504 struct event_base *
505 event_init(void)
506 {
507 struct event_base *base = event_base_new_with_config(NULL);
508
509 if (base == NULL) {
510 event_errx(1, "%s: Unable to construct event_base", __func__);
511 return NULL;
512 }
513
514 current_base = base;
515
516 return (base);
517 }
518
519 struct event_base *
520 event_base_new(void)
521 {
522 struct event_base *base = NULL;
523 struct event_config *cfg = event_config_new();
524 if (cfg) {
525 base = event_base_new_with_config(cfg);
526 event_config_free(cfg);
527 }
528 return base;
529 }
530
531 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
532 * avoid. */
533 static int
534 event_config_is_avoided_method(const struct event_config *cfg,
535 const char *method)
536 {
537 struct event_config_entry *entry;
538
539 TAILQ_FOREACH(entry, &cfg->entries, next) {
540 if (entry->avoid_method != NULL &&
541 strcmp(entry->avoid_method, method) == 0)
542 return (1);
543 }
544
545 return (0);
546 }
547
548 /** Return true iff 'method' is disabled according to the environment. */
549 static int
550 event_is_method_disabled(const char *name)
551 {
552 char environment[64];
553 int i;
554
555 evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
556 for (i = 8; environment[i] != '\0'; ++i)
557 environment[i] = EVUTIL_TOUPPER_(environment[i]);
558 /* Note that evutil_getenv_() ignores the environment entirely if
559 * we're setuid */
560 return (evutil_getenv_(environment) != NULL);
561 }
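/* Example: with EVENT_NOEPOLL set in the environment, the backend-selection
 * loop in event_base_new_with_config() skips the "epoll" entry of eventops[]
 * and falls through to the next available backend (e.g. poll or select). */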
562
563 int
564 event_base_get_features(const struct event_base *base)
565 {
566 return base->evsel->features;
567 }
568
569 void
570 event_enable_debug_mode(void)
571 {
572 #ifndef EVENT__DISABLE_DEBUG_MODE
573 if (event_debug_mode_on_)
574 event_errx(1, "%s was called twice!", __func__);
575 if (event_debug_mode_too_late)
576 event_errx(1, "%s must be called *before* creating any events "
577 "or event_bases",__func__);
578
579 event_debug_mode_on_ = 1;
580
581 HT_INIT(event_debug_map, &global_debug_map);
582 #endif
583 }
584
585 void
586 event_disable_debug_mode(void)
587 {
588 #ifndef EVENT__DISABLE_DEBUG_MODE
589 struct event_debug_entry **ent, *victim;
590
591 EVLOCK_LOCK(event_debug_map_lock_, 0);
592 for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
593 victim = *ent;
594 ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
595 mm_free(victim);
596 }
597 HT_CLEAR(event_debug_map, &global_debug_map);
598 EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
599
600 event_debug_mode_on_ = 0;
601 #endif
602 }
603
604 struct event_base *
605 event_base_new_with_config(const struct event_config *cfg)
606 {
607 int i;
608 struct event_base *base;
609 int should_check_environment;
610
611 #ifndef EVENT__DISABLE_DEBUG_MODE
612 event_debug_mode_too_late = 1;
613 #endif
614
615 if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
616 event_warn("%s: calloc", __func__);
617 return NULL;
618 }
619
620 if (cfg)
621 base->flags = cfg->flags;
622
623 should_check_environment =
624 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
625
626 {
627 struct timeval tmp;
628 int precise_time =
629 cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
630 int flags;
631 if (should_check_environment && !precise_time) {
632 precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
633 if (precise_time) {
634 base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
635 }
636 }
637 flags = precise_time ? EV_MONOT_PRECISE : 0;
638 evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
639
640 gettime(base, &tmp);
641 }
642
643 min_heap_ctor_(&base->timeheap);
644
645 base->sig.ev_signal_pair[0] = -1;
646 base->sig.ev_signal_pair[1] = -1;
647 base->th_notify_fd[0] = -1;
648 base->th_notify_fd[1] = -1;
649
650 TAILQ_INIT(&base->active_later_queue);
651
652 evmap_io_initmap_(&base->io);
653 evmap_signal_initmap_(&base->sigmap);
654 event_changelist_init_(&base->changelist);
655
656 base->evbase = NULL;
657
658 if (cfg) {
659 memcpy(&base->max_dispatch_time,
660 &cfg->max_dispatch_interval, sizeof(struct timeval));
661 base->limit_callbacks_after_prio =
662 cfg->limit_callbacks_after_prio;
663 } else {
664 base->max_dispatch_time.tv_sec = -1;
665 base->limit_callbacks_after_prio = 1;
666 }
667 if (cfg && cfg->max_dispatch_callbacks >= 0) {
668 base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
669 } else {
670 base->max_dispatch_callbacks = INT_MAX;
671 }
672 if (base->max_dispatch_callbacks == INT_MAX &&
673 base->max_dispatch_time.tv_sec == -1)
674 base->limit_callbacks_after_prio = INT_MAX;
675
676 for (i = 0; eventops[i] && !base->evbase; i++) {
677 if (cfg != NULL) {
678 /* determine if this backend should be avoided */
679 if (event_config_is_avoided_method(cfg,
680 eventops[i]->name))
681 continue;
682 if ((eventops[i]->features & cfg->require_features)
683 != cfg->require_features)
684 continue;
685 }
686
687 /* also obey the environment variables */
688 if (should_check_environment &&
689 event_is_method_disabled(eventops[i]->name))
690 continue;
691
692 base->evsel = eventops[i];
693
694 base->evbase = base->evsel->init(base);
695 }
696
697 if (base->evbase == NULL) {
698 event_warnx("%s: no event mechanism available",
699 __func__);
700 base->evsel = NULL;
701 event_base_free(base);
702 return NULL;
703 }
704
705 if (evutil_getenv_("EVENT_SHOW_METHOD"))
706 event_msgx("libevent using: %s", base->evsel->name);
707
708 /* allocate a single active event queue */
709 if (event_base_priority_init(base, 1) < 0) {
710 event_base_free(base);
711 return NULL;
712 }
713
714 /* prepare for threading */
715
716 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
717 event_debug_created_threadable_ctx_ = 1;
718 #endif
719
720 #ifndef EVENT__DISABLE_THREAD_SUPPORT
721 if (EVTHREAD_LOCKING_ENABLED() &&
722 (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
723 int r;
724 EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
725 EVTHREAD_ALLOC_COND(base->current_event_cond);
726 r = evthread_make_base_notifiable(base);
727 if (r<0) {
728 event_warnx("%s: Unable to make base notifiable.", __func__);
729 event_base_free(base);
730 return NULL;
731 }
732 }
733 #endif
734
735 #ifdef _WIN32
736 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
737 event_base_start_iocp_(base, cfg->n_cpus_hint);
738 #endif
739
740 return (base);
741 }
742
743 int
744 event_base_start_iocp_(struct event_base *base, int n_cpus)
745 {
746 #ifdef _WIN32
747 if (base->iocp)
748 return 0;
749 base->iocp = event_iocp_port_launch_(n_cpus);
750 if (!base->iocp) {
751 event_warnx("%s: Couldn't launch IOCP", __func__);
752 return -1;
753 }
754 return 0;
755 #else
756 return -1;
757 #endif
758 }
759
760 void
761 event_base_stop_iocp_(struct event_base *base)
762 {
763 #ifdef _WIN32
764 int rv;
765
766 if (!base->iocp)
767 return;
768 rv = event_iocp_shutdown_(base->iocp, -1);
769 EVUTIL_ASSERT(rv >= 0);
770 base->iocp = NULL;
771 #endif
772 }
773
774 static int
775 event_base_cancel_single_callback_(struct event_base *base,
776 struct event_callback *evcb,
777 int run_finalizers)
778 {
779 int result = 0;
780
781 if (evcb->evcb_flags & EVLIST_INIT) {
782 struct event *ev = event_callback_to_event(evcb);
783 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
784 event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
785 result = 1;
786 }
787 } else {
788 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
789 event_callback_cancel_nolock_(base, evcb, 1);
790 EVBASE_RELEASE_LOCK(base, th_base_lock);
791 result = 1;
792 }
793
794 if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
795 switch (evcb->evcb_closure) {
796 case EV_CLOSURE_EVENT_FINALIZE:
797 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
798 struct event *ev = event_callback_to_event(evcb);
799 ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
800 if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
801 mm_free(ev);
802 break;
803 }
804 case EV_CLOSURE_CB_FINALIZE:
805 evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
806 break;
807 default:
808 break;
809 }
810 }
811 return result;
812 }
813
814 static int event_base_free_queues_(struct event_base *base, int run_finalizers)
815 {
816 int deleted = 0, i;
817
818 for (i = 0; i < base->nactivequeues; ++i) {
819 struct event_callback *evcb, *next;
820 for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
821 next = TAILQ_NEXT(evcb, evcb_active_next);
822 deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
823 evcb = next;
824 }
825 }
826
827 {
828 struct event_callback *evcb;
829 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
830 deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
831 }
832 }
833
834 return deleted;
835 }
836
837 static void
838 event_base_free_(struct event_base *base, int run_finalizers)
839 {
840 int i, n_deleted=0;
841 struct event *ev;
842 /* XXXX grab the lock? If there is contention when one thread frees
843 * the base, then the contending thread will be very sad soon. */
844
845 /* event_base_free(NULL) is how to free the current_base if we
846 * made it with event_init and forgot to hold a reference to it. */
847 if (base == NULL && current_base)
848 base = current_base;
849 /* Don't actually free NULL. */
850 if (base == NULL) {
851 event_warnx("%s: no base to free", __func__);
852 return;
853 }
854 /* XXX(niels) - check for internal events first */
855
856 #ifdef _WIN32
857 event_base_stop_iocp_(base);
858 #endif
859
860 /* close the thread-notification fds if we have them */
861 if (base->th_notify_fd[0] != -1) {
862 event_del(&base->th_notify);
863 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
864 if (base->th_notify_fd[1] != -1)
865 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
866 base->th_notify_fd[0] = -1;
867 base->th_notify_fd[1] = -1;
868 event_debug_unassign(&base->th_notify);
869 }
870
871 /* Delete all non-internal events. */
872 evmap_delete_all_(base);
873
874 while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
875 event_del(ev);
876 ++n_deleted;
877 }
878 for (i = 0; i < base->n_common_timeouts; ++i) {
879 struct common_timeout_list *ctl =
880 base->common_timeout_queues[i];
881 event_del(&ctl->timeout_event); /* Internal; doesn't count */
882 event_debug_unassign(&ctl->timeout_event);
883 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
884 struct event *next = TAILQ_NEXT(ev,
885 ev_timeout_pos.ev_next_with_common_timeout);
886 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
887 event_del(ev);
888 ++n_deleted;
889 }
890 ev = next;
891 }
892 mm_free(ctl);
893 }
894 if (base->common_timeout_queues)
895 mm_free(base->common_timeout_queues);
896
897 for (;;) {
898 /* A finalizer may register yet another finalizer, and if that
899 * finalizer ends up in active_later_queue we would move it to
900 * activequeues, leaving events in activequeues after this function
901 * returns, which is not what we want (we even have an assertion
902 * for this).
903 *
904 * A simple case is a bufferevent with an underlying bufferevent (i.e. filters).
905 */
906 int i = event_base_free_queues_(base, run_finalizers);
907 event_debug(("%s: %d events freed", __func__, i));
908 if (!i) {
909 break;
910 }
911 n_deleted += i;
912 }
913
914 if (n_deleted)
915 event_debug(("%s: %d events were still set in base",
916 __func__, n_deleted));
917
918 while (LIST_FIRST(&base->once_events)) {
919 struct event_once *eonce = LIST_FIRST(&base->once_events);
920 LIST_REMOVE(eonce, next_once);
921 mm_free(eonce);
922 }
923
924 if (base->evsel != NULL && base->evsel->dealloc != NULL)
925 base->evsel->dealloc(base);
926
927 for (i = 0; i < base->nactivequeues; ++i)
928 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
929
930 EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
931 min_heap_dtor_(&base->timeheap);
932
933 mm_free(base->activequeues);
934
935 evmap_io_clear_(&base->io);
936 evmap_signal_clear_(&base->sigmap);
937 event_changelist_freemem_(&base->changelist);
938
939 EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
940 EVTHREAD_FREE_COND(base->current_event_cond);
941
942 /* If we're freeing current_base, there won't be a current_base. */
943 if (base == current_base)
944 current_base = NULL;
945 mm_free(base);
946 }
947
948 void
949 event_base_free_nofinalize(struct event_base *base)
950 {
951 event_base_free_(base, 0);
952 }
953
954 void
955 event_base_free(struct event_base *base)
956 {
957 event_base_free_(base, 1);
958 }
959
960 /* Fake eventop; used to disable the backend temporarily inside event_reinit
961 * so that we can call event_del() on an event without telling the backend.
962 */
963 static int
964 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
965 short events, void *fdinfo)
966 {
967 return 0;
968 }
969 const struct eventop nil_eventop = {
970 "nil",
971 NULL, /* init: unused. */
972 NULL, /* add: unused. */
973 nil_backend_del, /* del: used, so needs to be killed. */
974 NULL, /* dispatch: unused. */
975 NULL, /* dealloc: unused. */
976 0, 0, 0
977 };
978
979 /* reinitialize the event base after a fork */
980 int
981 event_reinit(struct event_base *base)
982 {
983 const struct eventop *evsel;
984 int res = 0;
985 int was_notifiable = 0;
986 int had_signal_added = 0;
987
988 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
989
990 evsel = base->evsel;
991
992 /* check if this event mechanism requires reinit on the backend */
993 if (evsel->need_reinit) {
994 /* We're going to call event_del() on our notify events (the
995 * ones that tell about signals and wakeup events). But we
996 * don't actually want to tell the backend to change its
997 * state, since it might still share some resource (a kqueue,
998 * an epoll fd) with the parent process, and we don't want to
999 * delete the fds from _that_ backend, so we temporarily stub out
1000 * the evsel with a replacement.
1001 */
1002 base->evsel = &nil_eventop;
1003 }
1004
1005 /* We need to create a new signal-notification fd and a new
1006 * thread-notification fd. Otherwise, we'll still share those with
1007 * the parent process, which would make any notification sent to them
1008 * get received by one or both of the event loops, more or less at
1009 * random.
1010 */
1011 if (base->sig.ev_signal_added) {
1012 event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
1013 event_debug_unassign(&base->sig.ev_signal);
1014 memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
1015 had_signal_added = 1;
1016 base->sig.ev_signal_added = 0;
1017 }
1018 if (base->sig.ev_signal_pair[0] != -1)
1019 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
1020 if (base->sig.ev_signal_pair[1] != -1)
1021 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
1022 if (base->th_notify_fn != NULL) {
1023 was_notifiable = 1;
1024 base->th_notify_fn = NULL;
1025 }
1026 if (base->th_notify_fd[0] != -1) {
1027 event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
1028 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
1029 if (base->th_notify_fd[1] != -1)
1030 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
1031 base->th_notify_fd[0] = -1;
1032 base->th_notify_fd[1] = -1;
1033 event_debug_unassign(&base->th_notify);
1034 }
1035
1036 /* Replace the original evsel. */
1037 base->evsel = evsel;
1038
1039 if (evsel->need_reinit) {
1040 /* Reconstruct the backend through brute-force, so that we do
1041 * not share any structures with the parent process. For some
1042 * backends, this is necessary: epoll and kqueue, for
1043 * instance, have events associated with a kernel
1044 * structure. If we didn't reinitialize, we'd share that
1045 * structure with the parent process, and any changes made by
1046 * the parent would affect our backend's behavior (and vice
1047 * versa).
1048 */
1049 if (base->evsel->dealloc != NULL)
1050 base->evsel->dealloc(base);
1051 base->evbase = evsel->init(base);
1052 if (base->evbase == NULL) {
1053 event_errx(1,
1054 "%s: could not reinitialize event mechanism",
1055 __func__);
1056 res = -1;
1057 goto done;
1058 }
1059
1060 /* Empty out the changelist (if any): we are starting from a
1061 * blank slate. */
1062 event_changelist_freemem_(&base->changelist);
1063
1064 /* Tell the event maps to re-inform the backend about all
1065 * pending events. This will make the signal notification
1066 * event get re-created if necessary. */
1067 if (evmap_reinit_(base) < 0)
1068 res = -1;
1069 } else {
1070 res = evsig_init_(base);
1071 if (res == 0 && had_signal_added) {
1072 res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1073 if (res == 0)
1074 base->sig.ev_signal_added = 1;
1075 }
1076 }
1077
1078 /* If we were notifiable before, and nothing just exploded, become
1079 * notifiable again. */
1080 if (was_notifiable && res == 0)
1081 res = evthread_make_base_notifiable_nolock_(base);
1082
1083 done:
1084 EVBASE_RELEASE_LOCK(base, th_base_lock);
1085 return (res);
1086 }
1087
1088 /* Get the monotonic time for this event_base's timer */
1089 int
1090 event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1091 {
1092 int rv = -1;
1093
1094 if (base && tv) {
1095 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1096 rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1097 EVBASE_RELEASE_LOCK(base, th_base_lock);
1098 }
1099
1100 return rv;
1101 }
1102
1103 const char **
1104 event_get_supported_methods(void)
1105 {
1106 static const char **methods = NULL;
1107 const struct eventop **method;
1108 const char **tmp;
1109 int i = 0, k;
1110
1111 /* count all methods */
1112 for (method = &eventops[0]; *method != NULL; ++method) {
1113 ++i;
1114 }
1115
1116 /* allocate one more than we need for the NULL pointer */
1117 tmp = mm_calloc((i + 1), sizeof(char *));
1118 if (tmp == NULL)
1119 return (NULL);
1120
1121 /* populate the array with the supported methods */
1122 for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1123 tmp[i++] = eventops[k]->name;
1124 }
1125 tmp[i] = NULL;
1126
1127 if (methods != NULL)
1128 mm_free((char**)methods);
1129
1130 methods = tmp;
1131
1132 return (methods);
1133 }
1134
1135 struct event_config *
1136 event_config_new(void)
1137 {
1138 struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1139
1140 if (cfg == NULL)
1141 return (NULL);
1142
1143 TAILQ_INIT(&cfg->entries);
1144 cfg->max_dispatch_interval.tv_sec = -1;
1145 cfg->max_dispatch_callbacks = INT_MAX;
1146 cfg->limit_callbacks_after_prio = 1;
1147
1148 return (cfg);
1149 }
1150
1151 static void
1152 event_config_entry_free(struct event_config_entry *entry)
1153 {
1154 if (entry->avoid_method != NULL)
1155 mm_free((char *)entry->avoid_method);
1156 mm_free(entry);
1157 }
1158
1159 void
1160 event_config_free(struct event_config *cfg)
1161 {
1162 struct event_config_entry *entry;
1163
1164 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1165 TAILQ_REMOVE(&cfg->entries, entry, next);
1166 event_config_entry_free(entry);
1167 }
1168 mm_free(cfg);
1169 }
1170
1171 int
1172 event_config_set_flag(struct event_config *cfg, int flag)
1173 {
1174 if (!cfg)
1175 return -1;
1176 cfg->flags |= flag;
1177 return 0;
1178 }
1179
1180 int
1181 event_config_avoid_method(struct event_config *cfg, const char *method)
1182 {
1183 struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1184 if (entry == NULL)
1185 return (-1);
1186
1187 if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1188 mm_free(entry);
1189 return (-1);
1190 }
1191
1192 TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1193
1194 return (0);
1195 }
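/* Example use (a minimal sketch): skip the select backend for a new base.
 *
 *	struct event_config *cfg = event_config_new();
 *	event_config_avoid_method(cfg, "select");
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 */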
1196
1197 int
1198 event_config_require_features(struct event_config *cfg,
1199 int features)
1200 {
1201 if (!cfg)
1202 return (-1);
1203 cfg->require_features = features;
1204 return (0);
1205 }
1206
1207 int
1208 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1209 {
1210 if (!cfg)
1211 return (-1);
1212 cfg->n_cpus_hint = cpus;
1213 return (0);
1214 }
1215
1216 int
1217 event_config_set_max_dispatch_interval(struct event_config *cfg,
1218 const struct timeval *max_interval, int max_callbacks, int min_priority)
1219 {
1220 if (max_interval)
1221 memcpy(&cfg->max_dispatch_interval, max_interval,
1222 sizeof(struct timeval));
1223 else
1224 cfg->max_dispatch_interval.tv_sec = -1;
1225 cfg->max_dispatch_callbacks =
1226 max_callbacks >= 0 ? max_callbacks : INT_MAX;
1227 if (min_priority < 0)
1228 min_priority = 0;
1229 cfg->limit_callbacks_after_prio = min_priority;
1230 return (0);
1231 }
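/* Example (a sketch of the knobs above): limit every dispatch pass over
 * queues of priority 1 and higher to 5 callbacks or 10 msec, whichever comes
 * first, while priority-0 callbacks are never cut short:
 *
 *	struct timeval t = { 0, 10000 };
 *	event_config_set_max_dispatch_interval(cfg, &t, 5, 1);
 */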
1232
1233 int
1234 event_priority_init(int npriorities)
1235 {
1236 return event_base_priority_init(current_base, npriorities);
1237 }
1238
1239 int
1240 event_base_priority_init(struct event_base *base, int npriorities)
1241 {
1242 int i, r;
1243 r = -1;
1244
1245 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1246
1247 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1248 || npriorities >= EVENT_MAX_PRIORITIES)
1249 goto err;
1250
1251 if (npriorities == base->nactivequeues)
1252 goto ok;
1253
1254 if (base->nactivequeues) {
1255 mm_free(base->activequeues);
1256 base->nactivequeues = 0;
1257 }
1258
1259 /* Allocate our priority queues */
1260 base->activequeues = (struct evcallback_list *)
1261 mm_calloc(npriorities, sizeof(struct evcallback_list));
1262 if (base->activequeues == NULL) {
1263 event_warn("%s: calloc", __func__);
1264 goto err;
1265 }
1266 base->nactivequeues = npriorities;
1267
1268 for (i = 0; i < base->nactivequeues; ++i) {
1269 TAILQ_INIT(&base->activequeues[i]);
1270 }
1271
1272 ok:
1273 r = 0;
1274 err:
1275 EVBASE_RELEASE_LOCK(base, th_base_lock);
1276 return (r);
1277 }
1278
1279 int
1280 event_base_get_npriorities(struct event_base *base)
1281 {
1282
1283 int n;
1284 if (base == NULL)
1285 base = current_base;
1286
1287 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1288 n = base->nactivequeues;
1289 EVBASE_RELEASE_LOCK(base, th_base_lock);
1290 return (n);
1291 }
1292
1293 int
1294 event_base_get_num_events(struct event_base *base, unsigned int type)
1295 {
1296 int r = 0;
1297
1298 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1299
1300 if (type & EVENT_BASE_COUNT_ACTIVE)
1301 r += base->event_count_active;
1302
1303 if (type & EVENT_BASE_COUNT_VIRTUAL)
1304 r += base->virtual_event_count;
1305
1306 if (type & EVENT_BASE_COUNT_ADDED)
1307 r += base->event_count;
1308
1309 EVBASE_RELEASE_LOCK(base, th_base_lock);
1310
1311 return r;
1312 }
1313
1314 int
1315 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1316 {
1317 int r = 0;
1318
1319 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1320
1321 if (type & EVENT_BASE_COUNT_ACTIVE) {
1322 r += base->event_count_active_max;
1323 if (clear)
1324 base->event_count_active_max = 0;
1325 }
1326
1327 if (type & EVENT_BASE_COUNT_VIRTUAL) {
1328 r += base->virtual_event_count_max;
1329 if (clear)
1330 base->virtual_event_count_max = 0;
1331 }
1332
1333 if (type & EVENT_BASE_COUNT_ADDED) {
1334 r += base->event_count_max;
1335 if (clear)
1336 base->event_count_max = 0;
1337 }
1338
1339 EVBASE_RELEASE_LOCK(base, th_base_lock);
1340
1341 return r;
1342 }
1343
1344 /* Returns true iff we're currently watching any events. */
1345 static int
1346 event_haveevents(struct event_base *base)
1347 {
1348 /* Caller must hold th_base_lock */
1349 return (base->virtual_event_count > 0 || base->event_count > 0);
1350 }
1351
1352 /* "closure" function called when processing active signal events */
1353 static inline void
1354 event_signal_closure(struct event_base *base, struct event *ev)
1355 {
1356 short ncalls;
1357 int should_break;
1358
1359 /* Allows deletes to work */
1360 ncalls = ev->ev_ncalls;
1361 if (ncalls != 0)
1362 ev->ev_pncalls = &ncalls;
1363 EVBASE_RELEASE_LOCK(base, th_base_lock);
1364 while (ncalls) {
1365 ncalls--;
1366 ev->ev_ncalls = ncalls;
1367 if (ncalls == 0)
1368 ev->ev_pncalls = NULL;
1369 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1370
1371 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1372 should_break = base->event_break;
1373 EVBASE_RELEASE_LOCK(base, th_base_lock);
1374
1375 if (should_break) {
1376 if (ncalls != 0)
1377 ev->ev_pncalls = NULL;
1378 return;
1379 }
1380 }
1381 }
1382
1383 /* Common timeouts are special timeouts that are handled as queues rather than
1384 * in the minheap. This is more efficient than the minheap if we happen to
1385 * know that we're going to get several thousand timeout events all with
1386 * the same timeout value.
1387 *
1388 * Since all our timeout handling code assumes timevals can be copied,
1389 * assigned, etc, we can't use "magic pointer" to encode these common
1390 * timeouts. Searching through a list to see if every timeout is common could
1391 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1392 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1393 * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
1394 * of index into the event_base's array of common timeouts.
1395 */
1396
1397 #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
1398 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1399 #define COMMON_TIMEOUT_IDX_SHIFT 20
1400 #define COMMON_TIMEOUT_MASK 0xf0000000
1401 #define COMMON_TIMEOUT_MAGIC 0x50000000
1402
1403 #define COMMON_TIMEOUT_IDX(tv) \
1404 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
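/* Worked example of the encoding: a common timeout at index 3 with a duration
 * of 2.5 seconds is stored as
 *	tv_sec  = 2
 *	tv_usec = COMMON_TIMEOUT_MAGIC | (3 << COMMON_TIMEOUT_IDX_SHIFT) | 500000
 *	        = 0x50000000 | 0x00300000 | 0x0007a120 = 0x5037a120
 * so COMMON_TIMEOUT_IDX() recovers the index 3, and masking with
 * MICROSECONDS_MASK recovers the real 500000-microsecond part. */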
1405
1406 /** Return true iff 'tv' is a common timeout in 'base' */
1407 static inline int
1408 is_common_timeout(const struct timeval *tv,
1409 const struct event_base *base)
1410 {
1411 int idx;
1412 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1413 return 0;
1414 idx = COMMON_TIMEOUT_IDX(tv);
1415 return idx < base->n_common_timeouts;
1416 }
1417
1418 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1419 * one is a common timeout. */
1420 static inline int
1421 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1422 {
1423 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1424 (tv2->tv_usec & ~MICROSECONDS_MASK);
1425 }
1426
1427 /** Requires that 'tv' is a common timeout. Return the corresponding
1428 * common_timeout_list. */
1429 static inline struct common_timeout_list *
1430 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1431 {
1432 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1433 }
1434
1435 #if 0
1436 static inline int
1437 common_timeout_ok(const struct timeval *tv,
1438 struct event_base *base)
1439 {
1440 const struct timeval *expect =
1441 &get_common_timeout_list(base, tv)->duration;
1442 return tv->tv_sec == expect->tv_sec &&
1443 tv->tv_usec == expect->tv_usec;
1444 }
1445 #endif
1446
1447 /* Add the timeout for the first event in the given common timeout list to the
1448 * event_base's minheap. */
1449 static void
1450 common_timeout_schedule(struct common_timeout_list *ctl,
1451 const struct timeval *now, struct event *head)
1452 {
1453 struct timeval timeout = head->ev_timeout;
1454 timeout.tv_usec &= MICROSECONDS_MASK;
1455 event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1456 }
1457
1458 /* Callback: invoked when the timeout for a common timeout queue triggers.
1459 * This means that (at least) the first event in that queue should be run,
1460 * and the timeout should be rescheduled if there are more events. */
1461 static void
1462 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1463 {
1464 struct timeval now;
1465 struct common_timeout_list *ctl = arg;
1466 struct event_base *base = ctl->base;
1467 struct event *ev = NULL;
1468 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1469 gettime(base, &now);
1470 while (1) {
1471 ev = TAILQ_FIRST(&ctl->events);
1472 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1473 (ev->ev_timeout.tv_sec == now.tv_sec &&
1474 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1475 break;
1476 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1477 event_active_nolock_(ev, EV_TIMEOUT, 1);
1478 }
1479 if (ev)
1480 common_timeout_schedule(ctl, &now, ev);
1481 EVBASE_RELEASE_LOCK(base, th_base_lock);
1482 }
1483
1484 #define MAX_COMMON_TIMEOUTS 256
1485
1486 const struct timeval *
1487 event_base_init_common_timeout(struct event_base *base,
1488 const struct timeval *duration)
1489 {
1490 int i;
1491 struct timeval tv;
1492 const struct timeval *result=NULL;
1493 struct common_timeout_list *new_ctl;
1494
1495 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1496 if (duration->tv_usec > 1000000) {
1497 memcpy(&tv, duration, sizeof(struct timeval));
1498 if (is_common_timeout(duration, base))
1499 tv.tv_usec &= MICROSECONDS_MASK;
1500 tv.tv_sec += tv.tv_usec / 1000000;
1501 tv.tv_usec %= 1000000;
1502 duration = &tv;
1503 }
1504 for (i = 0; i < base->n_common_timeouts; ++i) {
1505 const struct common_timeout_list *ctl =
1506 base->common_timeout_queues[i];
1507 if (duration->tv_sec == ctl->duration.tv_sec &&
1508 duration->tv_usec ==
1509 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1510 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1511 result = &ctl->duration;
1512 goto done;
1513 }
1514 }
1515 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1516 event_warnx("%s: Too many common timeouts already in use; "
1517 "we only support %d per event_base", __func__,
1518 MAX_COMMON_TIMEOUTS);
1519 goto done;
1520 }
1521 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1522 int n = base->n_common_timeouts < 16 ? 16 :
1523 base->n_common_timeouts*2;
1524 struct common_timeout_list **newqueues =
1525 mm_realloc(base->common_timeout_queues,
1526 n*sizeof(struct common_timeout_queue *));
1527 if (!newqueues) {
1528 event_warn("%s: realloc",__func__);
1529 goto done;
1530 }
1531 base->n_common_timeouts_allocated = n;
1532 base->common_timeout_queues = newqueues;
1533 }
1534 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1535 if (!new_ctl) {
1536 event_warn("%s: calloc",__func__);
1537 goto done;
1538 }
1539 TAILQ_INIT(&new_ctl->events);
1540 new_ctl->duration.tv_sec = duration->tv_sec;
1541 new_ctl->duration.tv_usec =
1542 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1543 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1544 evtimer_assign(&new_ctl->timeout_event, base,
1545 common_timeout_callback, new_ctl);
1546 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1547 event_priority_set(&new_ctl->timeout_event, 0);
1548 new_ctl->base = base;
1549 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1550 result = &new_ctl->duration;
1551
1552 done:
1553 if (result)
1554 EVUTIL_ASSERT(is_common_timeout(result, base));
1555
1556 EVBASE_RELEASE_LOCK(base, th_base_lock);
1557 return result;
1558 }
1559
1560 /* Closure function invoked when we're activating a persistent event. */
1561 static inline void
1562 event_persist_closure(struct event_base *base, struct event *ev)
1563 {
1564 void (*evcb_callback)(evutil_socket_t, short, void *);
1565
1566 // Other fields of *ev that must be stored before executing
1567 evutil_socket_t evcb_fd;
1568 short evcb_res;
1569 void *evcb_arg;
1570
1571 /* reschedule the persistent event if we have a timeout. */
1572 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1573 /* If there was a timeout, we want it to run at an interval of
1574 * ev_io_timeout after the last time it was _scheduled_ for,
1575 * not ev_io_timeout after _now_. If it fired for another
1576 * reason, though, the timeout ought to start ticking _now_. */
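/* Illustration: with ev_io_timeout = 10 sec and the event scheduled to
 * time out at T, a timeout firing at T reschedules it for T+10, while an
 * EV_READ wakeup 3 seconds early (at T-3) reschedules it for T+7. */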
1577 struct timeval run_at, relative_to, delay, now;
1578 ev_uint32_t usec_mask = 0;
1579 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1580 &ev->ev_io_timeout));
1581 gettime(base, &now);
1582 if (is_common_timeout(&ev->ev_timeout, base)) {
1583 delay = ev->ev_io_timeout;
1584 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1585 delay.tv_usec &= MICROSECONDS_MASK;
1586 if (ev->ev_res & EV_TIMEOUT) {
1587 relative_to = ev->ev_timeout;
1588 relative_to.tv_usec &= MICROSECONDS_MASK;
1589 } else {
1590 relative_to = now;
1591 }
1592 } else {
1593 delay = ev->ev_io_timeout;
1594 if (ev->ev_res & EV_TIMEOUT) {
1595 relative_to = ev->ev_timeout;
1596 } else {
1597 relative_to = now;
1598 }
1599 }
1600 evutil_timeradd(&relative_to, &delay, &run_at);
1601 if (evutil_timercmp(&run_at, &now, <)) {
1602 /* Looks like we missed at least one invocation due to
1603 * a clock jump, not running the event loop for a
1604 * while, really slow callbacks, or
1605 * something. Reschedule relative to now.
1606 */
1607 evutil_timeradd(&now, &delay, &run_at);
1608 }
1609 run_at.tv_usec |= usec_mask;
1610 event_add_nolock_(ev, &run_at, 1);
1611 }
1612
1613 // Save our callback before we release the lock
1614 evcb_callback = ev->ev_callback;
1615 evcb_fd = ev->ev_fd;
1616 evcb_res = ev->ev_res;
1617 evcb_arg = ev->ev_arg;
1618
1619 // Release the lock
1620 EVBASE_RELEASE_LOCK(base, th_base_lock);
1621
1622 // Execute the callback
1623 (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1624 }
1625
1626 /*
1627 Helper for event_process_active to process all the events in a single queue,
1628 releasing the lock as we go. This function requires that the lock be held
1629 when it's invoked. Returns -1 if we get a signal or an event_break that
1630 means we should stop processing any active events now. Otherwise returns
1631 the number of non-internal event_callbacks that we processed.
1632 */
1633 static int
1634 event_process_active_single_queue(struct event_base *base,
1635 struct evcallback_list *activeq,
1636 int max_to_process, const struct timeval *endtime)
1637 {
1638 struct event_callback *evcb;
1639 int count = 0;
1640
1641 EVUTIL_ASSERT(activeq != NULL);
1642
1643 for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1644 struct event *ev=NULL;
1645 if (evcb->evcb_flags & EVLIST_INIT) {
1646 ev = event_callback_to_event(evcb);
1647
1648 if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1649 event_queue_remove_active(base, evcb);
1650 else
1651 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1652 event_debug((
1653 "event_process_active: event: %p, %s%s%scall %p",
1654 ev,
1655 ev->ev_res & EV_READ ? "EV_READ " : " ",
1656 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1657 ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1658 ev->ev_callback));
1659 } else {
1660 event_queue_remove_active(base, evcb);
1661 event_debug(("event_process_active: event_callback %p, "
1662 "closure %d, call %p",
1663 evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1664 }
1665
1666 if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1667 ++count;
1668
1669
1670 base->current_event = evcb;
1671 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1672 base->current_event_waiters = 0;
1673 #endif
1674
1675 switch (evcb->evcb_closure) {
1676 case EV_CLOSURE_EVENT_SIGNAL:
1677 EVUTIL_ASSERT(ev != NULL);
1678 event_signal_closure(base, ev);
1679 break;
1680 case EV_CLOSURE_EVENT_PERSIST:
1681 EVUTIL_ASSERT(ev != NULL);
1682 event_persist_closure(base, ev);
1683 break;
1684 case EV_CLOSURE_EVENT: {
1685 void (*evcb_callback)(evutil_socket_t, short, void *);
1686 short res;
1687 EVUTIL_ASSERT(ev != NULL);
1688 evcb_callback = *ev->ev_callback;
1689 res = ev->ev_res;
1690 EVBASE_RELEASE_LOCK(base, th_base_lock);
1691 evcb_callback(ev->ev_fd, res, ev->ev_arg);
1692 }
1693 break;
1694 case EV_CLOSURE_CB_SELF: {
1695 void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1696 EVBASE_RELEASE_LOCK(base, th_base_lock);
1697 evcb_selfcb(evcb, evcb->evcb_arg);
1698 }
1699 break;
1700 case EV_CLOSURE_EVENT_FINALIZE:
1701 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1702 void (*evcb_evfinalize)(struct event *, void *);
1703 int evcb_closure = evcb->evcb_closure;
1704 EVUTIL_ASSERT(ev != NULL);
1705 base->current_event = NULL;
1706 evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1707 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1708 EVBASE_RELEASE_LOCK(base, th_base_lock);
1709 event_debug_note_teardown_(ev);
1710 evcb_evfinalize(ev, ev->ev_arg);
1711 if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1712 mm_free(ev);
1713 }
1714 break;
1715 case EV_CLOSURE_CB_FINALIZE: {
1716 void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1717 base->current_event = NULL;
1718 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1719 EVBASE_RELEASE_LOCK(base, th_base_lock);
1720 evcb_cbfinalize(evcb, evcb->evcb_arg);
1721 }
1722 break;
1723 default:
1724 EVUTIL_ASSERT(0);
1725 }
1726
1727 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1728 base->current_event = NULL;
1729 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1730 if (base->current_event_waiters) {
1731 base->current_event_waiters = 0;
1732 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1733 }
1734 #endif
1735
1736 if (base->event_break)
1737 return -1;
1738 if (count >= max_to_process)
1739 return count;
1740 if (count && endtime) {
1741 struct timeval now;
1742 update_time_cache(base);
1743 gettime(base, &now);
1744 if (evutil_timercmp(&now, endtime, >=))
1745 return count;
1746 }
1747 if (base->event_continue)
1748 break;
1749 }
1750 return count;
1751 }
1752
1753 /*
1754 * Active events are stored in priority queues. Lower priorities are always
1755 * processed before higher priorities. Low priority events can starve high
1756 * priority ones.
1757 */
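/*
 * Illustrative sketch (editorial addition, not original source): how an
 * application might use priorities through the public API.  Assumes a base
 * configured for three priority levels; "urgent_cb" is a hypothetical name.
 *
 *     struct event_base *base = event_base_new();
 *     event_base_priority_init(base, 3);            // priorities 0..2
 *     struct event *urgent = event_new(base, fd, EV_READ | EV_PERSIST,
 *                                      urgent_cb, NULL);
 *     event_priority_set(urgent, 0);                // 0 is served first
 *     event_add(urgent, NULL);
 */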
1758
1759 static int
1760 event_process_active(struct event_base *base)
1761 {
1762 /* Caller must hold th_base_lock */
1763 struct evcallback_list *activeq = NULL;
1764 int i, c = 0;
1765 const struct timeval *endtime;
1766 struct timeval tv;
1767 const int maxcb = base->max_dispatch_callbacks;
1768 const int limit_after_prio = base->limit_callbacks_after_prio;
1769 if (base->max_dispatch_time.tv_sec >= 0) {
1770 update_time_cache(base);
1771 gettime(base, &tv);
1772 evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1773 endtime = &tv;
1774 } else {
1775 endtime = NULL;
1776 }
1777
1778 for (i = 0; i < base->nactivequeues; ++i) {
1779 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1780 base->event_running_priority = i;
1781 activeq = &base->activequeues[i];
1782 if (i < limit_after_prio)
1783 c = event_process_active_single_queue(base, activeq,
1784 INT_MAX, NULL);
1785 else
1786 c = event_process_active_single_queue(base, activeq,
1787 maxcb, endtime);
1788 if (c < 0) {
1789 goto done;
1790 } else if (c > 0)
1791 break; /* Processed a real event; do not
1792 * consider lower-priority events */
1793 /* If we get here, all of the events we processed
1794 * were internal. Continue. */
1795 }
1796 }
1797
1798 done:
1799 base->event_running_priority = -1;
1800
1801 return c;
1802 }
1803
1804 /*
1805 * Wait continuously for events. We exit only if no events are left.
1806 */
1807
1808 int
1809 event_dispatch(void)
1810 {
1811 return (event_loop(0));
1812 }
1813
1814 int
1815 event_base_dispatch(struct event_base *event_base)
1816 {
1817 return (event_base_loop(event_base, 0));
1818 }
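/*
 * Minimal usage sketch (editorial addition): the typical lifetime of a
 * dispatch loop.  event_base_dispatch() behaves like event_base_loop(base, 0)
 * and returns once no events remain, or after event_base_loopexit() /
 * event_base_loopbreak() has been called.
 *
 *     struct event_base *base = event_base_new();
 *     // ... event_new()/event_add() calls here ...
 *     event_base_dispatch(base);
 *     event_base_free(base);
 */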
1819
1820 const char *
1821 event_base_get_method(const struct event_base *base)
1822 {
1823 EVUTIL_ASSERT(base);
1824 return (base->evsel->name);
1825 }
1826
1827 /** Callback: used to implement event_base_loopexit by telling the event_base
1828 * that it's time to exit its loop. */
1829 static void
1830 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1831 {
1832 struct event_base *base = arg;
1833 base->event_gotterm = 1;
1834 }
1835
1836 int
1837 event_loopexit(const struct timeval *tv)
1838 {
1839 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1840 current_base, tv));
1841 }
1842
1843 int
1844 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1845 {
1846 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1847 event_base, tv));
1848 }
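/*
 * Hedged example (editorial addition): asking the loop to exit after a
 * deadline.  loopexit schedules a one-shot timeout inside the base, so
 * callbacks that are already active still run before the loop returns.
 *
 *     struct timeval ten_sec = { 10, 0 };
 *     event_base_loopexit(base, &ten_sec);   // exit about 10s from now
 *     event_base_loopexit(base, NULL);       // exit after current callbacks
 */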
1849
1850 int
1851 event_loopbreak(void)
1852 {
1853 return (event_base_loopbreak(current_base));
1854 }
1855
1856 int
1857 event_base_loopbreak(struct event_base *event_base)
1858 {
1859 int r = 0;
1860 if (event_base == NULL)
1861 return (-1);
1862
1863 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1864 event_base->event_break = 1;
1865
1866 if (EVBASE_NEED_NOTIFY(event_base)) {
1867 r = evthread_notify_base(event_base);
1868 } else {
1869 r = (0);
1870 }
1871 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1872 return r;
1873 }
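/*
 * Hedged example (editorial addition): unlike loopexit, loopbreak makes the
 * loop return as soon as the currently running callback finishes, without
 * draining the rest of the active queue.  Typically called from inside a
 * callback; "interrupt_cb" below is a hypothetical name.
 *
 *     static void interrupt_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *             struct event_base *base = arg;
 *             event_base_loopbreak(base);
 *     }
 */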
1874
1875 int
1876 event_base_loopcontinue(struct event_base *event_base)
1877 {
1878 int r = 0;
1879 if (event_base == NULL)
1880 return (-1);
1881
1882 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1883 event_base->event_continue = 1;
1884
1885 if (EVBASE_NEED_NOTIFY(event_base)) {
1886 r = evthread_notify_base(event_base);
1887 } else {
1888 r = (0);
1889 }
1890 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1891 return r;
1892 }
1893
1894 int
1895 event_base_got_break(struct event_base *event_base)
1896 {
1897 int res;
1898 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1899 res = event_base->event_break;
1900 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1901 return res;
1902 }
1903
1904 int
1905 event_base_got_exit(struct event_base *event_base)
1906 {
1907 int res;
1908 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1909 res = event_base->event_gotterm;
1910 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1911 return res;
1912 }
1913
1914 /* not thread safe */
1915
1916 int
1917 event_loop(int flags)
1918 {
1919 return event_base_loop(current_base, flags);
1920 }
1921
1922 int
1923 event_base_loop(struct event_base *base, int flags)
1924 {
1925 const struct eventop *evsel = base->evsel;
1926 struct timeval tv;
1927 struct timeval *tv_p;
1928 int res, done, retval = 0;
1929
1930 /* Grab the lock. We will release it inside evsel.dispatch, and again
1931 * as we invoke user callbacks. */
1932 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1933
1934 if (base->running_loop) {
1935 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1936 " can run on each event_base at once.", __func__);
1937 EVBASE_RELEASE_LOCK(base, th_base_lock);
1938 return -1;
1939 }
1940
1941 base->running_loop = 1;
1942
1943 clear_time_cache(base);
1944
1945 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1946 evsig_set_base_(base);
1947
1948 done = 0;
1949
1950 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1951 base->th_owner_id = EVTHREAD_GET_ID();
1952 #endif
1953
1954 base->event_gotterm = base->event_break = 0;
1955
1956 while (!done) {
1957 base->event_continue = 0;
1958 base->n_deferreds_queued = 0;
1959
1960 /* Terminate the loop if we have been asked to */
1961 if (base->event_gotterm) {
1962 break;
1963 }
1964
1965 if (base->event_break) {
1966 break;
1967 }
1968
1969 tv_p = &tv;
1970 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1971 timeout_next(base, &tv_p);
1972 } else {
1973 /*
1974 * if we have active events, we just poll new events
1975 * without waiting.
1976 */
1977 evutil_timerclear(&tv);
1978 }
1979
1980 /* If we have no events, we just exit */
1981 if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1982 !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1983 event_debug(("%s: no events registered.", __func__));
1984 retval = 1;
1985 goto done;
1986 }
1987
1988 event_queue_make_later_events_active(base);
1989
1990 clear_time_cache(base);
1991
1992 res = evsel->dispatch(base, tv_p);
1993
1994 if (res == -1) {
1995 event_debug(("%s: dispatch returned unsuccessfully.",
1996 __func__));
1997 retval = -1;
1998 goto done;
1999 }
2000
2001 update_time_cache(base);
2002
2003 timeout_process(base);
2004
2005 if (N_ACTIVE_CALLBACKS(base)) {
2006 int n = event_process_active(base);
2007 if ((flags & EVLOOP_ONCE)
2008 && N_ACTIVE_CALLBACKS(base) == 0
2009 && n != 0)
2010 done = 1;
2011 } else if (flags & EVLOOP_NONBLOCK)
2012 done = 1;
2013 }
2014 event_debug(("%s: asked to terminate loop.", __func__));
2015
2016 done:
2017 clear_time_cache(base);
2018 base->running_loop = 0;
2019
2020 EVBASE_RELEASE_LOCK(base, th_base_lock);
2021
2022 return (retval);
2023 }
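/*
 * Illustrative sketch (editorial addition) of the loop flags handled above:
 * EVLOOP_ONCE blocks until at least one event becomes active and its
 * callbacks have run; EVLOOP_NONBLOCK polls once without waiting;
 * EVLOOP_NO_EXIT_ON_EMPTY keeps looping even when no events are pending.
 *
 *     event_base_loop(base, EVLOOP_ONCE);               // one blocking pass
 *     event_base_loop(base, EVLOOP_NONBLOCK);           // poll and return
 *     event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);   // run until broken
 */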
2024
2025 /* One-time callback to implement event_base_once: invokes the user callback,
2026 * then deletes the allocated storage */
2027 static void
2028 event_once_cb(evutil_socket_t fd, short events, void *arg)
2029 {
2030 struct event_once *eonce = arg;
2031
2032 (*eonce->cb)(fd, events, eonce->arg);
2033 EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2034 LIST_REMOVE(eonce, next_once);
2035 EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2036 event_debug_unassign(&eonce->ev);
2037 mm_free(eonce);
2038 }
2039
2040 /* not threadsafe, event scheduled once. */
2041 int
2042 event_once(evutil_socket_t fd, short events,
2043 void (*callback)(evutil_socket_t, short, void *),
2044 void *arg, const struct timeval *tv)
2045 {
2046 return event_base_once(current_base, fd, events, callback, arg, tv);
2047 }
2048
2049 /* Schedules an event once */
2050 int
2051 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2052 void (*callback)(evutil_socket_t, short, void *),
2053 void *arg, const struct timeval *tv)
2054 {
2055 struct event_once *eonce;
2056 int res = 0;
2057 int activate = 0;
2058
2059 if (!base)
2060 return (-1);
2061
2062 /* We cannot support signals that just fire once, or persistent
2063 * events. */
2064 if (events & (EV_SIGNAL|EV_PERSIST))
2065 return (-1);
2066
2067 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2068 return (-1);
2069
2070 eonce->cb = callback;
2071 eonce->arg = arg;
2072
2073 if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2074 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2075
2076 if (tv == NULL || ! evutil_timerisset(tv)) {
2077 /* If the event is going to become active immediately,
2078 * don't put it on the timeout queue. This is one
2079 * idiom for scheduling a callback, so let's make
2080 * it fast (and order-preserving). */
2081 activate = 1;
2082 }
2083 } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2084 events &= EV_READ|EV_WRITE|EV_CLOSED;
2085
2086 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2087 } else {
2088 /* Bad event combination */
2089 mm_free(eonce);
2090 return (-1);
2091 }
2092
2093 if (res == 0) {
2094 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2095 if (activate)
2096 event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2097 else
2098 res = event_add_nolock_(&eonce->ev, tv, 0);
2099
2100 if (res != 0) {
2101 mm_free(eonce);
2102 return (res);
2103 } else {
2104 LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2105 }
2106 EVBASE_RELEASE_LOCK(base, th_base_lock);
2107 }
2108
2109 return (0);
2110 }
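/*
 * Hedged usage sketch (editorial addition): scheduling a one-shot callback.
 * Because the storage is allocated and freed internally, the caller never
 * sees the struct event and cannot cancel it; "once_cb" is a hypothetical
 * name.
 *
 *     static void once_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *             // runs exactly once, then its storage is released
 *     }
 *     struct timeval two_sec = { 2, 0 };
 *     event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &two_sec);
 */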
2111
2112 int
2113 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2114 {
2115 if (!base)
2116 base = current_base;
2117 if (arg == &event_self_cbarg_ptr_)
2118 arg = ev;
2119
2120 if (!(events & EV_SIGNAL))
2121 event_debug_assert_socket_nonblocking_(fd);
2122 event_debug_assert_not_added_(ev);
2123
2124 ev->ev_base = base;
2125
2126 ev->ev_callback = callback;
2127 ev->ev_arg = arg;
2128 ev->ev_fd = fd;
2129 ev->ev_events = events;
2130 ev->ev_res = 0;
2131 ev->ev_flags = EVLIST_INIT;
2132 ev->ev_ncalls = 0;
2133 ev->ev_pncalls = NULL;
2134
2135 if (events & EV_SIGNAL) {
2136 if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2137 event_warnx("%s: EV_SIGNAL is not compatible with "
2138 "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2139 return -1;
2140 }
2141 ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2142 } else {
2143 if (events & EV_PERSIST) {
2144 evutil_timerclear(&ev->ev_io_timeout);
2145 ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2146 } else {
2147 ev->ev_closure = EV_CLOSURE_EVENT;
2148 }
2149 }
2150
2151 min_heap_elem_init_(ev);
2152
2153 if (base != NULL) {
2154 /* by default, we put new events into the middle priority */
2155 ev->ev_pri = base->nactivequeues / 2;
2156 }
2157
2158 event_debug_note_setup_(ev);
2159
2160 return 0;
2161 }
2162
2163 int
2164 event_base_set(struct event_base *base, struct event *ev)
2165 {
2166 /* Only innocent events may be assigned to a different base */
2167 if (ev->ev_flags != EVLIST_INIT)
2168 return (-1);
2169
2170 event_debug_assert_is_setup_(ev);
2171
2172 ev->ev_base = base;
2173 ev->ev_pri = base->nactivequeues/2;
2174
2175 return (0);
2176 }
2177
2178 void
2179 event_set(struct event *ev, evutil_socket_t fd, short events,
2180 void (*callback)(evutil_socket_t, short, void *), void *arg)
2181 {
2182 int r;
2183 r = event_assign(ev, current_base, fd, events, callback, arg);
2184 EVUTIL_ASSERT(r == 0);
2185 }
2186
2187 void *
2188 event_self_cbarg(void)
2189 {
2190 return &event_self_cbarg_ptr_;
2191 }
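/*
 * Hedged example (editorial addition): event_self_cbarg() returns a magic
 * pointer that event_assign()/event_new() replace with the event itself,
 * which is how a callback can obtain a handle on its own event.  "self_cb"
 * is a hypothetical name.
 *
 *     static void self_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *             struct event *self = arg;     // the event we were created as
 *             event_del(self);
 *     }
 *     struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST,
 *                                  self_cb, event_self_cbarg());
 */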
2192
2193 struct event *
2194 event_base_get_running_event(struct event_base *base)
2195 {
2196 struct event *ev = NULL;
2197 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2198 if (EVBASE_IN_THREAD(base)) {
2199 struct event_callback *evcb = base->current_event;
2200 if (evcb->evcb_flags & EVLIST_INIT)
2201 ev = event_callback_to_event(evcb);
2202 }
2203 EVBASE_RELEASE_LOCK(base, th_base_lock);
2204 return ev;
2205 }
2206
2207 struct event *
2208 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2209 {
2210 struct event *ev;
2211 ev = mm_malloc(sizeof(struct event));
2212 if (ev == NULL)
2213 return (NULL);
2214 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2215 mm_free(ev);
2216 return (NULL);
2217 }
2218
2219 return (ev);
2220 }
2221
2222 void
2223 event_free(struct event *ev)
2224 {
2225 	/* This is disabled so that an event that has been finalized remains a
2226 	 * valid target for event_free(); hence the assertion below is commented out. */
2227 // event_debug_assert_is_setup_(ev);
2228
2229 /* make sure that this event won't be coming back to haunt us. */
2230 event_del(ev);
2231 event_debug_note_teardown_(ev);
2232 mm_free(ev);
2233
2234 }
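/*
 * Minimal lifecycle sketch (editorial addition): heap-allocated events are
 * created with event_new(), made pending with event_add(), and eventually
 * released with event_free(), which deletes them first if necessary.
 * "read_cb" is a hypothetical callback name.
 *
 *     struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, read_cb, NULL);
 *     struct timeval five_sec = { 5, 0 };
 *     event_add(ev, &five_sec);        // pending with a 5s timeout
 *     // ... later ...
 *     event_free(ev);
 */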
2235
2236 void
2237 event_debug_unassign(struct event *ev)
2238 {
2239 event_debug_assert_not_added_(ev);
2240 event_debug_note_teardown_(ev);
2241
2242 ev->ev_flags &= ~EVLIST_INIT;
2243 }
2244
2245 #define EVENT_FINALIZE_FREE_ 0x10000
2246 static int
2247 event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2248 {
2249 ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2250 EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2251
2252 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2253 ev->ev_closure = closure;
2254 ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2255 event_active_nolock_(ev, EV_FINALIZE, 1);
2256 ev->ev_flags |= EVLIST_FINALIZING;
2257 return 0;
2258 }
2259
2260 static int
2261 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2262 {
2263 int r;
2264 struct event_base *base = ev->ev_base;
2265 if (EVUTIL_FAILURE_CHECK(!base)) {
2266 event_warnx("%s: event has no event_base set.", __func__);
2267 return -1;
2268 }
2269
2270 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2271 r = event_finalize_nolock_(base, flags, ev, cb);
2272 EVBASE_RELEASE_LOCK(base, th_base_lock);
2273 return r;
2274 }
2275
2276 int
2277 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2278 {
2279 return event_finalize_impl_(flags, ev, cb);
2280 }
2281
2282 int
2283 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2284 {
2285 return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2286 }
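/*
 * Hedged sketch (editorial addition): event_free_finalize() arranges for
 * 'cb' to run from the loop once the event can no longer be executing, after
 * which the event's storage is freed.  Useful when a callback argument must
 * outlive any in-flight invocation; "fin_cb" is a hypothetical name.
 *
 *     static void fin_cb(struct event *ev, void *arg)
 *     {
 *             free(arg);               // safe: no callback is running now
 *     }
 *     event_free_finalize(0, ev, fin_cb);
 */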
2287
2288 void
2289 event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2290 {
2291 struct event *ev = NULL;
2292 if (evcb->evcb_flags & EVLIST_INIT) {
2293 ev = event_callback_to_event(evcb);
2294 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2295 } else {
2296 event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2297 }
2298
2299 evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2300 evcb->evcb_cb_union.evcb_cbfinalize = cb;
2301 event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2302 evcb->evcb_flags |= EVLIST_FINALIZING;
2303 }
2304
2305 void
2306 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2307 {
2308 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2309 event_callback_finalize_nolock_(base, flags, evcb, cb);
2310 EVBASE_RELEASE_LOCK(base, th_base_lock);
2311 }
2312
2313 /** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
2314 * callback will be invoked on *one of them*, after they have *all* been
2315 * finalized. */
2316 int
2317 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2318 {
2319 int n_pending = 0, i;
2320
2321 if (base == NULL)
2322 base = current_base;
2323
2324 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2325
2326 event_debug(("%s: %d events finalizing", __func__, n_cbs));
2327
2328 /* At most one can be currently executing; the rest we just
2329 * cancel... But we always make sure that the finalize callback
2330 * runs. */
2331 for (i = 0; i < n_cbs; ++i) {
2332 struct event_callback *evcb = evcbs[i];
2333 if (evcb == base->current_event) {
2334 event_callback_finalize_nolock_(base, 0, evcb, cb);
2335 ++n_pending;
2336 } else {
2337 event_callback_cancel_nolock_(base, evcb, 0);
2338 }
2339 }
2340
2341 if (n_pending == 0) {
2342 /* Just do the first one. */
2343 event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2344 }
2345
2346 EVBASE_RELEASE_LOCK(base, th_base_lock);
2347 return 0;
2348 }
2349
2350 /*
2351  * Sets the priority of an event - if the event is already active,
2352  * changing the priority will fail.
2353 */
2354
2355 int
2356 event_priority_set(struct event *ev, int pri)
2357 {
2358 event_debug_assert_is_setup_(ev);
2359
2360 if (ev->ev_flags & EVLIST_ACTIVE)
2361 return (-1);
2362 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2363 return (-1);
2364
2365 ev->ev_pri = pri;
2366
2367 return (0);
2368 }
2369
2370 /*
2371 * Checks if a specific event is pending or scheduled.
2372 */
2373
2374 int
2375 event_pending(const struct event *ev, short event, struct timeval *tv)
2376 {
2377 int flags = 0;
2378
2379 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2380 event_warnx("%s: event has no event_base set.", __func__);
2381 return 0;
2382 }
2383
2384 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2385 event_debug_assert_is_setup_(ev);
2386
2387 if (ev->ev_flags & EVLIST_INSERTED)
2388 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2389 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2390 flags |= ev->ev_res;
2391 if (ev->ev_flags & EVLIST_TIMEOUT)
2392 flags |= EV_TIMEOUT;
2393
2394 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2395
2396 /* See if there is a timeout that we should report */
2397 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2398 struct timeval tmp = ev->ev_timeout;
2399 tmp.tv_usec &= MICROSECONDS_MASK;
2400 		/* correctly remap to real time */
2401 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2402 }
2403
2404 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2405
2406 return (flags & event);
2407 }
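/*
 * Hedged example (editorial addition): querying what an event is waiting for
 * and, for timeouts, roughly when it will fire (reported as an absolute
 * time, remapped as above).
 *
 *     struct timeval expires;
 *     if (event_pending(ev, EV_READ | EV_TIMEOUT, &expires) & EV_TIMEOUT) {
 *             // 'expires' now holds the expiry time of the pending timeout
 *     }
 */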
2408
2409 int
2410 event_initialized(const struct event *ev)
2411 {
2412 if (!(ev->ev_flags & EVLIST_INIT))
2413 return 0;
2414
2415 return 1;
2416 }
2417
2418 void
2419 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2420 {
2421 event_debug_assert_is_setup_(event);
2422
2423 if (base_out)
2424 *base_out = event->ev_base;
2425 if (fd_out)
2426 *fd_out = event->ev_fd;
2427 if (events_out)
2428 *events_out = event->ev_events;
2429 if (callback_out)
2430 *callback_out = event->ev_callback;
2431 if (arg_out)
2432 *arg_out = event->ev_arg;
2433 }
2434
2435 size_t
2436 event_get_struct_event_size(void)
2437 {
2438 return sizeof(struct event);
2439 }
2440
2441 evutil_socket_t
2442 event_get_fd(const struct event *ev)
2443 {
2444 event_debug_assert_is_setup_(ev);
2445 return ev->ev_fd;
2446 }
2447
2448 struct event_base *
2449 event_get_base(const struct event *ev)
2450 {
2451 event_debug_assert_is_setup_(ev);
2452 return ev->ev_base;
2453 }
2454
2455 short
2456 event_get_events(const struct event *ev)
2457 {
2458 event_debug_assert_is_setup_(ev);
2459 return ev->ev_events;
2460 }
2461
2462 event_callback_fn
2463 event_get_callback(const struct event *ev)
2464 {
2465 event_debug_assert_is_setup_(ev);
2466 return ev->ev_callback;
2467 }
2468
2469 void *
2470 event_get_callback_arg(const struct event *ev)
2471 {
2472 event_debug_assert_is_setup_(ev);
2473 return ev->ev_arg;
2474 }
2475
2476 int
2477 event_get_priority(const struct event *ev)
2478 {
2479 event_debug_assert_is_setup_(ev);
2480 return ev->ev_pri;
2481 }
2482
2483 int
2484 event_add(struct event *ev, const struct timeval *tv)
2485 {
2486 int res;
2487
2488 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2489 event_warnx("%s: event has no event_base set.", __func__);
2490 return -1;
2491 }
2492
2493 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2494
2495 res = event_add_nolock_(ev, tv, 0);
2496
2497 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2498
2499 return (res);
2500 }
2501
2502 /* Helper callback: wake an event_base from another thread. This version
2503 * works by writing a byte to one end of a socketpair, so that the event_base
2504 * listening on the other end will wake up as the corresponding event
2505 * triggers */
2506 static int
2507 evthread_notify_base_default(struct event_base *base)
2508 {
2509 char buf[1];
2510 int r;
2511 buf[0] = (char) 0;
2512 #ifdef _WIN32
2513 r = send(base->th_notify_fd[1], buf, 1, 0);
2514 #else
2515 r = write(base->th_notify_fd[1], buf, 1);
2516 #endif
2517 return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2518 }
2519
2520 #ifdef EVENT__HAVE_EVENTFD
2521 /* Helper callback: wake an event_base from another thread. This version
2522 * assumes that you have a working eventfd() implementation. */
2523 static int
2524 evthread_notify_base_eventfd(struct event_base *base)
2525 {
2526 ev_uint64_t msg = 1;
2527 int r;
2528 do {
2529 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2530 } while (r < 0 && errno == EAGAIN);
2531
2532 return (r < 0) ? -1 : 0;
2533 }
2534 #endif
2535
2536
2537 /** Tell the thread currently running the event_loop for base (if any) that it
2538 * needs to stop waiting in its dispatch function (if it is) and process all
2539 * active callbacks. */
2540 static int
2541 evthread_notify_base(struct event_base *base)
2542 {
2543 EVENT_BASE_ASSERT_LOCKED(base);
2544 if (!base->th_notify_fn)
2545 return -1;
2546 if (base->is_notify_pending)
2547 return 0;
2548 base->is_notify_pending = 1;
2549 return base->th_notify_fn(base);
2550 }
2551
2552 /* Implementation function to remove a timeout on a currently pending event.
2553 */
2554 int
2555 event_remove_timer_nolock_(struct event *ev)
2556 {
2557 struct event_base *base = ev->ev_base;
2558
2559 EVENT_BASE_ASSERT_LOCKED(base);
2560 event_debug_assert_is_setup_(ev);
2561
2562 event_debug(("event_remove_timer_nolock: event: %p", ev));
2563
2564 /* If it's not pending on a timeout, we don't need to do anything. */
2565 if (ev->ev_flags & EVLIST_TIMEOUT) {
2566 event_queue_remove_timeout(base, ev);
2567 evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2568 }
2569
2570 return (0);
2571 }
2572
2573 int
2574 event_remove_timer(struct event *ev)
2575 {
2576 int res;
2577
2578 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2579 event_warnx("%s: event has no event_base set.", __func__);
2580 return -1;
2581 }
2582
2583 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2584
2585 res = event_remove_timer_nolock_(ev);
2586
2587 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2588
2589 return (res);
2590 }
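/*
 * Hedged example (editorial addition): event_remove_timer() strips only the
 * timeout from a pending event, leaving any EV_READ/EV_WRITE part in place.
 *
 *     struct timeval thirty_sec = { 30, 0 };
 *     event_add(ev, &thirty_sec);      // readable OR 30s timeout
 *     event_remove_timer(ev);          // now waits for readability only
 */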
2591
2592 /* Implementation function to add an event. Works just like event_add,
2593 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2594 * we treat tv as an absolute time, not as an interval to add to the current
2595 * time */
2596 int
2597 event_add_nolock_(struct event *ev, const struct timeval *tv,
2598 int tv_is_absolute)
2599 {
2600 struct event_base *base = ev->ev_base;
2601 int res = 0;
2602 int notify = 0;
2603
2604 EVENT_BASE_ASSERT_LOCKED(base);
2605 event_debug_assert_is_setup_(ev);
2606
2607 event_debug((
2608 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2609 ev,
2610 EV_SOCK_ARG(ev->ev_fd),
2611 ev->ev_events & EV_READ ? "EV_READ " : " ",
2612 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2613 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2614 tv ? "EV_TIMEOUT " : " ",
2615 ev->ev_callback));
2616
2617 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2618
2619 if (ev->ev_flags & EVLIST_FINALIZING) {
2620 /* XXXX debug */
2621 return (-1);
2622 }
2623
2624 /*
2625 * prepare for timeout insertion further below, if we get a
2626 * failure on any step, we should not change any state.
2627 */
2628 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2629 if (min_heap_reserve_(&base->timeheap,
2630 1 + min_heap_size_(&base->timeheap)) == -1)
2631 return (-1); /* ENOMEM == errno */
2632 }
2633
2634 /* If the main thread is currently executing a signal event's
2635 * callback, and we are not the main thread, then we want to wait
2636 * until the callback is done before we mess with the event, or else
2637 * we can race on ev_ncalls and ev_pncalls below. */
2638 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2639 if (base->current_event == event_to_event_callback(ev) &&
2640 (ev->ev_events & EV_SIGNAL)
2641 && !EVBASE_IN_THREAD(base)) {
2642 ++base->current_event_waiters;
2643 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2644 }
2645 #endif
2646
2647 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2648 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2649 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2650 res = evmap_io_add_(base, ev->ev_fd, ev);
2651 else if (ev->ev_events & EV_SIGNAL)
2652 res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2653 if (res != -1)
2654 event_queue_insert_inserted(base, ev);
2655 if (res == 1) {
2656 /* evmap says we need to notify the main thread. */
2657 notify = 1;
2658 res = 0;
2659 }
2660 }
2661
2662 /*
2663 * we should change the timeout state only if the previous event
2664 * addition succeeded.
2665 */
2666 if (res != -1 && tv != NULL) {
2667 struct timeval now;
2668 int common_timeout;
2669 #ifdef USE_REINSERT_TIMEOUT
2670 int was_common;
2671 int old_timeout_idx;
2672 #endif
2673
2674 /*
2675 * for persistent timeout events, we remember the
2676 * timeout value and re-add the event.
2677 *
2678 * If tv_is_absolute, this was already set.
2679 */
2680 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2681 ev->ev_io_timeout = *tv;
2682
2683 #ifndef USE_REINSERT_TIMEOUT
2684 if (ev->ev_flags & EVLIST_TIMEOUT) {
2685 event_queue_remove_timeout(base, ev);
2686 }
2687 #endif
2688
2689 /* Check if it is active due to a timeout. Rescheduling
2690 * this timeout before the callback can be executed
2691 * removes it from the active list. */
2692 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2693 (ev->ev_res & EV_TIMEOUT)) {
2694 if (ev->ev_events & EV_SIGNAL) {
2695 /* See if we are just active executing
2696 * this event in a loop
2697 */
2698 if (ev->ev_ncalls && ev->ev_pncalls) {
2699 /* Abort loop */
2700 *ev->ev_pncalls = 0;
2701 }
2702 }
2703
2704 event_queue_remove_active(base, event_to_event_callback(ev));
2705 }
2706
2707 gettime(base, &now);
2708
2709 common_timeout = is_common_timeout(tv, base);
2710 #ifdef USE_REINSERT_TIMEOUT
2711 was_common = is_common_timeout(&ev->ev_timeout, base);
2712 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2713 #endif
2714
2715 if (tv_is_absolute) {
2716 ev->ev_timeout = *tv;
2717 } else if (common_timeout) {
2718 struct timeval tmp = *tv;
2719 tmp.tv_usec &= MICROSECONDS_MASK;
2720 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2721 ev->ev_timeout.tv_usec |=
2722 (tv->tv_usec & ~MICROSECONDS_MASK);
2723 } else {
2724 evutil_timeradd(&now, tv, &ev->ev_timeout);
2725 }
2726
2727 event_debug((
2728 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2729 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2730
2731 #ifdef USE_REINSERT_TIMEOUT
2732 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2733 #else
2734 event_queue_insert_timeout(base, ev);
2735 #endif
2736
2737 if (common_timeout) {
2738 struct common_timeout_list *ctl =
2739 get_common_timeout_list(base, &ev->ev_timeout);
2740 if (ev == TAILQ_FIRST(&ctl->events)) {
2741 common_timeout_schedule(ctl, &now, ev);
2742 }
2743 } else {
2744 struct event* top = NULL;
2745 /* See if the earliest timeout is now earlier than it
2746 * was before: if so, we will need to tell the main
2747 * thread to wake up earlier than it would otherwise.
2748 * We double check the timeout of the top element to
2749 * handle time distortions due to system suspension.
2750 */
2751 if (min_heap_elt_is_top_(ev))
2752 notify = 1;
2753 else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2754 evutil_timercmp(&top->ev_timeout, &now, <))
2755 notify = 1;
2756 }
2757 }
2758
2759 /* if we are not in the right thread, we need to wake up the loop */
2760 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2761 evthread_notify_base(base);
2762
2763 event_debug_note_add_(ev);
2764
2765 return (res);
2766 }
2767
2768 static int
2769 event_del_(struct event *ev, int blocking)
2770 {
2771 int res;
2772 struct event_base *base = ev->ev_base;
2773
2774 if (EVUTIL_FAILURE_CHECK(!base)) {
2775 event_warnx("%s: event has no event_base set.", __func__);
2776 return -1;
2777 }
2778
2779 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2780 res = event_del_nolock_(ev, blocking);
2781 EVBASE_RELEASE_LOCK(base, th_base_lock);
2782
2783 return (res);
2784 }
2785
2786 int
2787 event_del(struct event *ev)
2788 {
2789 return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2790 }
2791
2792 int
2793 event_del_block(struct event *ev)
2794 {
2795 return event_del_(ev, EVENT_DEL_BLOCK);
2796 }
2797
2798 int
2799 event_del_noblock(struct event *ev)
2800 {
2801 return event_del_(ev, EVENT_DEL_NOBLOCK);
2802 }
2803
2804 /** Helper for event_del: always called with th_base_lock held.
2805 *
2806 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2807 * EVEN_IF_FINALIZING} values. See those for more information.
2808 */
2809 int
2810 event_del_nolock_(struct event *ev, int blocking)
2811 {
2812 struct event_base *base;
2813 int res = 0, notify = 0;
2814
2815 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2816 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2817
2818 /* An event without a base has not been added */
2819 if (ev->ev_base == NULL)
2820 return (-1);
2821
2822 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2823
2824 if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2825 if (ev->ev_flags & EVLIST_FINALIZING) {
2826 /* XXXX Debug */
2827 return 0;
2828 }
2829 }
2830
2831 base = ev->ev_base;
2832
2833 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2834
2835 /* See if we are just active executing this event in a loop */
2836 if (ev->ev_events & EV_SIGNAL) {
2837 if (ev->ev_ncalls && ev->ev_pncalls) {
2838 /* Abort loop */
2839 *ev->ev_pncalls = 0;
2840 }
2841 }
2842
2843 if (ev->ev_flags & EVLIST_TIMEOUT) {
2844 /* NOTE: We never need to notify the main thread because of a
2845 * deleted timeout event: all that could happen if we don't is
2846 * that the dispatch loop might wake up too early. But the
2847 * point of notifying the main thread _is_ to wake up the
2848 * dispatch loop early anyway, so we wouldn't gain anything by
2849 * doing it.
2850 */
2851 event_queue_remove_timeout(base, ev);
2852 }
2853
2854 if (ev->ev_flags & EVLIST_ACTIVE)
2855 event_queue_remove_active(base, event_to_event_callback(ev));
2856 else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2857 event_queue_remove_active_later(base, event_to_event_callback(ev));
2858
2859 if (ev->ev_flags & EVLIST_INSERTED) {
2860 event_queue_remove_inserted(base, ev);
2861 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2862 res = evmap_io_del_(base, ev->ev_fd, ev);
2863 else
2864 res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2865 if (res == 1) {
2866 /* evmap says we need to notify the main thread. */
2867 notify = 1;
2868 res = 0;
2869 }
2870 /* If we do not have events, let's notify event base so it can
2871 * exit without waiting */
2872 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2873 notify = 1;
2874 }
2875
2876 /* if we are not in the right thread, we need to wake up the loop */
2877 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2878 evthread_notify_base(base);
2879
2880 event_debug_note_del_(ev);
2881
2882 /* If the main thread is currently executing this event's callback,
2883 * and we are not the main thread, then we want to wait until the
2884 * callback is done before returning. That way, when this function
2885 * returns, it will be safe to free the user-supplied argument.
2886 */
2887 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2888 if (blocking != EVENT_DEL_NOBLOCK &&
2889 base->current_event == event_to_event_callback(ev) &&
2890 !EVBASE_IN_THREAD(base) &&
2891 (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2892 ++base->current_event_waiters;
2893 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2894 }
2895 #endif
2896
2897 return (res);
2898 }
2899
2900 void
2901 event_active(struct event *ev, int res, short ncalls)
2902 {
2903 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2904 event_warnx("%s: event has no event_base set.", __func__);
2905 return;
2906 }
2907
2908 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2909
2910 event_debug_assert_is_setup_(ev);
2911
2912 event_active_nolock_(ev, res, ncalls);
2913
2914 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2915 }
2916
2917
2918 void
2919 event_active_nolock_(struct event *ev, int res, short ncalls)
2920 {
2921 struct event_base *base;
2922
2923 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2924 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2925
2926 base = ev->ev_base;
2927 EVENT_BASE_ASSERT_LOCKED(base);
2928
2929 if (ev->ev_flags & EVLIST_FINALIZING) {
2930 /* XXXX debug */
2931 return;
2932 }
2933
2934 switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2935 default:
2936 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2937 EVUTIL_ASSERT(0);
2938 break;
2939 case EVLIST_ACTIVE:
2940 /* We get different kinds of events, add them together */
2941 ev->ev_res |= res;
2942 return;
2943 case EVLIST_ACTIVE_LATER:
2944 ev->ev_res |= res;
2945 break;
2946 case 0:
2947 ev->ev_res = res;
2948 break;
2949 }
2950
2951 if (ev->ev_pri < base->event_running_priority)
2952 base->event_continue = 1;
2953
2954 if (ev->ev_events & EV_SIGNAL) {
2955 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2956 if (base->current_event == event_to_event_callback(ev) &&
2957 !EVBASE_IN_THREAD(base)) {
2958 ++base->current_event_waiters;
2959 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2960 }
2961 #endif
2962 ev->ev_ncalls = ncalls;
2963 ev->ev_pncalls = NULL;
2964 }
2965
2966 event_callback_activate_nolock_(base, event_to_event_callback(ev));
2967 }
2968
2969 void
2970 event_active_later_(struct event *ev, int res)
2971 {
2972 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2973 event_active_later_nolock_(ev, res);
2974 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2975 }
2976
2977 void
2978 event_active_later_nolock_(struct event *ev, int res)
2979 {
2980 struct event_base *base = ev->ev_base;
2981 EVENT_BASE_ASSERT_LOCKED(base);
2982
2983 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2984 /* We get different kinds of events, add them together */
2985 ev->ev_res |= res;
2986 return;
2987 }
2988
2989 ev->ev_res = res;
2990
2991 event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2992 }
2993
2994 int
2995 event_callback_activate_(struct event_base *base,
2996 struct event_callback *evcb)
2997 {
2998 int r;
2999 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3000 r = event_callback_activate_nolock_(base, evcb);
3001 EVBASE_RELEASE_LOCK(base, th_base_lock);
3002 return r;
3003 }
3004
3005 int
3006 event_callback_activate_nolock_(struct event_base *base,
3007 struct event_callback *evcb)
3008 {
3009 int r = 1;
3010
3011 if (evcb->evcb_flags & EVLIST_FINALIZING)
3012 return 0;
3013
3014 switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3015 default:
3016 EVUTIL_ASSERT(0);
3017 EVUTIL_FALLTHROUGH;
3018 case EVLIST_ACTIVE_LATER:
3019 event_queue_remove_active_later(base, evcb);
3020 r = 0;
3021 break;
3022 case EVLIST_ACTIVE:
3023 return 0;
3024 case 0:
3025 break;
3026 }
3027
3028 event_queue_insert_active(base, evcb);
3029
3030 if (EVBASE_NEED_NOTIFY(base))
3031 evthread_notify_base(base);
3032
3033 return r;
3034 }
3035
3036 int
3037 event_callback_activate_later_nolock_(struct event_base *base,
3038 struct event_callback *evcb)
3039 {
3040 if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3041 return 0;
3042
3043 event_queue_insert_active_later(base, evcb);
3044 if (EVBASE_NEED_NOTIFY(base))
3045 evthread_notify_base(base);
3046 return 1;
3047 }
3048
3049 void
3050 event_callback_init_(struct event_base *base,
3051 struct event_callback *cb)
3052 {
3053 memset(cb, 0, sizeof(*cb));
3054 cb->evcb_pri = base->nactivequeues - 1;
3055 }
3056
3057 int
3058 event_callback_cancel_(struct event_base *base,
3059 struct event_callback *evcb)
3060 {
3061 int r;
3062 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3063 r = event_callback_cancel_nolock_(base, evcb, 0);
3064 EVBASE_RELEASE_LOCK(base, th_base_lock);
3065 return r;
3066 }
3067
3068 int
3069 event_callback_cancel_nolock_(struct event_base *base,
3070 struct event_callback *evcb, int even_if_finalizing)
3071 {
3072 if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3073 return 0;
3074
3075 if (evcb->evcb_flags & EVLIST_INIT)
3076 return event_del_nolock_(event_callback_to_event(evcb),
3077 even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3078
3079 switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3080 default:
3081 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3082 EVUTIL_ASSERT(0);
3083 break;
3084 case EVLIST_ACTIVE:
3085 /* We get different kinds of events, add them together */
3086 event_queue_remove_active(base, evcb);
3087 return 0;
3088 case EVLIST_ACTIVE_LATER:
3089 event_queue_remove_active_later(base, evcb);
3090 break;
3091 case 0:
3092 break;
3093 }
3094
3095 return 0;
3096 }
3097
3098 void
3099 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3100 {
3101 memset(cb, 0, sizeof(*cb));
3102 cb->evcb_cb_union.evcb_selfcb = fn;
3103 cb->evcb_arg = arg;
3104 cb->evcb_pri = priority;
3105 cb->evcb_closure = EV_CLOSURE_CB_SELF;
3106 }
3107
3108 void
3109 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3110 {
3111 cb->evcb_pri = priority;
3112 }
3113
3114 void
3115 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3116 {
3117 if (!base)
3118 base = current_base;
3119 event_callback_cancel_(base, cb);
3120 }
3121
3122 #define MAX_DEFERREDS_QUEUED 32
3123 int
3124 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3125 {
3126 int r = 1;
3127 if (!base)
3128 base = current_base;
3129 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3130 if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3131 r = event_callback_activate_later_nolock_(base, cb);
3132 } else {
3133 r = event_callback_activate_nolock_(base, cb);
3134 if (r) {
3135 ++base->n_deferreds_queued;
3136 }
3137 }
3138 EVBASE_RELEASE_LOCK(base, th_base_lock);
3139 return r;
3140 }
3141
3142 static int
3143 timeout_next(struct event_base *base, struct timeval **tv_p)
3144 {
3145 /* Caller must hold th_base_lock */
3146 struct timeval now;
3147 struct event *ev;
3148 struct timeval *tv = *tv_p;
3149 int res = 0;
3150
3151 ev = min_heap_top_(&base->timeheap);
3152
3153 if (ev == NULL) {
3154 /* if no time-based events are active wait for I/O */
3155 *tv_p = NULL;
3156 goto out;
3157 }
3158
3159 if (gettime(base, &now) == -1) {
3160 res = -1;
3161 goto out;
3162 }
3163
3164 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3165 evutil_timerclear(tv);
3166 goto out;
3167 }
3168
3169 evutil_timersub(&ev->ev_timeout, &now, tv);
3170
3171 EVUTIL_ASSERT(tv->tv_sec >= 0);
3172 EVUTIL_ASSERT(tv->tv_usec >= 0);
3173 event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3174
3175 out:
3176 return (res);
3177 }
3178
3179 /* Activate every event whose timeout has elapsed. */
3180 static void
3181 timeout_process(struct event_base *base)
3182 {
3183 /* Caller must hold lock. */
3184 struct timeval now;
3185 struct event *ev;
3186
3187 if (min_heap_empty_(&base->timeheap)) {
3188 return;
3189 }
3190
3191 gettime(base, &now);
3192
3193 while ((ev = min_heap_top_(&base->timeheap))) {
3194 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3195 break;
3196
3197 /* delete this event from the I/O queues */
3198 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3199
3200 event_debug(("timeout_process: event: %p, call %p",
3201 ev, ev->ev_callback));
3202 event_active_nolock_(ev, EV_TIMEOUT, 1);
3203 }
3204 }
3205
3206 #ifndef MAX
3207 #define MAX(a,b) (((a)>(b))?(a):(b))
3208 #endif
3209
3210 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3211
3212 /* These are a fancy way to spell
3213 if (~flags & EVLIST_INTERNAL)
3214 base->event_count--/++;
3215 */
3216 #define DECR_EVENT_COUNT(base,flags) \
3217 ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3218 #define INCR_EVENT_COUNT(base,flags) do { \
3219 ((base)->event_count += !((flags) & EVLIST_INTERNAL)); \
3220 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
3221 } while (0)
3222
3223 static void
3224 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3225 {
3226 EVENT_BASE_ASSERT_LOCKED(base);
3227 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3228 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3229 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3230 return;
3231 }
3232 DECR_EVENT_COUNT(base, ev->ev_flags);
3233 ev->ev_flags &= ~EVLIST_INSERTED;
3234 }
3235 static void
3236 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3237 {
3238 EVENT_BASE_ASSERT_LOCKED(base);
3239 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3240 event_errx(1, "%s: %p not on queue %x", __func__,
3241 evcb, EVLIST_ACTIVE);
3242 return;
3243 }
3244 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3245 evcb->evcb_flags &= ~EVLIST_ACTIVE;
3246 base->event_count_active--;
3247
3248 TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3249 evcb, evcb_active_next);
3250 }
3251 static void
3252 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3253 {
3254 EVENT_BASE_ASSERT_LOCKED(base);
3255 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3256 event_errx(1, "%s: %p not on queue %x", __func__,
3257 evcb, EVLIST_ACTIVE_LATER);
3258 return;
3259 }
3260 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3261 evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3262 base->event_count_active--;
3263
3264 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3265 }
3266 static void
3267 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3268 {
3269 EVENT_BASE_ASSERT_LOCKED(base);
3270 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3271 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3272 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3273 return;
3274 }
3275 DECR_EVENT_COUNT(base, ev->ev_flags);
3276 ev->ev_flags &= ~EVLIST_TIMEOUT;
3277
3278 if (is_common_timeout(&ev->ev_timeout, base)) {
3279 struct common_timeout_list *ctl =
3280 get_common_timeout_list(base, &ev->ev_timeout);
3281 TAILQ_REMOVE(&ctl->events, ev,
3282 ev_timeout_pos.ev_next_with_common_timeout);
3283 } else {
3284 min_heap_erase_(&base->timeheap, ev);
3285 }
3286 }
3287
3288 #ifdef USE_REINSERT_TIMEOUT
3289 /* Remove and reinsert 'ev' into the timeout queue. */
3290 static void
3291 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3292 int was_common, int is_common, int old_timeout_idx)
3293 {
3294 struct common_timeout_list *ctl;
3295 if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3296 event_queue_insert_timeout(base, ev);
3297 return;
3298 }
3299
3300 switch ((was_common<<1) | is_common) {
3301 case 3: /* Changing from one common timeout to another */
3302 ctl = base->common_timeout_queues[old_timeout_idx];
3303 TAILQ_REMOVE(&ctl->events, ev,
3304 ev_timeout_pos.ev_next_with_common_timeout);
3305 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3306 insert_common_timeout_inorder(ctl, ev);
3307 break;
3308 case 2: /* Was common; is no longer common */
3309 ctl = base->common_timeout_queues[old_timeout_idx];
3310 TAILQ_REMOVE(&ctl->events, ev,
3311 ev_timeout_pos.ev_next_with_common_timeout);
3312 min_heap_push_(&base->timeheap, ev);
3313 break;
3314 case 1: /* Wasn't common; has become common. */
3315 min_heap_erase_(&base->timeheap, ev);
3316 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3317 insert_common_timeout_inorder(ctl, ev);
3318 break;
3319 case 0: /* was in heap; is still on heap. */
3320 min_heap_adjust_(&base->timeheap, ev);
3321 break;
3322 default:
3323 EVUTIL_ASSERT(0); /* unreachable */
3324 break;
3325 }
3326 }
3327 #endif
3328
3329 /* Add 'ev' to the common timeout list in 'ev'. */
3330 static void
3331 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3332 struct event *ev)
3333 {
3334 struct event *e;
3335 /* By all logic, we should just be able to append 'ev' to the end of
3336 * ctl->events, since the timeout on each 'ev' is set to {the common
3337 * timeout} + {the time when we add the event}, and so the events
3338 	 * should arrive in order of their timeouts. But just in case
3339 	 * there's some wacky threading issue going on, we do a search from
3340 	 * the end of the list to find the right insertion point.
3341 */
3342 TAILQ_FOREACH_REVERSE(e, &ctl->events,
3343 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3344 /* This timercmp is a little sneaky, since both ev and e have
3345 * magic values in tv_usec. Fortunately, they ought to have
3346 * the _same_ magic values in tv_usec. Let's assert for that.
3347 */
3348 EVUTIL_ASSERT(
3349 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3350 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3351 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3352 ev_timeout_pos.ev_next_with_common_timeout);
3353 return;
3354 }
3355 }
3356 TAILQ_INSERT_HEAD(&ctl->events, ev,
3357 ev_timeout_pos.ev_next_with_common_timeout);
3358 }
3359
3360 static void
3361 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3362 {
3363 EVENT_BASE_ASSERT_LOCKED(base);
3364
3365 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3366 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3367 ev, EV_SOCK_ARG(ev->ev_fd));
3368 return;
3369 }
3370
3371 INCR_EVENT_COUNT(base, ev->ev_flags);
3372
3373 ev->ev_flags |= EVLIST_INSERTED;
3374 }
3375
3376 static void
3377 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3378 {
3379 EVENT_BASE_ASSERT_LOCKED(base);
3380
3381 if (evcb->evcb_flags & EVLIST_ACTIVE) {
3382 /* Double insertion is possible for active events */
3383 return;
3384 }
3385
3386 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3387
3388 evcb->evcb_flags |= EVLIST_ACTIVE;
3389
3390 base->event_count_active++;
3391 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3392 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3393 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3394 evcb, evcb_active_next);
3395 }
3396
3397 static void
3398 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3399 {
3400 EVENT_BASE_ASSERT_LOCKED(base);
3401 if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3402 /* Double insertion is possible */
3403 return;
3404 }
3405
3406 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3407 evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3408 base->event_count_active++;
3409 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3410 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3411 TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3412 }
3413
3414 static void
3415 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3416 {
3417 EVENT_BASE_ASSERT_LOCKED(base);
3418
3419 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3420 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3421 ev, EV_SOCK_ARG(ev->ev_fd));
3422 return;
3423 }
3424
3425 INCR_EVENT_COUNT(base, ev->ev_flags);
3426
3427 ev->ev_flags |= EVLIST_TIMEOUT;
3428
3429 if (is_common_timeout(&ev->ev_timeout, base)) {
3430 struct common_timeout_list *ctl =
3431 get_common_timeout_list(base, &ev->ev_timeout);
3432 insert_common_timeout_inorder(ctl, ev);
3433 } else {
3434 min_heap_push_(&base->timeheap, ev);
3435 }
3436 }
3437
3438 static void
3439 event_queue_make_later_events_active(struct event_base *base)
3440 {
3441 struct event_callback *evcb;
3442 EVENT_BASE_ASSERT_LOCKED(base);
3443
3444 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3445 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3446 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3447 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3448 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3449 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3450 }
3451 }
3452
3453 /* Functions for debugging */
3454
3455 const char *
3456 event_get_version(void)
3457 {
3458 return (EVENT__VERSION);
3459 }
3460
3461 ev_uint32_t
3462 event_get_version_number(void)
3463 {
3464 return (EVENT__NUMERIC_VERSION);
3465 }
3466
3467 /*
3468 * No thread-safe interface needed - the information should be the same
3469 * for all threads.
3470 */
3471
3472 const char *
3473 event_get_method(void)
3474 {
3475 return (current_base->evsel->name);
3476 }
3477
3478 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3479 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3480 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3481 static void (*mm_free_fn_)(void *p) = NULL;
3482
3483 void *
3484 event_mm_malloc_(size_t sz)
3485 {
3486 if (sz == 0)
3487 return NULL;
3488
3489 if (mm_malloc_fn_)
3490 return mm_malloc_fn_(sz);
3491 else
3492 return malloc(sz);
3493 }
3494
3495 void *
3496 event_mm_calloc_(size_t count, size_t size)
3497 {
3498 if (count == 0 || size == 0)
3499 return NULL;
3500
3501 if (mm_malloc_fn_) {
3502 size_t sz = count * size;
3503 void *p = NULL;
3504 if (count > EV_SIZE_MAX / size)
3505 goto error;
3506 p = mm_malloc_fn_(sz);
3507 if (p)
3508 return memset(p, 0, sz);
3509 } else {
3510 void *p = calloc(count, size);
3511 #ifdef _WIN32
3512 /* Windows calloc doesn't reliably set ENOMEM */
3513 if (p == NULL)
3514 goto error;
3515 #endif
3516 return p;
3517 }
3518
3519 error:
3520 errno = ENOMEM;
3521 return NULL;
3522 }
3523
3524 char *
3525 event_mm_strdup_(const char *str)
3526 {
3527 if (!str) {
3528 errno = EINVAL;
3529 return NULL;
3530 }
3531
3532 if (mm_malloc_fn_) {
3533 size_t ln = strlen(str);
3534 void *p = NULL;
3535 if (ln == EV_SIZE_MAX)
3536 goto error;
3537 p = mm_malloc_fn_(ln+1);
3538 if (p)
3539 return memcpy(p, str, ln+1);
3540 } else
3541 #ifdef _WIN32
3542 return _strdup(str);
3543 #else
3544 return strdup(str);
3545 #endif
3546
3547 error:
3548 errno = ENOMEM;
3549 return NULL;
3550 }
3551
3552 void *
3553 event_mm_realloc_(void *ptr, size_t sz)
3554 {
3555 if (mm_realloc_fn_)
3556 return mm_realloc_fn_(ptr, sz);
3557 else
3558 return realloc(ptr, sz);
3559 }
3560
3561 void
3562 event_mm_free_(void *ptr)
3563 {
3564 if (mm_free_fn_)
3565 mm_free_fn_(ptr);
3566 else
3567 free(ptr);
3568 }
3569
3570 void
3571 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3572 void *(*realloc_fn)(void *ptr, size_t sz),
3573 void (*free_fn)(void *ptr))
3574 {
3575 mm_malloc_fn_ = malloc_fn;
3576 mm_realloc_fn_ = realloc_fn;
3577 mm_free_fn_ = free_fn;
3578 }
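/*
 * Editor's usage sketch (the wrappers are hypothetical, not part of
 * libevent): an application can route all of libevent's allocations through
 * its own functions, as long as the hooks are installed before any other
 * libevent call has a chance to allocate:
 *
 *	static void *my_malloc(size_t sz)           { return malloc(sz); }
 *	static void *my_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *	static void my_free(void *p)                { free(p); }
 *
 *	event_set_mem_functions(my_malloc, my_realloc, my_free);
 *	struct event_base *base = event_base_new();
 *
 * The three replacements must be mutually consistent, since memory obtained
 * from one of them will later be handed to the others; a real application
 * might extend these pass-through wrappers with logging or accounting.
 */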
3579 #endif
3580
3581 #ifdef EVENT__HAVE_EVENTFD
3582 static void
3583 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3584 {
3585 ev_uint64_t msg;
3586 ev_ssize_t r;
3587 struct event_base *base = arg;
3588
3589 r = read(fd, (void*) &msg, sizeof(msg));
3590 if (r<0 && errno != EAGAIN) {
3591 event_sock_warn(fd, "Error reading from eventfd");
3592 }
3593 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3594 base->is_notify_pending = 0;
3595 EVBASE_RELEASE_LOCK(base, th_base_lock);
3596 }
3597 #endif
3598
3599 static void
3600 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3601 {
3602 unsigned char buf[1024];
3603 struct event_base *base = arg;
3604 #ifdef _WIN32
3605 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3606 ;
3607 #else
3608 while (read(fd, (char*)buf, sizeof(buf)) > 0)
3609 ;
3610 #endif
3611
3612 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3613 base->is_notify_pending = 0;
3614 EVBASE_RELEASE_LOCK(base, th_base_lock);
3615 }
3616
3617 int
3618 evthread_make_base_notifiable(struct event_base *base)
3619 {
3620 int r;
3621 if (!base)
3622 return -1;
3623
3624 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3625 r = evthread_make_base_notifiable_nolock_(base);
3626 EVBASE_RELEASE_LOCK(base, th_base_lock);
3627 return r;
3628 }
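/*
 * Editor's usage sketch: when locking is enabled before the base exists,
 * event_base_new() ends up calling evthread_make_base_notifiable() itself,
 * which is what lets event_active() from another thread wake the loop:
 *
 *	evthread_use_pthreads();                     // requires pthreads support
 *	struct event_base *base = event_base_new();  // already notifiable here
 *
 * Calling evthread_make_base_notifiable() directly is therefore only needed
 * in unusual setups; it is harmless to call again, since an already
 * notifiable base returns 0 immediately (see the th_notify_fn check below).
 */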
3629
3630 static int
3631 evthread_make_base_notifiable_nolock_(struct event_base *base)
3632 {
3633 void (*cb)(evutil_socket_t, short, void *);
3634 int (*notify)(struct event_base *);
3635
3636 if (base->th_notify_fn != NULL) {
3637 /* The base is already notifiable: we're doing fine. */
3638 return 0;
3639 }
3640
3641 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3642 if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3643 base->th_notify_fn = event_kq_notify_base_;
3644 /* No need to add an event here; the backend can wake
3645 * itself up just fine. */
3646 return 0;
3647 }
3648 #endif
3649
3650 #ifdef EVENT__HAVE_EVENTFD
3651 base->th_notify_fd[0] = evutil_eventfd_(0,
3652 EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3653 if (base->th_notify_fd[0] >= 0) {
3654 base->th_notify_fd[1] = -1;
3655 notify = evthread_notify_base_eventfd;
3656 cb = evthread_notify_drain_eventfd;
3657 } else
3658 #endif
3659 if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3660 notify = evthread_notify_base_default;
3661 cb = evthread_notify_drain_default;
3662 } else {
3663 return -1;
3664 }
3665
3666 base->th_notify_fn = notify;
3667
3668 /* prepare an event that we can use for wakeup */
3669 event_assign(&base->th_notify, base, base->th_notify_fd[0],
3670 EV_READ|EV_PERSIST, cb, base);
3671
3672 /* we need to mark this as an internal event */
3673 base->th_notify.ev_flags |= EVLIST_INTERNAL;
3674 event_priority_set(&base->th_notify, 0);
3675
3676 return event_add_nolock_(&base->th_notify, NULL, 0);
3677 }
3678
3679 int
3680 event_base_foreach_event_nolock_(struct event_base *base,
3681 event_base_foreach_event_cb fn, void *arg)
3682 {
3683 int r, i;
3684 unsigned u;
3685 struct event *ev;
3686
3687 /* Start out with all the EVLIST_INSERTED events. */
3688 if ((r = evmap_foreach_event_(base, fn, arg)))
3689 return r;
3690
3691 /* Okay, now we deal with those events that have timeouts and are in
3692 * the min-heap. */
3693 for (u = 0; u < base->timeheap.n; ++u) {
3694 ev = base->timeheap.p[u];
3695 if (ev->ev_flags & EVLIST_INSERTED) {
3696 /* we already processed this one */
3697 continue;
3698 }
3699 if ((r = fn(base, ev, arg)))
3700 return r;
3701 }
3702
3703 /* Now for the events in one of the common-timeout queues,
3704 * rather than the min-heap. */
3705 for (i = 0; i < base->n_common_timeouts; ++i) {
3706 struct common_timeout_list *ctl =
3707 base->common_timeout_queues[i];
3708 TAILQ_FOREACH(ev, &ctl->events,
3709 ev_timeout_pos.ev_next_with_common_timeout) {
3710 if (ev->ev_flags & EVLIST_INSERTED) {
3711 /* we already processed this one */
3712 continue;
3713 }
3714 if ((r = fn(base, ev, arg)))
3715 return r;
3716 }
3717 }
3718
3719 /* Finally, we deal with all the active events that we haven't touched
3720 * yet. */
3721 for (i = 0; i < base->nactivequeues; ++i) {
3722 struct event_callback *evcb;
3723 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3724 if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3725 /* This isn't an event (EVLIST_INIT clear), or
3726 * we already processed it (EVLIST_INSERTED or
3727 * EVLIST_TIMEOUT set). */
3728 continue;
3729 }
3730 ev = event_callback_to_event(evcb);
3731 if ((r = fn(base, ev, arg)))
3732 return r;
3733 }
3734 }
3735
3736 return 0;
3737 }
3738
3739 /* Helper for event_base_dump_events: called on each event in the event base;
3740 * dumps only the inserted events. */
3741 static int
3742 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3743 {
3744 FILE *output = arg;
3745 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3746 "sig" : "fd ";
3747
3748 if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3749 return 0;
3750
3751 fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3752 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3753 (e->ev_events&EV_READ)?" Read":"",
3754 (e->ev_events&EV_WRITE)?" Write":"",
3755 (e->ev_events&EV_CLOSED)?" EOF":"",
3756 (e->ev_events&EV_SIGNAL)?" Signal":"",
3757 (e->ev_events&EV_PERSIST)?" Persist":"",
3758 (e->ev_events&EV_ET)?" ET":"",
3759 (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3760 if (e->ev_flags & EVLIST_TIMEOUT) {
3761 struct timeval tv;
3762 tv.tv_sec = e->ev_timeout.tv_sec;
3763 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3764 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3765 fprintf(output, " Timeout=%ld.%06d",
3766 (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3767 }
3768 fputc('\n', output);
3769
3770 return 0;
3771 }
3772
3773 /* Helper for event_base_dump_events: called on each event in the event base;
3774 * dumps only the active events. */
3775 static int
3776 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3777 {
3778 FILE *output = arg;
3779 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3780 "sig" : "fd ";
3781
3782 if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3783 return 0;
3784
3785 fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3786 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3787 (e->ev_res&EV_READ)?" Read":"",
3788 (e->ev_res&EV_WRITE)?" Write":"",
3789 (e->ev_res&EV_CLOSED)?" EOF":"",
3790 (e->ev_res&EV_SIGNAL)?" Signal":"",
3791 (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3792 (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3793 (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3794
3795 return 0;
3796 }
3797
3798 int
3799 event_base_foreach_event(struct event_base *base,
3800 event_base_foreach_event_cb fn, void *arg)
3801 {
3802 int r;
3803 if ((!fn) || (!base)) {
3804 return -1;
3805 }
3806 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3807 r = event_base_foreach_event_nolock_(base, fn, arg);
3808 EVBASE_RELEASE_LOCK(base, th_base_lock);
3809 return r;
3810 }
3811
3812
3813 void
3814 event_base_dump_events(struct event_base *base, FILE *output)
3815 {
3816 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3817 fprintf(output, "Inserted events:\n");
3818 event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3819
3820 fprintf(output, "Active events:\n");
3821 event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3822 EVBASE_RELEASE_LOCK(base, th_base_lock);
3823 }
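/*
 * Editor's usage sketch: this is a debugging aid, so the usual pattern is to
 * send the listing to stderr (or a log file) when something looks stuck:
 *
 *	event_base_dump_events(base, stderr);
 *
 * The output is the two sections printed above, "Inserted events:" followed
 * by "Active events:", one event per line.
 */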
3824
3825 void
3826 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3827 {
3828 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3829
3830 /* Activate any non-timer events */
3831 if (!(events & EV_TIMEOUT)) {
3832 evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3833 } else {
3834 /* If we want to activate timer events, loop and activate each event with
3835 * the same fd in both the timeheap and the common-timeout lists */
3836 int i;
3837 unsigned u;
3838 struct event *ev;
3839
3840 for (u = 0; u < base->timeheap.n; ++u) {
3841 ev = base->timeheap.p[u];
3842 if (ev->ev_fd == fd) {
3843 event_active_nolock_(ev, EV_TIMEOUT, 1);
3844 }
3845 }
3846
3847 for (i = 0; i < base->n_common_timeouts; ++i) {
3848 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3849 TAILQ_FOREACH(ev, &ctl->events,
3850 ev_timeout_pos.ev_next_with_common_timeout) {
3851 if (ev->ev_fd == fd) {
3852 event_active_nolock_(ev, EV_TIMEOUT, 1);
3853 }
3854 }
3855 }
3856 }
3857
3858 EVBASE_RELEASE_LOCK(base, th_base_lock);
3859 }
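/*
 * Editor's usage sketch: this entry point lets code outside the event loop
 * force the callbacks registered on a descriptor to run, without waiting for
 * the kernel to report readiness:
 *
 *	event_base_active_by_fd(base, fd, EV_READ);	// run read callbacks on fd
 *	event_base_active_by_fd(base, fd, EV_TIMEOUT);	// fire timer events bound to fd
 */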
3860
3861 void
3862 event_base_active_by_signal(struct event_base *base, int sig)
3863 {
3864 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3865 evmap_signal_active_(base, sig, 1);
3866 EVBASE_RELEASE_LOCK(base, th_base_lock);
3867 }
3868
3869
3870 void
3871 event_base_add_virtual_(struct event_base *base)
3872 {
3873 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3874 base->virtual_event_count++;
3875 MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3876 EVBASE_RELEASE_LOCK(base, th_base_lock);
3877 }
3878
3879 void
3880 event_base_del_virtual_(struct event_base *base)
3881 {
3882 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3883 EVUTIL_ASSERT(base->virtual_event_count > 0);
3884 base->virtual_event_count--;
3885 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3886 evthread_notify_base(base);
3887 EVBASE_RELEASE_LOCK(base, th_base_lock);
3888 }
3889
3890 static void
3891 event_free_debug_globals_locks(void)
3892 {
3893 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3894 #ifndef EVENT__DISABLE_DEBUG_MODE
3895 if (event_debug_map_lock_ != NULL) {
3896 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3897 event_debug_map_lock_ = NULL;
3898 evthreadimpl_disable_lock_debugging_();
3899 }
3900 #endif /* EVENT__DISABLE_DEBUG_MODE */
3901 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3902 return;
3903 }
3904
3905 static void
3906 event_free_debug_globals(void)
3907 {
3908 event_free_debug_globals_locks();
3909 }
3910
3911 static void
3912 event_free_evsig_globals(void)
3913 {
3914 evsig_free_globals_();
3915 }
3916
3917 static void
3918 event_free_evutil_globals(void)
3919 {
3920 evutil_free_globals_();
3921 }
3922
3923 static void
3924 event_free_globals(void)
3925 {
3926 event_free_debug_globals();
3927 event_free_evsig_globals();
3928 event_free_evutil_globals();
3929 }
3930
3931 void
3932 libevent_global_shutdown(void)
3933 {
3934 event_disable_debug_mode();
3935 event_free_globals();
3936 }
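/*
 * Editor's usage sketch: this call releases only libevent's global state; it
 * does not free bases or events the application still owns, so it belongs at
 * the very end of the program:
 *
 *	event_base_free(base);		// per-base cleanup first
 *	libevent_global_shutdown();	// then the library-wide globals
 */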
3937
3938 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3939 int
3940 event_global_setup_locks_(const int enable_locks)
3941 {
3942 #ifndef EVENT__DISABLE_DEBUG_MODE
3943 EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3944 #endif
3945 if (evsig_global_setup_locks_(enable_locks) < 0)
3946 return -1;
3947 if (evutil_global_setup_locks_(enable_locks) < 0)
3948 return -1;
3949 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3950 return -1;
3951 return 0;
3952 }
3953 #endif
3954
3955 void
3956 event_base_assert_ok_(struct event_base *base)
3957 {
3958 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3959 event_base_assert_ok_nolock_(base);
3960 EVBASE_RELEASE_LOCK(base, th_base_lock);
3961 }
3962
3963 void
3964 event_base_assert_ok_nolock_(struct event_base *base)
3965 {
3966 int i;
3967 int count;
3968
3969 /* First do checks on the per-fd and per-signal lists */
3970 evmap_check_integrity_(base);
3971
3972 /* Check the heap property */
3973 for (i = 1; i < (int)base->timeheap.n; ++i) {
3974 int parent = (i - 1) / 2;
3975 struct event *ev, *p_ev;
3976 ev = base->timeheap.p[i];
3977 p_ev = base->timeheap.p[parent];
3978 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3979 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3980 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3981 }
3982
3983 /* Check that the common timeouts are fine */
3984 for (i = 0; i < base->n_common_timeouts; ++i) {
3985 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3986 struct event *last=NULL, *ev;
3987
3988 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3989
3990 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3991 if (last)
3992 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3993 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3994 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3995 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3996 last = ev;
3997 }
3998 }
3999
4000 /* Check the active queues. */
4001 count = 0;
4002 for (i = 0; i < base->nactivequeues; ++i) {
4003 struct event_callback *evcb;
4004 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4005 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4006 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4007 EVUTIL_ASSERT(evcb->evcb_pri == i);
4008 ++count;
4009 }
4010 }
4011
4012 {
4013 struct event_callback *evcb;
4014 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4015 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4016 ++count;
4017 }
4018 }
4019 EVUTIL_ASSERT(count == base->event_count_active);
4020 }
4021