1 /*
2 * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27 #include "util-internal.h"
28
29 #ifdef _WIN32
30 #include <winsock2.h>
31 #include <windows.h>
32 #endif
33
34 #include "event2/event-config.h"
35
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #ifdef EVENT__HAVE_SYS_TIME_H
39 #include <sys/time.h>
40 #endif
41 #include <sys/queue.h>
42 #ifndef _WIN32
43 #include <sys/socket.h>
44 #include <sys/wait.h>
45 #include <limits.h>
46 #include <signal.h>
47 #include <unistd.h>
48 #include <netdb.h>
49 #endif
50 #include <fcntl.h>
51 #include <signal.h>
52 #include <stdlib.h>
53 #include <stdio.h>
54 #include <string.h>
55 #include <errno.h>
56 #include <assert.h>
57 #include <ctype.h>
58
59 #include "event2/event.h"
60 #include "event2/event_struct.h"
61 #include "event2/event_compat.h"
62 #include "event2/tag.h"
63 #include "event2/buffer.h"
64 #include "event2/buffer_compat.h"
65 #include "event2/util.h"
66 #include "event-internal.h"
67 #include "evthread-internal.h"
68 #include "log-internal.h"
69 #include "time-internal.h"
70
71 #include "regress.h"
72 #include "regress_thread.h"
73
74 #ifndef _WIN32
75 #include "regress.gen.h"
76 #endif
77
78 evutil_socket_t pair[2];
79 int test_ok;
80 int called;
81 struct event_base *global_base;
82
83 static char wbuf[4096];
84 static char rbuf[4096];
85 static int woff;
86 static int roff;
87 static int usepersist;
88 static struct timeval tset;
89 static struct timeval tcalled;
90
91
92 #define TEST1 "this is a test"
93
94 #ifdef _WIN32
95 #define write(fd,buf,len) send((fd),(buf),(int)(len),0)
96 #define read(fd,buf,len) recv((fd),(buf),(int)(len),0)
97 #endif
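/* On Windows the socketpair fds are SOCKET handles, not C runtime file
 * descriptors, so plain write()/read() would fail; map them onto
 * send()/recv() (e.g. write(fd, buf, n) becomes send(fd, buf, n, 0)). */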
98
99 struct basic_cb_args
100 {
101 struct event_base *eb;
102 struct event *ev;
103 unsigned int callcount;
104 };
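/* Passed to basic_read_cb() so the callback can count how many times it
 * has run and can delete its own event and exit the loop through the
 * stored base and event pointers. */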
105
106 static void
107 simple_read_cb(evutil_socket_t fd, short event, void *arg)
108 {
109 char buf[256];
110 int len;
111
112 len = read(fd, buf, sizeof(buf));
113
114 if (len) {
115 if (!called) {
116 if (event_add(arg, NULL) == -1)
117 exit(1);
118 }
119 } else if (called == 1)
120 test_ok = 1;
121
122 called++;
123 }
124
125 static void
126 basic_read_cb(evutil_socket_t fd, short event, void *data)
127 {
128 char buf[256];
129 int len;
130 struct basic_cb_args *arg = data;
131
132 len = read(fd, buf, sizeof(buf));
133
134 if (len < 0) {
135 tt_fail_perror("read (callback)");
136 } else {
137 switch (arg->callcount++) {
138 case 0: /* first call: expect to read data; cycle */
139 if (len > 0)
140 return;
141
142 tt_fail_msg("EOF before data read");
143 break;
144
145 case 1: /* second call: expect EOF; stop */
146 if (len > 0)
147 tt_fail_msg("not all data read on first cycle");
148 break;
149
150 default: /* third call: should not happen */
151 tt_fail_msg("too many cycles");
152 }
153 }
154
155 event_del(arg->ev);
156 event_base_loopexit(arg->eb, NULL);
157 }
158
159 static void
160 dummy_read_cb(evutil_socket_t fd, short event, void *arg)
161 {
162 }
163
164 static void
165 simple_write_cb(evutil_socket_t fd, short event, void *arg)
166 {
167 int len;
168
169 len = write(fd, TEST1, strlen(TEST1) + 1);
170 if (len == -1)
171 test_ok = 0;
172 else
173 test_ok = 1;
174 }
175
176 static void
177 multiple_write_cb(evutil_socket_t fd, short event, void *arg)
178 {
179 struct event *ev = arg;
180 int len;
181
182 len = 128;
183 if (woff + len >= (int)sizeof(wbuf))
184 len = sizeof(wbuf) - woff;
185
186 len = write(fd, wbuf + woff, len);
187 if (len == -1) {
188 fprintf(stderr, "%s: write\n", __func__);
189 if (usepersist)
190 event_del(ev);
191 return;
192 }
193
194 woff += len;
195
196 if (woff >= (int)sizeof(wbuf)) {
197 shutdown(fd, EVUTIL_SHUT_WR);
198 if (usepersist)
199 event_del(ev);
200 return;
201 }
202
203 if (!usepersist) {
204 if (event_add(ev, NULL) == -1)
205 exit(1);
206 }
207 }
208
209 static void
210 multiple_read_cb(evutil_socket_t fd, short event, void *arg)
211 {
212 struct event *ev = arg;
213 int len;
214
215 len = read(fd, rbuf + roff, sizeof(rbuf) - roff);
216 if (len == -1)
217 fprintf(stderr, "%s: read\n", __func__);
218 if (len <= 0) {
219 if (usepersist)
220 event_del(ev);
221 return;
222 }
223
224 roff += len;
225 if (!usepersist) {
226 if (event_add(ev, NULL) == -1)
227 exit(1);
228 }
229 }
230
231 static void
232 timeout_cb(evutil_socket_t fd, short event, void *arg)
233 {
234 evutil_gettimeofday(&tcalled, NULL);
235 }
236
237 struct both {
238 struct event ev;
239 int nread;
240 };
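/* Shared state for the combined read/write test: for the reader events
 * nread counts bytes received so far, while for the writer events it is
 * the number of bytes still to be written (see test_combined below). */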
241
242 static void
243 combined_read_cb(evutil_socket_t fd, short event, void *arg)
244 {
245 struct both *both = arg;
246 char buf[128];
247 int len;
248
249 len = read(fd, buf, sizeof(buf));
250 if (len == -1)
251 fprintf(stderr, "%s: read\n", __func__);
252 if (len <= 0)
253 return;
254
255 both->nread += len;
256 if (event_add(&both->ev, NULL) == -1)
257 exit(1);
258 }
259
260 static void
261 combined_write_cb(evutil_socket_t fd, short event, void *arg)
262 {
263 struct both *both = arg;
264 char buf[128];
265 int len;
266
267 len = sizeof(buf);
268 if (len > both->nread)
269 len = both->nread;
270
271 memset(buf, 'q', len);
272
273 len = write(fd, buf, len);
274 if (len == -1)
275 fprintf(stderr, "%s: write\n", __func__);
276 if (len <= 0) {
277 shutdown(fd, EVUTIL_SHUT_WR);
278 return;
279 }
280
281 both->nread -= len;
282 if (event_add(&both->ev, NULL) == -1)
283 exit(1);
284 }
285
286 /* These macros used to replicate the work of the legacy test wrapper code */
287 #define setup_test(x) do { \
288 if (!in_legacy_test_wrapper) { \
289 TT_FAIL(("Legacy test %s not wrapped properly", x)); \
290 return; \
291 } \
292 } while (0)
293 #define cleanup_test() setup_test("cleanup")
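/* Legacy tests are not invoked directly by tinytest; they are registered
 * through the LEGACY() wrapper in regress.h (roughly LEGACY(simpleread,
 * TT_ISOLATED) in the test table), which sets up the socketpair and the
 * in_legacy_test_wrapper flag that setup_test() checks above. */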
294
295 static void
296 test_simpleread(void)
297 {
298 struct event ev;
299
300 /* Very simple read test */
301 setup_test("Simple read: ");
302
303 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
304 tt_fail_perror("write");
305 }
306
307 shutdown(pair[0], EVUTIL_SHUT_WR);
308
309 event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
310 if (event_add(&ev, NULL) == -1)
311 exit(1);
312 event_dispatch();
313
314 cleanup_test();
315 }
316
317 static void
318 test_simplewrite(void)
319 {
320 struct event ev;
321
322 /* Very simple write test */
323 setup_test("Simple write: ");
324
325 event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev);
326 if (event_add(&ev, NULL) == -1)
327 exit(1);
328 event_dispatch();
329
330 cleanup_test();
331 }
332
333 static void
334 simpleread_multiple_cb(evutil_socket_t fd, short event, void *arg)
335 {
336 if (++called == 2)
337 test_ok = 1;
338 }
339
340 static void
341 test_simpleread_multiple(void)
342 {
343 struct event one, two;
344
345 /* Very simple read test */
346 setup_test("Simple read to multiple events: ");
347
348 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
349 tt_fail_perror("write");
350 }
351
352 shutdown(pair[0], EVUTIL_SHUT_WR);
353
354 event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL);
355 if (event_add(&one, NULL) == -1)
356 exit(1);
357 event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL);
358 if (event_add(&two, NULL) == -1)
359 exit(1);
360 event_dispatch();
361
362 cleanup_test();
363 }
364
365 static int have_closed = 0;
366 static int premature_event = 0;
367 static void
368 simpleclose_close_fd_cb(evutil_socket_t s, short what, void *ptr)
369 {
370 evutil_socket_t **fds = ptr;
371 TT_BLATHER(("Closing"));
372 evutil_closesocket(*fds[0]);
373 evutil_closesocket(*fds[1]);
374 *fds[0] = -1;
375 *fds[1] = -1;
376 have_closed = 1;
377 }
378
379 static void
380 record_event_cb(evutil_socket_t s, short what, void *ptr)
381 {
382 short *whatp = ptr;
383 if (!have_closed)
384 premature_event = 1;
385 *whatp = what;
386 TT_BLATHER(("Recorded %d on socket %d", (int)what, (int)s));
387 }
388
389 static void
390 test_simpleclose_rw(void *ptr)
391 {
392 /* Test that a close of FD is detected as a read and as a write. */
393 struct event_base *base = event_base_new();
394 evutil_socket_t pair1[2]={-1,-1}, pair2[2] = {-1, -1};
395 evutil_socket_t *to_close[2];
396 struct event *rev=NULL, *wev=NULL, *closeev=NULL;
397 struct timeval tv;
398 short got_read_on_close = 0, got_write_on_close = 0;
399 char buf[1024];
400 memset(buf, 99, sizeof(buf));
401 #ifdef _WIN32
402 #define LOCAL_SOCKETPAIR_AF AF_INET
403 #else
404 #define LOCAL_SOCKETPAIR_AF AF_UNIX
405 #endif
406 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair1)<0)
407 TT_DIE(("socketpair: %s", strerror(errno)));
408 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair2)<0)
409 TT_DIE(("socketpair: %s", strerror(errno)));
410 if (evutil_make_socket_nonblocking(pair1[1]) < 0)
411 TT_DIE(("make_socket_nonblocking"));
412 if (evutil_make_socket_nonblocking(pair2[1]) < 0)
413 TT_DIE(("make_socket_nonblocking"));
414
415 /** Stuff pair2[1] full of data, until write fails */
416 while (1) {
417 int r = write(pair2[1], buf, sizeof(buf));
418 if (r<0) {
419 int err = evutil_socket_geterror(pair2[1]);
420 if (! EVUTIL_ERR_RW_RETRIABLE(err))
421 TT_DIE(("write failed strangely: %s",
422 evutil_socket_error_to_string(err)));
423 break;
424 }
425 }
426 to_close[0] = &pair1[0];
427 to_close[1] = &pair2[0];
428
429 closeev = event_new(base, -1, EV_TIMEOUT, simpleclose_close_fd_cb,
430 to_close);
431 rev = event_new(base, pair1[1], EV_READ, record_event_cb,
432 &got_read_on_close);
433 TT_BLATHER(("Waiting for read on %d", (int)pair1[1]));
434 wev = event_new(base, pair2[1], EV_WRITE, record_event_cb,
435 &got_write_on_close);
436 TT_BLATHER(("Waiting for write on %d", (int)pair2[1]));
437 tv.tv_sec = 0;
438 tv.tv_usec = 100*1000; /* Close pair1[0] after a little while, and make
439 * sure we get a read event. */
440 event_add(closeev, &tv);
441 event_add(rev, NULL);
442 event_add(wev, NULL);
443 /* Don't let the test go on too long. */
444 tv.tv_sec = 0;
445 tv.tv_usec = 200*1000;
446 event_base_loopexit(base, &tv);
447 event_base_loop(base, 0);
448
449 tt_int_op(got_read_on_close, ==, EV_READ);
450 tt_int_op(got_write_on_close, ==, EV_WRITE);
451 tt_int_op(premature_event, ==, 0);
452
453 end:
454 if (pair1[0] >= 0)
455 evutil_closesocket(pair1[0]);
456 if (pair1[1] >= 0)
457 evutil_closesocket(pair1[1]);
458 if (pair2[0] >= 0)
459 evutil_closesocket(pair2[0]);
460 if (pair2[1] >= 0)
461 evutil_closesocket(pair2[1]);
462 if (rev)
463 event_free(rev);
464 if (wev)
465 event_free(wev);
466 if (closeev)
467 event_free(closeev);
468 if (base)
469 event_base_free(base);
470 }
471
472 static void
473 test_simpleclose(void *ptr)
474 {
475 struct basic_test_data *data = ptr;
476 struct event_base *base = data->base;
477 evutil_socket_t *pair = data->pair;
478 const char *flags = (const char *)data->setup_data;
479 int et = !!strstr(flags, "ET");
480 int persist = !!strstr(flags, "persist");
481 short events = EV_CLOSED | (et ? EV_ET : 0) | (persist ? EV_PERSIST : 0);
482 struct event *ev = NULL;
483 short got_event;
484
485 if (!(event_base_get_features(data->base) & EV_FEATURE_EARLY_CLOSE))
486 tt_skip();
487
488 /* XXX: should this code be moved to regress_et.c? */
489 if (et && !(event_base_get_features(data->base) & EV_FEATURE_ET))
490 tt_skip();
491
492 ev = event_new(base, pair[0], events, record_event_cb, &got_event);
493 tt_assert(ev);
494 tt_assert(!event_add(ev, NULL));
495
496 got_event = 0;
497 if (strstr(flags, "close")) {
498 tt_assert(!evutil_closesocket(pair[1]));
499 /* avoid closing in setup routines */
500 pair[1] = -1;
501 } else if (strstr(flags, "shutdown")) {
502 tt_assert(!shutdown(pair[1], EVUTIL_SHUT_WR));
503 } else {
504 tt_abort_msg("unknown flags");
505 }
506
507 /* without edge-triggered (EV_ET), but with EV_PERSIST, the loop will not stop on its own */
508 if (!et && persist) {
509 struct timeval tv;
510 tv.tv_sec = 0;
511 tv.tv_usec = 10000;
512 tt_assert(!event_base_loopexit(base, &tv));
513 }
514
515 tt_int_op(event_base_loop(base, EVLOOP_NONBLOCK), ==, !persist);
516 tt_int_op(got_event, ==, (events & ~EV_PERSIST));
517
518 end:
519 if (ev)
520 event_free(ev);
521 }
522
523 static void
524 test_multiple(void)
525 {
526 struct event ev, ev2;
527 int i;
528
529 /* Multiple read and write test */
530 setup_test("Multiple read/write: ");
531 memset(rbuf, 0, sizeof(rbuf));
532 for (i = 0; i < (int)sizeof(wbuf); i++)
533 wbuf[i] = i;
534
535 roff = woff = 0;
536 usepersist = 0;
537
538 event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev);
539 if (event_add(&ev, NULL) == -1)
540 exit(1);
541 event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2);
542 if (event_add(&ev2, NULL) == -1)
543 exit(1);
544 event_dispatch();
545
546 if (roff == woff)
547 test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
548
549 cleanup_test();
550 }
551
552 static void
553 test_persistent(void)
554 {
555 struct event ev, ev2;
556 int i;
557
558 /* Multiple read and write test with persist */
559 setup_test("Persist read/write: ");
560 memset(rbuf, 0, sizeof(rbuf));
561 for (i = 0; i < (int)sizeof(wbuf); i++)
562 wbuf[i] = i;
563
564 roff = woff = 0;
565 usepersist = 1;
566
567 event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev);
568 if (event_add(&ev, NULL) == -1)
569 exit(1);
570 event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2);
571 if (event_add(&ev2, NULL) == -1)
572 exit(1);
573 event_dispatch();
574
575 if (roff == woff)
576 test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
577
578 cleanup_test();
579 }
580
581 static void
582 test_combined(void)
583 {
584 struct both r1, r2, w1, w2;
585
586 setup_test("Combined read/write: ");
587 memset(&r1, 0, sizeof(r1));
588 memset(&r2, 0, sizeof(r2));
589 memset(&w1, 0, sizeof(w1));
590 memset(&w2, 0, sizeof(w2));
591
592 w1.nread = 4096;
593 w2.nread = 8192;
594
595 event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1);
596 event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1);
597 event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2);
598 event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2);
599 tt_assert(event_add(&r1.ev, NULL) != -1);
600 tt_assert(!event_add(&w1.ev, NULL));
601 tt_assert(!event_add(&r2.ev, NULL));
602 tt_assert(!event_add(&w2.ev, NULL));
603 event_dispatch();
604
605 if (r1.nread == 8192 && r2.nread == 4096)
606 test_ok = 1;
607
608 end:
609 cleanup_test();
610 }
611
612 static void
613 test_simpletimeout(void)
614 {
615 struct timeval tv;
616 struct event ev;
617
618 setup_test("Simple timeout: ");
619
620 tv.tv_usec = 200*1000;
621 tv.tv_sec = 0;
622 evutil_timerclear(&tcalled);
623 evtimer_set(&ev, timeout_cb, NULL);
624 evtimer_add(&ev, &tv);
625
626 evutil_gettimeofday(&tset, NULL);
627 event_dispatch();
628 test_timeval_diff_eq(&tset, &tcalled, 200);
629
630 test_ok = 1;
631 end:
632 cleanup_test();
633 }
634
635 static void
636 periodic_timeout_cb(evutil_socket_t fd, short event, void *arg)
637 {
638 int *count = arg;
639
640 (*count)++;
641 if (*count == 6) {
642 /* call loopexit only once - on slow machines(?), it is
643 * apparently possible for this to get called twice. */
644 test_ok = 1;
645 event_base_loopexit(global_base, NULL);
646 }
647 }
648
649 static void
650 test_persistent_timeout(void)
651 {
652 struct timeval tv;
653 struct event ev;
654 int count = 0;
655
656 evutil_timerclear(&tv);
657 tv.tv_usec = 10000;
658
659 event_assign(&ev, global_base, -1, EV_TIMEOUT|EV_PERSIST,
660 periodic_timeout_cb, &count);
661 event_add(&ev, &tv);
662
663 event_dispatch();
664
665 event_del(&ev);
666 }
667
668 static void
669 test_persistent_timeout_jump(void *ptr)
670 {
671 struct basic_test_data *data = ptr;
672 struct event ev;
673 int count = 0;
674 struct timeval msec100 = { 0, 100 * 1000 };
675 struct timeval msec50 = { 0, 50 * 1000 };
676 struct timeval msec300 = { 0, 300 * 1000 };
677
678 event_assign(&ev, data->base, -1, EV_PERSIST, periodic_timeout_cb, &count);
679 event_add(&ev, &msec100);
680 /* Wait for a bit */
681 evutil_usleep_(&msec300);
682 event_base_loopexit(data->base, &msec50);
683 event_base_dispatch(data->base);
684 tt_int_op(count, ==, 1);
685
686 end:
687 event_del(&ev);
688 }
689
690 struct persist_active_timeout_called {
691 int n;
692 short events[16];
693 struct timeval tvs[16];
694 };
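/* Records up to 16 callback firings - which flags were reported and when -
 * so test_persistent_active_timeout() can check both the ordering
 * (EV_READ first, then the persistent timeouts) and their timing. */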
695
696 static void
697 activate_cb(evutil_socket_t fd, short event, void *arg)
698 {
699 struct event *ev = arg;
700 event_active(ev, EV_READ, 1);
701 }
702
703 static void
704 persist_active_timeout_cb(evutil_socket_t fd, short event, void *arg)
705 {
706 struct persist_active_timeout_called *c = arg;
707 if (c->n < 15) {
708 c->events[c->n] = event;
709 evutil_gettimeofday(&c->tvs[c->n], NULL);
710 ++c->n;
711 }
712 }
713
714 static void
715 test_persistent_active_timeout(void *ptr)
716 {
717 struct timeval tv, tv2, tv_exit, start;
718 struct event ev;
719 struct persist_active_timeout_called res;
720
721 struct basic_test_data *data = ptr;
722 struct event_base *base = data->base;
723
724 memset(&res, 0, sizeof(res));
725
726 tv.tv_sec = 0;
727 tv.tv_usec = 200 * 1000;
728 event_assign(&ev, base, -1, EV_TIMEOUT|EV_PERSIST,
729 persist_active_timeout_cb, &res);
730 event_add(&ev, &tv);
731
732 tv2.tv_sec = 0;
733 tv2.tv_usec = 100 * 1000;
734 event_base_once(base, -1, EV_TIMEOUT, activate_cb, &ev, &tv2);
735
736 tv_exit.tv_sec = 0;
737 tv_exit.tv_usec = 600 * 1000;
738 event_base_loopexit(base, &tv_exit);
739
740 event_base_assert_ok_(base);
741 evutil_gettimeofday(&start, NULL);
742
743 event_base_dispatch(base);
744 event_base_assert_ok_(base);
745
746 tt_int_op(res.n, ==, 3);
747 tt_int_op(res.events[0], ==, EV_READ);
748 tt_int_op(res.events[1], ==, EV_TIMEOUT);
749 tt_int_op(res.events[2], ==, EV_TIMEOUT);
750 test_timeval_diff_eq(&start, &res.tvs[0], 100);
751 test_timeval_diff_eq(&start, &res.tvs[1], 300);
752 test_timeval_diff_eq(&start, &res.tvs[2], 500);
753 end:
754 event_del(&ev);
755 }
756
757 struct common_timeout_info {
758 struct event ev;
759 struct timeval called_at;
760 int which;
761 int count;
762 };
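/* Per-event bookkeeping for the common-timeout test: the event itself,
 * when it last fired, its index, and how many times it has fired
 * (common_timeout_cb deletes the event after the fourth run). */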
763
764 static void
765 common_timeout_cb(evutil_socket_t fd, short event, void *arg)
766 {
767 struct common_timeout_info *ti = arg;
768 ++ti->count;
769 evutil_gettimeofday(&ti->called_at, NULL);
770 if (ti->count >= 4)
771 event_del(&ti->ev);
772 }
773
774 static void
775 test_common_timeout(void *ptr)
776 {
777 struct basic_test_data *data = ptr;
778
779 struct event_base *base = data->base;
780 int i;
781 struct common_timeout_info info[100];
782
783 struct timeval start;
784 struct timeval tmp_100_ms = { 0, 100*1000 };
785 struct timeval tmp_200_ms = { 0, 200*1000 };
786 struct timeval tmp_5_sec = { 5, 0 };
787 struct timeval tmp_5M_usec = { 0, 5*1000*1000 };
788
789 const struct timeval *ms_100, *ms_200, *sec_5;
790
791 ms_100 = event_base_init_common_timeout(base, &tmp_100_ms);
792 ms_200 = event_base_init_common_timeout(base, &tmp_200_ms);
793 sec_5 = event_base_init_common_timeout(base, &tmp_5_sec);
794 tt_assert(ms_100);
795 tt_assert(ms_200);
796 tt_assert(sec_5);
797 tt_ptr_op(event_base_init_common_timeout(base, &tmp_200_ms),
798 ==, ms_200);
799 tt_ptr_op(event_base_init_common_timeout(base, ms_200), ==, ms_200);
800 tt_ptr_op(event_base_init_common_timeout(base, &tmp_5M_usec), ==, sec_5);
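/* Glass-box check of the encoding: a common timeout keeps the real
 * microseconds in the low bits of tv_usec and marks the value with the
 * 0x50000000 "common timeout" magic plus the timeout's index in the
 * upper bits (see the COMMON_TIMEOUT_* constants in event-internal.h). */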
801 tt_int_op(ms_100->tv_sec, ==, 0);
802 tt_int_op(ms_200->tv_sec, ==, 0);
803 tt_int_op(sec_5->tv_sec, ==, 5);
804 tt_int_op(ms_100->tv_usec, ==, 100000|0x50000000);
805 tt_int_op(ms_200->tv_usec, ==, 200000|0x50100000);
806 tt_int_op(sec_5->tv_usec, ==, 0|0x50200000);
807
808 memset(info, 0, sizeof(info));
809
810 for (i=0; i<100; ++i) {
811 info[i].which = i;
812 event_assign(&info[i].ev, base, -1, EV_TIMEOUT|EV_PERSIST,
813 common_timeout_cb, &info[i]);
814 if (i % 2) {
815 if ((i%20)==1) {
816 /* Glass-box test: Make sure we survive the
817 * transition to non-common timeouts. It's
818 * a little tricky. */
819 event_add(&info[i].ev, ms_200);
820 event_add(&info[i].ev, &tmp_100_ms);
821 } else if ((i%20)==3) {
822 /* Check heap-to-common too. */
823 event_add(&info[i].ev, &tmp_200_ms);
824 event_add(&info[i].ev, ms_100);
825 } else if ((i%20)==5) {
826 /* Also check common-to-common. */
827 event_add(&info[i].ev, ms_200);
828 event_add(&info[i].ev, ms_100);
829 } else {
830 event_add(&info[i].ev, ms_100);
831 }
832 } else {
833 event_add(&info[i].ev, ms_200);
834 }
835 }
836
837 event_base_assert_ok_(base);
838 evutil_gettimeofday(&start, NULL);
839 event_base_dispatch(base);
840
841 event_base_assert_ok_(base);
842
843 for (i=0; i<10; ++i) {
844 tt_int_op(info[i].count, ==, 4);
845 if (i % 2) {
846 test_timeval_diff_eq(&start, &info[i].called_at, 400);
847 } else {
848 test_timeval_diff_eq(&start, &info[i].called_at, 800);
849 }
850 }
851
852 /* Make sure we can free the base with some events in. */
853 for (i=0; i<100; ++i) {
854 if (i % 2) {
855 event_add(&info[i].ev, ms_100);
856 } else {
857 event_add(&info[i].ev, ms_200);
858 }
859 }
860
861 end:
862 event_base_free(data->base); /* need to do this here before info is
863 * out-of-scope */
864 data->base = NULL;
865 }
866
867 #ifndef _WIN32
868
869 #define current_base event_global_current_base_
870 extern struct event_base *current_base;
871
872 static void
873 fork_signal_cb(evutil_socket_t fd, short events, void *arg)
874 {
875 event_del(arg);
876 }
877
878 int child_pair[2] = { -1, -1 };
879 static void
880 simple_child_read_cb(evutil_socket_t fd, short event, void *arg)
881 {
882 char buf[256];
883 int len;
884
885 len = read(fd, buf, sizeof(buf));
886 if (write(child_pair[0], "", 1) < 0)
887 tt_fail_perror("write");
888
889 if (len) {
890 if (!called) {
891 if (event_add(arg, NULL) == -1)
892 exit(1);
893 }
894 } else if (called == 1)
895 test_ok = 1;
896
897 called++;
898 }
899
900 #define TEST_FORK_EXIT_SUCCESS 76
901 static void fork_wait_check(int pid)
902 {
903 int status;
904
905 TT_BLATHER(("Before waitpid"));
906
907 #ifdef WNOWAIT
908 if ((waitpid(pid, &status, WNOWAIT) == -1 && errno == EINVAL) &&
909 #else
910 if (
911 #endif
912 waitpid(pid, &status, 0) == -1) {
913 perror("waitpid");
914 exit(1);
915 }
916 TT_BLATHER(("After waitpid"));
917
918 if (WEXITSTATUS(status) != TEST_FORK_EXIT_SUCCESS) {
919 fprintf(stdout, "FAILED (exit): %d\n", WEXITSTATUS(status));
920 exit(1);
921 }
922 }
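/* Fork a child while events are pending, event_reinit() the base in the
 * child, and make sure both processes can still dispatch I/O and signal
 * events afterwards.  The child reports success by exiting with
 * TEST_FORK_EXIT_SUCCESS, which fork_wait_check() verifies above. */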
923 static void
924 test_fork(void)
925 {
926 char c;
927 struct event ev, sig_ev, usr_ev, existing_ev;
928 pid_t pid;
929
930 setup_test("After fork: ");
931
932 {
933 if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, child_pair) == -1) {
934 fprintf(stderr, "%s: socketpair\n", __func__);
935 exit(1);
936 }
937
938 if (evutil_make_socket_nonblocking(child_pair[0]) == -1) {
939 fprintf(stderr, "fcntl(O_NONBLOCK)\n");
940 exit(1);
941 }
942 }
943
944 tt_assert(current_base);
945 evthread_make_base_notifiable(current_base);
946
947 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
948 tt_fail_perror("write");
949 }
950
951 event_set(&ev, pair[1], EV_READ, simple_child_read_cb, &ev);
952 if (event_add(&ev, NULL) == -1)
953 exit(1);
954
955 evsignal_set(&sig_ev, SIGCHLD, fork_signal_cb, &sig_ev);
956 evsignal_add(&sig_ev, NULL);
957
958 evsignal_set(&existing_ev, SIGUSR2, fork_signal_cb, &existing_ev);
959 evsignal_add(&existing_ev, NULL);
960
961 event_base_assert_ok_(current_base);
962 TT_BLATHER(("Before fork"));
963 if ((pid = regress_fork()) == 0) {
964 /* in the child */
965 TT_BLATHER(("In child, before reinit"));
966 event_base_assert_ok_(current_base);
967 if (event_reinit(current_base) == -1) {
968 fprintf(stdout, "FAILED (reinit)\n");
969 exit(1);
970 }
971 TT_BLATHER(("After reinit"));
972 event_base_assert_ok_(current_base);
973 TT_BLATHER(("After assert-ok"));
974
975 evsignal_del(&sig_ev);
976
977 evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev);
978 evsignal_add(&usr_ev, NULL);
979 kill(getpid(), SIGUSR1);
980 kill(getpid(), SIGUSR2);
981
982 called = 0;
983
984 event_dispatch();
985
986 event_base_free(current_base);
987
988 /* we do not send an EOF; simple_child_read_cb requires an EOF
989 * to set test_ok. we just verify that the callback was
990 * called. */
991 exit(test_ok != 0 || called != 2 ? -2 : TEST_FORK_EXIT_SUCCESS);
992 }
993
994 /* wait until the child has read the first message */
995 if (read(child_pair[1], &c, 1) < 0) {
996 tt_fail_perror("read");
997 }
998 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
999 tt_fail_perror("write");
1000 }
1001
1002 fork_wait_check(pid);
1003
1004 /* test that the current event loop still works */
1005 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
1006 fprintf(stderr, "%s: write\n", __func__);
1007 }
1008
1009 shutdown(pair[0], EVUTIL_SHUT_WR);
1010
1011 evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev);
1012 evsignal_add(&usr_ev, NULL);
1013 kill(getpid(), SIGUSR1);
1014 kill(getpid(), SIGUSR2);
1015
1016 event_dispatch();
1017
1018 evsignal_del(&sig_ev);
1019 tt_int_op(test_ok, ==, 1);
1020
1021 end:
1022 cleanup_test();
1023 if (child_pair[0] != -1)
1024 evutil_closesocket(child_pair[0]);
1025 if (child_pair[1] != -1)
1026 evutil_closesocket(child_pair[1]);
1027 }
1028
1029 #ifdef EVTHREAD_USE_PTHREADS_IMPLEMENTED
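/* With threading enabled, event_del() called from one thread must block
 * until a callback for that event running in the dispatching thread has
 * finished: del_wait_cb sleeps 300ms, the main thread calls event_del()
 * about 30ms in, and test_del_wait checks the delete took roughly 270ms. */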
1030 static void* del_wait_thread(void *arg)
1031 {
1032 struct timeval tv_start, tv_end;
1033
1034 evutil_gettimeofday(&tv_start, NULL);
1035 event_dispatch();
1036 evutil_gettimeofday(&tv_end, NULL);
1037
1038 test_timeval_diff_eq(&tv_start, &tv_end, 300);
1039
1040 end:
1041 return NULL;
1042 }
1043
1044 static void
1045 del_wait_cb(evutil_socket_t fd, short event, void *arg)
1046 {
1047 struct timeval delay = { 0, 300*1000 };
1048 TT_BLATHER(("Sleeping: %i", test_ok));
1049 evutil_usleep_(&delay);
1050 ++test_ok;
1051 }
1052
1053 static void
1054 test_del_wait(void)
1055 {
1056 struct event ev;
1057 THREAD_T thread;
1058
1059 setup_test("event_del will wait: ");
1060
1061 event_set(&ev, pair[1], EV_READ|EV_PERSIST, del_wait_cb, &ev);
1062 event_add(&ev, NULL);
1063
1064 THREAD_START(thread, del_wait_thread, NULL);
1065
1066 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
1067 tt_fail_perror("write");
1068 }
1069
1070 {
1071 struct timeval delay = { 0, 30*1000 };
1072 evutil_usleep_(&delay);
1073 }
1074
1075 {
1076 struct timeval tv_start, tv_end;
1077 evutil_gettimeofday(&tv_start, NULL);
1078 event_del(&ev);
1079 evutil_gettimeofday(&tv_end, NULL);
1080 test_timeval_diff_eq(&tv_start, &tv_end, 270);
1081 }
1082
1083 THREAD_JOIN(thread);
1084
1085 tt_int_op(test_ok, ==, 1);
1086
1087 end:
1088 ;
1089 }
1090
1091 static void null_cb(evutil_socket_t fd, short what, void *arg) {}
1092 static void* test_del_notify_thread(void *arg)
1093 {
1094 event_dispatch();
1095 return NULL;
1096 }
1097 static void
1098 test_del_notify(void)
1099 {
1100 struct event ev;
1101 THREAD_T thread;
1102
1103 test_ok = 1;
1104
1105 event_set(&ev, -1, EV_READ, null_cb, &ev);
1106 event_add(&ev, NULL);
1107
1108 THREAD_START(thread, test_del_notify_thread, NULL);
1109
1110 {
1111 struct timeval delay = { 0, 1000 };
1112 evutil_usleep_(&delay);
1113 }
1114
1115 event_del(&ev);
1116 THREAD_JOIN(thread);
1117 }
1118 #endif
1119
1120 static void
1121 signal_cb_sa(int sig)
1122 {
1123 test_ok = 2;
1124 }
1125
1126 static void
1127 signal_cb(evutil_socket_t fd, short event, void *arg)
1128 {
1129 struct event *ev = arg;
1130
1131 evsignal_del(ev);
1132 test_ok = 1;
1133 }
1134
1135 static void
1136 test_simplesignal_impl(int find_reorder)
1137 {
1138 struct event ev;
1139 struct itimerval itv;
1140
1141 evsignal_set(&ev, SIGALRM, signal_cb, &ev);
1142 evsignal_add(&ev, NULL);
1143 /* find bugs in which operations are re-ordered */
1144 if (find_reorder) {
1145 evsignal_del(&ev);
1146 evsignal_add(&ev, NULL);
1147 }
1148
1149 memset(&itv, 0, sizeof(itv));
1150 itv.it_value.tv_sec = 0;
1151 itv.it_value.tv_usec = 100000;
1152 if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
1153 goto skip_simplesignal;
1154
1155 event_dispatch();
1156 skip_simplesignal:
1157 if (evsignal_del(&ev) == -1)
1158 test_ok = 0;
1159
1160 cleanup_test();
1161 }
1162
1163 static void
1164 test_simplestsignal(void)
1165 {
1166 setup_test("Simplest one signal: ");
1167 test_simplesignal_impl(0);
1168 }
1169
1170 static void
1171 test_simplesignal(void)
1172 {
1173 setup_test("Simple signal: ");
1174 test_simplesignal_impl(1);
1175 }
1176
1177 static void
1178 test_multiplesignal(void)
1179 {
1180 struct event ev_one, ev_two;
1181 struct itimerval itv;
1182
1183 setup_test("Multiple signal: ");
1184
1185 evsignal_set(&ev_one, SIGALRM, signal_cb, &ev_one);
1186 evsignal_add(&ev_one, NULL);
1187
1188 evsignal_set(&ev_two, SIGALRM, signal_cb, &ev_two);
1189 evsignal_add(&ev_two, NULL);
1190
1191 memset(&itv, 0, sizeof(itv));
1192 itv.it_value.tv_sec = 0;
1193 itv.it_value.tv_usec = 100000;
1194 if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
1195 goto skip_simplesignal;
1196
1197 event_dispatch();
1198
1199 skip_simplesignal:
1200 if (evsignal_del(&ev_one) == -1)
1201 test_ok = 0;
1202 if (evsignal_del(&ev_two) == -1)
1203 test_ok = 0;
1204
1205 cleanup_test();
1206 }
1207
1208 static void
1209 test_immediatesignal(void)
1210 {
1211 struct event ev;
1212
1213 test_ok = 0;
1214 evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
1215 evsignal_add(&ev, NULL);
1216 kill(getpid(), SIGUSR1);
1217 event_loop(EVLOOP_NONBLOCK);
1218 evsignal_del(&ev);
1219 cleanup_test();
1220 }
1221
1222 static void
1223 test_signal_dealloc(void)
1224 {
1225 /* make sure that evsignal_event is event_del'ed and pipe closed */
1226 struct event ev;
1227 struct event_base *base = event_init();
1228 evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
1229 evsignal_add(&ev, NULL);
1230 evsignal_del(&ev);
1231 event_base_free(base);
1232 /* If we got here without asserting, we're fine. */
1233 test_ok = 1;
1234 cleanup_test();
1235 }
1236
1237 static void
1238 test_signal_pipeloss(void)
1239 {
1240 /* make sure that the base1 pipe is closed correctly. */
1241 struct event_base *base1, *base2;
1242 int pipe1;
1243 test_ok = 0;
1244 base1 = event_init();
1245 pipe1 = base1->sig.ev_signal_pair[0];
1246 base2 = event_init();
1247 event_base_free(base2);
1248 event_base_free(base1);
1249 if (close(pipe1) != -1 || errno!=EBADF) {
1250 /* fd must be closed, so second close gives -1, EBADF */
1251 printf("signal pipe not closed. ");
1252 test_ok = 0;
1253 } else {
1254 test_ok = 1;
1255 }
1256 cleanup_test();
1257 }
1258
1259 /*
1260 * make two bases to catch signals, use both of them. this only works
1261 * for event mechanisms that use our signal pipe trick. kqueue handles
1262 * signals internally, and all interested kqueues get all the signals.
1263 */
1264 static void
1265 test_signal_switchbase(void)
1266 {
1267 struct event ev1, ev2;
1268 struct event_base *base1, *base2;
1269 int is_kqueue;
1270 test_ok = 0;
1271 base1 = event_init();
1272 base2 = event_init();
1273 is_kqueue = !strcmp(event_get_method(),"kqueue");
1274 evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1);
1275 evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2);
1276 if (event_base_set(base1, &ev1) ||
1277 event_base_set(base2, &ev2) ||
1278 event_add(&ev1, NULL) ||
1279 event_add(&ev2, NULL)) {
1280 fprintf(stderr, "%s: cannot set base, add\n", __func__);
1281 exit(1);
1282 }
1283
1284 tt_ptr_op(event_get_base(&ev1), ==, base1);
1285 tt_ptr_op(event_get_base(&ev2), ==, base2);
1286
1287 test_ok = 0;
1288 /* can handle signal before loop is called */
1289 kill(getpid(), SIGUSR1);
1290 event_base_loop(base2, EVLOOP_NONBLOCK);
1291 if (is_kqueue) {
1292 if (!test_ok)
1293 goto end;
1294 test_ok = 0;
1295 }
1296 event_base_loop(base1, EVLOOP_NONBLOCK);
1297 if (test_ok && !is_kqueue) {
1298 test_ok = 0;
1299
1300 /* set base1 to handle signals */
1301 event_base_loop(base1, EVLOOP_NONBLOCK);
1302 kill(getpid(), SIGUSR1);
1303 event_base_loop(base1, EVLOOP_NONBLOCK);
1304 event_base_loop(base2, EVLOOP_NONBLOCK);
1305 }
1306 end:
1307 event_base_free(base1);
1308 event_base_free(base2);
1309 cleanup_test();
1310 }
1311
1312 /*
1313 * assert that a signal event removed from the event queue really is
1314 * removed - with no possibility of its parent handler being fired.
1315 */
1316 static void
1317 test_signal_assert(void)
1318 {
1319 struct event ev;
1320 struct event_base *base = event_init();
1321 test_ok = 0;
1322 /* use SIGCONT so we don't kill ourselves when we signal to nowhere */
1323 evsignal_set(&ev, SIGCONT, signal_cb, &ev);
1324 evsignal_add(&ev, NULL);
1325 /*
1326 * if evsignal_del() fails to reset the handler, its current handler
1327 * will still point to evsig_handler().
1328 */
1329 evsignal_del(&ev);
1330
1331 kill(getpid(), SIGCONT);
1332 #if 0
1333 /* only way to verify we were in evsig_handler() */
1334 /* XXXX Now there's no longer a good way. */
1335 if (base->sig.evsig_caught)
1336 test_ok = 0;
1337 else
1338 test_ok = 1;
1339 #else
1340 test_ok = 1;
1341 #endif
1342
1343 event_base_free(base);
1344 cleanup_test();
1345 return;
1346 }
1347
1348 /*
1349 * assert that we restore our previous signal handler properly.
1350 */
1351 static void
1352 test_signal_restore(void)
1353 {
1354 struct event ev;
1355 struct event_base *base = event_init();
1356 #ifdef EVENT__HAVE_SIGACTION
1357 struct sigaction sa;
1358 #endif
1359
1360 test_ok = 0;
1361 #ifdef EVENT__HAVE_SIGACTION
1362 sa.sa_handler = signal_cb_sa;
1363 sa.sa_flags = 0x0;
1364 sigemptyset(&sa.sa_mask);
1365 if (sigaction(SIGUSR1, &sa, NULL) == -1)
1366 goto out;
1367 #else
1368 if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR)
1369 goto out;
1370 #endif
1371 evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
1372 evsignal_add(&ev, NULL);
1373 evsignal_del(&ev);
1374
1375 kill(getpid(), SIGUSR1);
1376 /* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */
1377 if (test_ok != 2)
1378 test_ok = 0;
1379 out:
1380 event_base_free(base);
1381 cleanup_test();
1382 return;
1383 }
1384
1385 static void
1386 signal_cb_swp(int sig, short event, void *arg)
1387 {
1388 called++;
1389 if (called < 5)
1390 kill(getpid(), sig);
1391 else
1392 event_loopexit(NULL);
1393 }
1394 static void
1395 timeout_cb_swp(evutil_socket_t fd, short event, void *arg)
1396 {
1397 if (called == -1) {
1398 struct timeval tv = {5, 0};
1399
1400 called = 0;
1401 evtimer_add((struct event *)arg, &tv);
1402 kill(getpid(), SIGUSR1);
1403 return;
1404 }
1405 test_ok = 0;
1406 event_loopexit(NULL);
1407 }
1408
1409 static void
1410 test_signal_while_processing(void)
1411 {
1412 struct event_base *base = event_init();
1413 struct event ev, ev_timer;
1414 struct timeval tv = {0, 0};
1415
1416 setup_test("Receiving a signal while processing another signal: ");
1417
1418 called = -1;
1419 test_ok = 1;
1420 signal_set(&ev, SIGUSR1, signal_cb_swp, NULL);
1421 signal_add(&ev, NULL);
1422 evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer);
1423 evtimer_add(&ev_timer, &tv);
1424 event_dispatch();
1425
1426 event_base_free(base);
1427 cleanup_test();
1428 return;
1429 }
1430 #endif
1431
1432 static void
1433 test_free_active_base(void *ptr)
1434 {
1435 struct basic_test_data *data = ptr;
1436 struct event_base *base1;
1437 struct event ev1;
1438
1439 base1 = event_init();
1440 tt_assert(base1);
1441 event_assign(&ev1, base1, data->pair[1], EV_READ, dummy_read_cb, NULL);
1442 event_add(&ev1, NULL);
1443 event_base_free(base1); /* should not crash */
1444
1445 base1 = event_init();
1446 tt_assert(base1);
1447 event_assign(&ev1, base1, data->pair[0], 0, dummy_read_cb, NULL);
1448 event_active(&ev1, EV_READ, 1);
1449 event_base_free(base1);
1450 end:
1451 ;
1452 }
1453
1454 static void
1455 test_manipulate_active_events(void *ptr)
1456 {
1457 struct basic_test_data *data = ptr;
1458 struct event_base *base = data->base;
1459 struct event ev1;
1460
1461 event_assign(&ev1, base, -1, EV_TIMEOUT, dummy_read_cb, NULL);
1462
1463 /* Make sure an active event is pending. */
1464 event_active(&ev1, EV_READ, 1);
1465 tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
1466 ==, EV_READ);
1467
1468 /* Make sure that activating an event twice works. */
1469 event_active(&ev1, EV_WRITE, 1);
1470 tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
1471 ==, EV_READ|EV_WRITE);
1472
1473 end:
1474 event_del(&ev1);
1475 }
1476
1477 static void
1478 event_selfarg_cb(evutil_socket_t fd, short event, void *arg)
1479 {
1480 struct event *ev = arg;
1481 struct event_base *base = event_get_base(ev);
1482 event_base_assert_ok_(base);
1483 event_base_loopexit(base, NULL);
1484 tt_want(ev == event_base_get_running_event(base));
1485 }
1486
1487 static void
1488 test_event_new_selfarg(void *ptr)
1489 {
1490 struct basic_test_data *data = ptr;
1491 struct event_base *base = data->base;
1492 struct event *ev = event_new(base, -1, EV_READ, event_selfarg_cb,
1493 event_self_cbarg());
1494
1495 event_active(ev, EV_READ, 1);
1496 event_base_dispatch(base);
1497
1498 event_free(ev);
1499 }
1500
1501 static void
1502 test_event_assign_selfarg(void *ptr)
1503 {
1504 struct basic_test_data *data = ptr;
1505 struct event_base *base = data->base;
1506 struct event ev;
1507
1508 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
1509 event_self_cbarg());
1510 event_active(&ev, EV_READ, 1);
1511 event_base_dispatch(base);
1512 }
1513
1514 static void
1515 test_event_base_get_num_events(void *ptr)
1516 {
1517 struct basic_test_data *data = ptr;
1518 struct event_base *base = data->base;
1519 struct event ev;
1520 int event_count_active;
1521 int event_count_virtual;
1522 int event_count_added;
1523 int event_count_active_virtual;
1524 int event_count_active_added;
1525 int event_count_virtual_added;
1526 int event_count_active_added_virtual;
1527
1528 struct timeval qsec = {0, 100000};
1529
1530 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
1531 event_self_cbarg());
1532
1533 event_add(&ev, &qsec);
1534 event_count_active = event_base_get_num_events(base,
1535 EVENT_BASE_COUNT_ACTIVE);
1536 event_count_virtual = event_base_get_num_events(base,
1537 EVENT_BASE_COUNT_VIRTUAL);
1538 event_count_added = event_base_get_num_events(base,
1539 EVENT_BASE_COUNT_ADDED);
1540 event_count_active_virtual = event_base_get_num_events(base,
1541 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1542 event_count_active_added = event_base_get_num_events(base,
1543 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1544 event_count_virtual_added = event_base_get_num_events(base,
1545 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1546 event_count_active_added_virtual = event_base_get_num_events(base,
1547 EVENT_BASE_COUNT_ACTIVE|
1548 EVENT_BASE_COUNT_ADDED|
1549 EVENT_BASE_COUNT_VIRTUAL);
1550 tt_int_op(event_count_active, ==, 0);
1551 tt_int_op(event_count_virtual, ==, 0);
1552 /* libevent itself adds a timeout event, so the event_count is 2 here */
1553 tt_int_op(event_count_added, ==, 2);
1554 tt_int_op(event_count_active_virtual, ==, 0);
1555 tt_int_op(event_count_active_added, ==, 2);
1556 tt_int_op(event_count_virtual_added, ==, 2);
1557 tt_int_op(event_count_active_added_virtual, ==, 2);
1558
1559 event_active(&ev, EV_READ, 1);
1560 event_count_active = event_base_get_num_events(base,
1561 EVENT_BASE_COUNT_ACTIVE);
1562 event_count_virtual = event_base_get_num_events(base,
1563 EVENT_BASE_COUNT_VIRTUAL);
1564 event_count_added = event_base_get_num_events(base,
1565 EVENT_BASE_COUNT_ADDED);
1566 event_count_active_virtual = event_base_get_num_events(base,
1567 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1568 event_count_active_added = event_base_get_num_events(base,
1569 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1570 event_count_virtual_added = event_base_get_num_events(base,
1571 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1572 event_count_active_added_virtual = event_base_get_num_events(base,
1573 EVENT_BASE_COUNT_ACTIVE|
1574 EVENT_BASE_COUNT_ADDED|
1575 EVENT_BASE_COUNT_VIRTUAL);
1576 tt_int_op(event_count_active, ==, 1);
1577 tt_int_op(event_count_virtual, ==, 0);
1578 tt_int_op(event_count_added, ==, 3);
1579 tt_int_op(event_count_active_virtual, ==, 1);
1580 tt_int_op(event_count_active_added, ==, 4);
1581 tt_int_op(event_count_virtual_added, ==, 3);
1582 tt_int_op(event_count_active_added_virtual, ==, 4);
1583
1584 event_base_loop(base, 0);
1585 event_count_active = event_base_get_num_events(base,
1586 EVENT_BASE_COUNT_ACTIVE);
1587 event_count_virtual = event_base_get_num_events(base,
1588 EVENT_BASE_COUNT_VIRTUAL);
1589 event_count_added = event_base_get_num_events(base,
1590 EVENT_BASE_COUNT_ADDED);
1591 event_count_active_virtual = event_base_get_num_events(base,
1592 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1593 event_count_active_added = event_base_get_num_events(base,
1594 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1595 event_count_virtual_added = event_base_get_num_events(base,
1596 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1597 event_count_active_added_virtual = event_base_get_num_events(base,
1598 EVENT_BASE_COUNT_ACTIVE|
1599 EVENT_BASE_COUNT_ADDED|
1600 EVENT_BASE_COUNT_VIRTUAL);
1601 tt_int_op(event_count_active, ==, 0);
1602 tt_int_op(event_count_virtual, ==, 0);
1603 tt_int_op(event_count_added, ==, 0);
1604 tt_int_op(event_count_active_virtual, ==, 0);
1605 tt_int_op(event_count_active_added, ==, 0);
1606 tt_int_op(event_count_virtual_added, ==, 0);
1607 tt_int_op(event_count_active_added_virtual, ==, 0);
1608
1609 event_base_add_virtual_(base);
1610 event_count_active = event_base_get_num_events(base,
1611 EVENT_BASE_COUNT_ACTIVE);
1612 event_count_virtual = event_base_get_num_events(base,
1613 EVENT_BASE_COUNT_VIRTUAL);
1614 event_count_added = event_base_get_num_events(base,
1615 EVENT_BASE_COUNT_ADDED);
1616 event_count_active_virtual = event_base_get_num_events(base,
1617 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1618 event_count_active_added = event_base_get_num_events(base,
1619 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1620 event_count_virtual_added = event_base_get_num_events(base,
1621 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1622 event_count_active_added_virtual = event_base_get_num_events(base,
1623 EVENT_BASE_COUNT_ACTIVE|
1624 EVENT_BASE_COUNT_ADDED|
1625 EVENT_BASE_COUNT_VIRTUAL);
1626 tt_int_op(event_count_active, ==, 0);
1627 tt_int_op(event_count_virtual, ==, 1);
1628 tt_int_op(event_count_added, ==, 0);
1629 tt_int_op(event_count_active_virtual, ==, 1);
1630 tt_int_op(event_count_active_added, ==, 0);
1631 tt_int_op(event_count_virtual_added, ==, 1);
1632 tt_int_op(event_count_active_added_virtual, ==, 1);
1633
1634 end:
1635 ;
1636 }
1637
1638 static void
1639 test_event_base_get_max_events(void *ptr)
1640 {
1641 struct basic_test_data *data = ptr;
1642 struct event_base *base = data->base;
1643 struct event ev;
1644 struct event ev2;
1645 int event_count_active;
1646 int event_count_virtual;
1647 int event_count_added;
1648 int event_count_active_virtual;
1649 int event_count_active_added;
1650 int event_count_virtual_added;
1651 int event_count_active_added_virtual;
1652
1653 struct timeval qsec = {0, 100000};
1654
1655 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
1656 event_self_cbarg());
1657 event_assign(&ev2, base, -1, EV_READ, event_selfarg_cb,
1658 event_self_cbarg());
1659
1660 event_add(&ev, &qsec);
1661 event_add(&ev2, &qsec);
1662 event_del(&ev2);
1663
1664 event_count_active = event_base_get_max_events(base,
1665 EVENT_BASE_COUNT_ACTIVE, 0);
1666 event_count_virtual = event_base_get_max_events(base,
1667 EVENT_BASE_COUNT_VIRTUAL, 0);
1668 event_count_added = event_base_get_max_events(base,
1669 EVENT_BASE_COUNT_ADDED, 0);
1670 event_count_active_virtual = event_base_get_max_events(base,
1671 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1672 event_count_active_added = event_base_get_max_events(base,
1673 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1674 event_count_virtual_added = event_base_get_max_events(base,
1675 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1676 event_count_active_added_virtual = event_base_get_max_events(base,
1677 EVENT_BASE_COUNT_ACTIVE |
1678 EVENT_BASE_COUNT_ADDED |
1679 EVENT_BASE_COUNT_VIRTUAL, 0);
1680
1681 tt_int_op(event_count_active, ==, 0);
1682 tt_int_op(event_count_virtual, ==, 0);
1683 /* libevent itself adds a timeout event, so the event_count is 4 here */
1684 tt_int_op(event_count_added, ==, 4);
1685 tt_int_op(event_count_active_virtual, ==, 0);
1686 tt_int_op(event_count_active_added, ==, 4);
1687 tt_int_op(event_count_virtual_added, ==, 4);
1688 tt_int_op(event_count_active_added_virtual, ==, 4);
1689
1690 event_active(&ev, EV_READ, 1);
1691 event_count_active = event_base_get_max_events(base,
1692 EVENT_BASE_COUNT_ACTIVE, 0);
1693 event_count_virtual = event_base_get_max_events(base,
1694 EVENT_BASE_COUNT_VIRTUAL, 0);
1695 event_count_added = event_base_get_max_events(base,
1696 EVENT_BASE_COUNT_ADDED, 0);
1697 event_count_active_virtual = event_base_get_max_events(base,
1698 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1699 event_count_active_added = event_base_get_max_events(base,
1700 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1701 event_count_virtual_added = event_base_get_max_events(base,
1702 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1703 event_count_active_added_virtual = event_base_get_max_events(base,
1704 EVENT_BASE_COUNT_ACTIVE |
1705 EVENT_BASE_COUNT_ADDED |
1706 EVENT_BASE_COUNT_VIRTUAL, 0);
1707
1708 tt_int_op(event_count_active, ==, 1);
1709 tt_int_op(event_count_virtual, ==, 0);
1710 tt_int_op(event_count_added, ==, 4);
1711 tt_int_op(event_count_active_virtual, ==, 1);
1712 tt_int_op(event_count_active_added, ==, 5);
1713 tt_int_op(event_count_virtual_added, ==, 4);
1714 tt_int_op(event_count_active_added_virtual, ==, 5);
1715
1716 event_base_loop(base, 0);
1717 event_count_active = event_base_get_max_events(base,
1718 EVENT_BASE_COUNT_ACTIVE, 1);
1719 event_count_virtual = event_base_get_max_events(base,
1720 EVENT_BASE_COUNT_VIRTUAL, 1);
1721 event_count_added = event_base_get_max_events(base,
1722 EVENT_BASE_COUNT_ADDED, 1);
1723 event_count_active_virtual = event_base_get_max_events(base,
1724 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1725 event_count_active_added = event_base_get_max_events(base,
1726 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1727 event_count_virtual_added = event_base_get_max_events(base,
1728 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1729 event_count_active_added_virtual = event_base_get_max_events(base,
1730 EVENT_BASE_COUNT_ACTIVE |
1731 EVENT_BASE_COUNT_ADDED |
1732 EVENT_BASE_COUNT_VIRTUAL, 1);
1733
1734 tt_int_op(event_count_active, ==, 1);
1735 tt_int_op(event_count_virtual, ==, 0);
1736 tt_int_op(event_count_added, ==, 4);
1737 tt_int_op(event_count_active_virtual, ==, 0);
1738 tt_int_op(event_count_active_added, ==, 0);
1739 tt_int_op(event_count_virtual_added, ==, 0);
1740 tt_int_op(event_count_active_added_virtual, ==, 0);
1741
1742 event_count_active = event_base_get_max_events(base,
1743 EVENT_BASE_COUNT_ACTIVE, 0);
1744 event_count_virtual = event_base_get_max_events(base,
1745 EVENT_BASE_COUNT_VIRTUAL, 0);
1746 event_count_added = event_base_get_max_events(base,
1747 EVENT_BASE_COUNT_ADDED, 0);
1748 tt_int_op(event_count_active, ==, 0);
1749 tt_int_op(event_count_virtual, ==, 0);
1750 tt_int_op(event_count_added, ==, 0);
1751
1752 event_base_add_virtual_(base);
1753 event_count_active = event_base_get_max_events(base,
1754 EVENT_BASE_COUNT_ACTIVE, 0);
1755 event_count_virtual = event_base_get_max_events(base,
1756 EVENT_BASE_COUNT_VIRTUAL, 0);
1757 event_count_added = event_base_get_max_events(base,
1758 EVENT_BASE_COUNT_ADDED, 0);
1759 event_count_active_virtual = event_base_get_max_events(base,
1760 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1761 event_count_active_added = event_base_get_max_events(base,
1762 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1763 event_count_virtual_added = event_base_get_max_events(base,
1764 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1765 event_count_active_added_virtual = event_base_get_max_events(base,
1766 EVENT_BASE_COUNT_ACTIVE |
1767 EVENT_BASE_COUNT_ADDED |
1768 EVENT_BASE_COUNT_VIRTUAL, 0);
1769
1770 tt_int_op(event_count_active, ==, 0);
1771 tt_int_op(event_count_virtual, ==, 1);
1772 tt_int_op(event_count_added, ==, 0);
1773 tt_int_op(event_count_active_virtual, ==, 1);
1774 tt_int_op(event_count_active_added, ==, 0);
1775 tt_int_op(event_count_virtual_added, ==, 1);
1776 tt_int_op(event_count_active_added_virtual, ==, 1);
1777
1778 end:
1779 ;
1780 }
1781
1782 static void
1783 test_bad_assign(void *ptr)
1784 {
1785 struct event ev;
1786 int r;
1787 /* READ|SIGNAL is not allowed */
1788 r = event_assign(&ev, NULL, -1, EV_SIGNAL|EV_READ, dummy_read_cb, NULL);
1789 tt_int_op(r,==,-1);
1790
1791 end:
1792 ;
1793 }
1794
1795 static int reentrant_cb_run = 0;
1796
1797 static void
1798 bad_reentrant_run_loop_cb(evutil_socket_t fd, short what, void *ptr)
1799 {
1800 struct event_base *base = ptr;
1801 int r;
1802 reentrant_cb_run = 1;
1803 /* This reentrant call to event_base_loop should be detected and
1804 * should fail */
1805 r = event_base_loop(base, 0);
1806 tt_int_op(r, ==, -1);
1807 end:
1808 ;
1809 }
1810
1811 static void
1812 test_bad_reentrant(void *ptr)
1813 {
1814 struct basic_test_data *data = ptr;
1815 struct event_base *base = data->base;
1816 struct event ev;
1817 int r;
1818 event_assign(&ev, base, -1,
1819 0, bad_reentrant_run_loop_cb, base);
1820
1821 event_active(&ev, EV_WRITE, 1);
1822 r = event_base_loop(base, 0);
1823 tt_int_op(r, ==, 1);
1824 tt_int_op(reentrant_cb_run, ==, 1);
1825 end:
1826 ;
1827 }
1828
1829 static int n_write_a_byte_cb=0;
1830 static int n_read_and_drain_cb=0;
1831 static int n_activate_other_event_cb=0;
1832 static void
1833 write_a_byte_cb(evutil_socket_t fd, short what, void *arg)
1834 {
1835 char buf[] = "x";
1836 if (write(fd, buf, 1) == 1)
1837 ++n_write_a_byte_cb;
1838 }
1839 static void
1840 read_and_drain_cb(evutil_socket_t fd, short what, void *arg)
1841 {
1842 char buf[128];
1843 int n;
1844 ++n_read_and_drain_cb;
1845 while ((n = read(fd, buf, sizeof(buf))) > 0)
1846 ;
1847 }
1848
1849 static void
1850 activate_other_event_cb(evutil_socket_t fd, short what, void *other_)
1851 {
1852 struct event *ev_activate = other_;
1853 ++n_activate_other_event_cb;
1854 event_active_later_(ev_activate, EV_READ);
1855 }
1856
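/* event_active_later_() queues an event to become active on the *next*
 * pass through the loop rather than immediately.  Below, ev3 and ev4
 * ping-pong activations of each other that way while one byte per
 * iteration flows across the socketpair, so the write and activate
 * counters are expected to advance in lockstep. */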
1857 static void
1858 test_active_later(void *ptr)
1859 {
1860 struct basic_test_data *data = ptr;
1861 struct event *ev1 = NULL, *ev2 = NULL;
1862 struct event ev3, ev4;
1863 struct timeval qsec = {0, 100000};
1864 ev1 = event_new(data->base, data->pair[0], EV_READ|EV_PERSIST, read_and_drain_cb, NULL);
1865 ev2 = event_new(data->base, data->pair[1], EV_WRITE|EV_PERSIST, write_a_byte_cb, NULL);
1866 event_assign(&ev3, data->base, -1, 0, activate_other_event_cb, &ev4);
1867 event_assign(&ev4, data->base, -1, 0, activate_other_event_cb, &ev3);
1868 event_add(ev1, NULL);
1869 event_add(ev2, NULL);
1870 event_active_later_(&ev3, EV_READ);
1871
1872 event_base_loopexit(data->base, &qsec);
1873
1874 event_base_loop(data->base, 0);
1875
1876 TT_BLATHER(("%d write calls, %d read calls, %d activate-other calls.",
1877 n_write_a_byte_cb, n_read_and_drain_cb, n_activate_other_event_cb));
1878 event_del(&ev3);
1879 event_del(&ev4);
1880
1881 tt_int_op(n_write_a_byte_cb, ==, n_activate_other_event_cb);
1882 tt_int_op(n_write_a_byte_cb, >, 100);
1883 tt_int_op(n_read_and_drain_cb, >, 100);
1884 tt_int_op(n_activate_other_event_cb, >, 100);
1885
1886 event_active_later_(&ev4, EV_READ);
1887 event_active(&ev4, EV_READ, 1); /* This should make the event
1888 active immediately. */
1889 tt_assert((ev4.ev_flags & EVLIST_ACTIVE) != 0);
1890 tt_assert((ev4.ev_flags & EVLIST_ACTIVE_LATER) == 0);
1891
1892 /* Now leave this one around, so that event_free sees it and removes
1893 * it. */
1894 event_active_later_(&ev3, EV_READ);
1895 event_base_assert_ok_(data->base);
1896
1897 end:
1898 if (ev1)
1899 event_free(ev1);
1900 if (ev2)
1901 event_free(ev2);
1902
1903 event_base_free(data->base);
1904 data->base = NULL;
1905 }
1906
1907
1908 static void incr_arg_cb(evutil_socket_t fd, short what, void *arg)
1909 {
1910 int *intptr = arg;
1911 (void) fd; (void) what;
1912 ++*intptr;
1913 }
1914 static void remove_timers_cb(evutil_socket_t fd, short what, void *arg)
1915 {
1916 struct event **ep = arg;
1917 (void) fd; (void) what;
1918 event_remove_timer(ep[0]);
1919 event_remove_timer(ep[1]);
1920 }
1921 static void send_a_byte_cb(evutil_socket_t fd, short what, void *arg)
1922 {
1923 evutil_socket_t *sockp = arg;
1924 (void) fd; (void) what;
1925 if (write(*sockp, "A", 1) < 0)
1926 tt_fail_perror("write");
1927 }
1928 struct read_not_timeout_param
1929 {
1930 struct event **ev;
1931 int events;
1932 int count;
1933 };
1934 static void read_not_timeout_cb(evutil_socket_t fd, short what, void *arg)
1935 {
1936 struct read_not_timeout_param *rntp = arg;
1937 char c;
1938 ev_ssize_t n;
1939 (void) fd; (void) what;
1940 n = read(fd, &c, 1);
1941 tt_int_op(n, ==, 1);
1942 rntp->events |= what;
1943 ++rntp->count;
1944 if (rntp->count == 2) event_del(rntp->ev[0]);
1945 end:
1946 ;
1947 }
1948
1949 static void
1950 test_event_remove_timeout(void *ptr)
1951 {
1952 struct basic_test_data *data = ptr;
1953 struct event_base *base = data->base;
1954 struct event *ev[5];
1955 int ev1_fired=0;
1956 struct timeval ms25 = { 0, 25*1000 },
1957 ms40 = { 0, 40*1000 },
1958 ms75 = { 0, 75*1000 },
1959 ms125 = { 0, 125*1000 };
1960 struct read_not_timeout_param rntp = { ev, 0, 0 };
1961
1962 event_base_assert_ok_(base);
1963
1964 ev[0] = event_new(base, data->pair[0], EV_READ|EV_PERSIST,
1965 read_not_timeout_cb, &rntp);
1966 ev[1] = evtimer_new(base, incr_arg_cb, &ev1_fired);
1967 ev[2] = evtimer_new(base, remove_timers_cb, ev);
1968 ev[3] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
1969 ev[4] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
1970 tt_assert(base);
1971 event_add(ev[2], &ms25); /* remove timers */
1972 event_add(ev[4], &ms40); /* write to test if timer re-activates */
1973 event_add(ev[0], &ms75); /* read */
1974 event_add(ev[1], &ms75); /* timer */
1975 event_add(ev[3], &ms125); /* timeout. */
1976 event_base_assert_ok_(base);
1977
1978 event_base_dispatch(base);
1979
1980 tt_int_op(ev1_fired, ==, 0);
1981 tt_int_op(rntp.events, ==, EV_READ);
1982
1983 event_base_assert_ok_(base);
1984 end:
1985 event_free(ev[0]);
1986 event_free(ev[1]);
1987 event_free(ev[2]);
1988 event_free(ev[3]);
1989 event_free(ev[4]);
1990 }
1991
1992 static void
1993 test_event_base_new(void *ptr)
1994 {
1995 struct basic_test_data *data = ptr;
1996 struct event_base *base = 0;
1997 struct event ev1;
1998 struct basic_cb_args args;
1999
2000 int towrite = (int)strlen(TEST1)+1;
2001 int len = write(data->pair[0], TEST1, towrite);
2002
2003 if (len < 0)
2004 tt_abort_perror("initial write");
2005 else if (len != towrite)
2006 tt_abort_printf(("initial write fell short (%d of %d bytes)",
2007 len, towrite));
2008
2009 if (shutdown(data->pair[0], EVUTIL_SHUT_WR))
2010 tt_abort_perror("initial write shutdown");
2011
2012 base = event_base_new();
2013 if (!base)
2014 tt_abort_msg("failed to create event base");
2015
2016 args.eb = base;
2017 args.ev = &ev1;
2018 args.callcount = 0;
2019 event_assign(&ev1, base, data->pair[1],
2020 EV_READ|EV_PERSIST, basic_read_cb, &args);
2021
2022 if (event_add(&ev1, NULL))
2023 tt_abort_perror("initial event_add");
2024
2025 if (event_base_loop(base, 0))
2026 tt_abort_msg("unsuccessful exit from event loop");
2027
2028 end:
2029 if (base)
2030 event_base_free(base);
2031 }
2032
2033 static void
2034 test_loopexit(void)
2035 {
2036 struct timeval tv, tv_start, tv_end;
2037 struct event ev;
2038
2039 setup_test("Loop exit: ");
2040
2041 tv.tv_usec = 0;
2042 tv.tv_sec = 60*60*24;
2043 evtimer_set(&ev, timeout_cb, NULL);
2044 evtimer_add(&ev, &tv);
2045
2046 tv.tv_usec = 300*1000;
2047 tv.tv_sec = 0;
2048 event_loopexit(&tv);
2049
2050 evutil_gettimeofday(&tv_start, NULL);
2051 event_dispatch();
2052 evutil_gettimeofday(&tv_end, NULL);
2053
2054 evtimer_del(&ev);
2055
2056 tt_assert(event_base_got_exit(global_base));
2057 tt_assert(!event_base_got_break(global_base));
2058
2059 test_timeval_diff_eq(&tv_start, &tv_end, 300);
2060
2061 test_ok = 1;
2062 end:
2063 cleanup_test();
2064 }
2065
2066 static void
2067 test_loopexit_multiple(void)
2068 {
2069 struct timeval tv, tv_start, tv_end;
2070 struct event_base *base;
2071
2072 setup_test("Loop Multiple exit: ");
2073
2074 base = event_base_new();
2075
2076 tv.tv_usec = 200*1000;
2077 tv.tv_sec = 0;
2078 event_base_loopexit(base, &tv);
2079
2080 tv.tv_usec = 0;
2081 tv.tv_sec = 3;
2082 event_base_loopexit(base, &tv);
2083
2084 evutil_gettimeofday(&tv_start, NULL);
2085 event_base_dispatch(base);
2086 evutil_gettimeofday(&tv_end, NULL);
2087
2088 tt_assert(event_base_got_exit(base));
2089 tt_assert(!event_base_got_break(base));
2090
2091 event_base_free(base);
2092
2093 test_timeval_diff_eq(&tv_start, &tv_end, 200);
2094
2095 test_ok = 1;
2096
2097 end:
2098 cleanup_test();
2099 }
2100
2101 static void
2102 break_cb(evutil_socket_t fd, short events, void *arg)
2103 {
2104 test_ok = 1;
2105 event_loopbreak();
2106 }
2107
2108 static void
2109 fail_cb(evutil_socket_t fd, short events, void *arg)
2110 {
2111 test_ok = 0;
2112 }
2113
2114 static void
2115 test_loopbreak(void)
2116 {
2117 struct event ev1, ev2;
2118 struct timeval tv;
2119
2120 setup_test("Loop break: ");
2121
2122 tv.tv_sec = 0;
2123 tv.tv_usec = 0;
2124 evtimer_set(&ev1, break_cb, NULL);
2125 evtimer_add(&ev1, &tv);
2126 evtimer_set(&ev2, fail_cb, NULL);
2127 evtimer_add(&ev2, &tv);
2128
2129 event_dispatch();
2130
2131 tt_assert(!event_base_got_exit(global_base));
2132 tt_assert(event_base_got_break(global_base));
2133
2134 evtimer_del(&ev1);
2135 evtimer_del(&ev2);
2136
2137 end:
2138 cleanup_test();
2139 }
2140
2141 static struct event *readd_test_event_last_added = NULL;
2142 static void
2143 re_add_read_cb(evutil_socket_t fd, short event, void *arg)
2144 {
2145 char buf[256];
2146 struct event *ev_other = arg;
2147 ev_ssize_t n_read;
2148
2149 readd_test_event_last_added = ev_other;
2150
2151 n_read = read(fd, buf, sizeof(buf));
2152
2153 if (n_read < 0) {
2154 tt_fail_perror("read");
2155 event_base_loopbreak(event_get_base(ev_other));
2156 } else {
2157 event_add(ev_other, NULL);
2158 ++test_ok;
2159 }
2160 }
2161 static void
2162 test_nonpersist_readd(void *_data)
2163 {
2164 struct event ev1, ev2;
2165 struct basic_test_data *data = _data;
2166
2167 memset(&ev1, 0, sizeof(ev1));
2168 memset(&ev2, 0, sizeof(ev2));
2169
2170 tt_assert(!event_assign(&ev1, data->base, data->pair[0], EV_READ, re_add_read_cb, &ev2));
2171 tt_assert(!event_assign(&ev2, data->base, data->pair[1], EV_READ, re_add_read_cb, &ev1));
2172
2173 tt_int_op(write(data->pair[0], "Hello", 5), ==, 5);
2174 tt_int_op(write(data->pair[1], "Hello", 5), ==, 5);
2175
2176 tt_int_op(event_add(&ev1, NULL), ==, 0);
2177 tt_int_op(event_add(&ev2, NULL), ==, 0);
2178 tt_int_op(event_base_loop(data->base, EVLOOP_ONCE), ==, 0);
2179 tt_int_op(test_ok, ==, 2);
2180
2181 /* At this point, both callbacks have run.  Whichever one ran first
2182 * re-added the second, but the second (being non-persistent) was
2183 * deleted again just before its own callback ran.  That callback,
2184 * though, re-added the first.
2185 */
2186 tt_assert(readd_test_event_last_added);
2187 if (readd_test_event_last_added == &ev1) {
2188 tt_assert(event_pending(&ev1, EV_READ, NULL) && !event_pending(&ev2, EV_READ, NULL));
2189 } else {
2190 tt_assert(event_pending(&ev2, EV_READ, NULL) && !event_pending(&ev1, EV_READ, NULL));
2191 }
2192
2193 end:
2194 if (event_initialized(&ev1))
2195 event_del(&ev1);
2196 if (event_initialized(&ev2))
2197 event_del(&ev2);
2198 }
2199
2200 struct test_pri_event {
2201 struct event ev;
2202 int count;
2203 };
2204
2205 static void
2206 test_priorities_cb(evutil_socket_t fd, short what, void *arg)
2207 {
2208 struct test_pri_event *pri = arg;
2209 struct timeval tv;
2210
2211 if (pri->count == 3) {
2212 event_loopexit(NULL);
2213 return;
2214 }
2215
2216 pri->count++;
2217
2218 evutil_timerclear(&tv);
2219 event_add(&pri->ev, &tv);
2220 }
2221
2222 static void
2223 test_priorities_impl(int npriorities)
2224 {
2225 struct test_pri_event one, two;
2226 struct timeval tv;
2227
2228 TT_BLATHER(("Testing Priorities %d: ", npriorities));
2229
2230 event_base_priority_init(global_base, npriorities);
2231
2232 memset(&one, 0, sizeof(one));
2233 memset(&two, 0, sizeof(two));
2234
2235 timeout_set(&one.ev, test_priorities_cb, &one);
2236 if (event_priority_set(&one.ev, 0) == -1) {
2237 fprintf(stderr, "%s: failed to set priority", __func__);
2238 exit(1);
2239 }
2240
2241 timeout_set(&two.ev, test_priorities_cb, &two);
2242 if (event_priority_set(&two.ev, npriorities - 1) == -1) {
2243 fprintf(stderr, "%s: failed to set priority", __func__);
2244 exit(1);
2245 }
2246
2247 evutil_timerclear(&tv);
2248
2249 if (event_add(&one.ev, &tv) == -1)
2250 exit(1);
2251 if (event_add(&two.ev, &tv) == -1)
2252 exit(1);
2253
2254 event_dispatch();
2255
2256 event_del(&one.ev);
2257 event_del(&two.ev);
2258
2259 if (npriorities == 1) {
2260 if (one.count == 3 && two.count == 3)
2261 test_ok = 1;
2262 } else if (npriorities == 2) {
2263 /* Two is called once because event_loopexit is priority 1 */
2264 if (one.count == 3 && two.count == 1)
2265 test_ok = 1;
2266 } else {
2267 if (one.count == 3 && two.count == 0)
2268 test_ok = 1;
2269 }
2270 }
2271
2272 static void
2273 test_priorities(void)
2274 {
2275 test_priorities_impl(1);
2276 if (test_ok)
2277 test_priorities_impl(2);
2278 if (test_ok)
2279 test_priorities_impl(3);
2280 }
2281
2282 /* priority-active-inversion: activate a higher-priority event, and make sure
2283 * it keeps us from running a lower-priority event first. */
2284 static int n_pai_calls = 0;
2285 static struct event pai_events[3];
2286
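/* In libevent a numerically lower priority runs first: pai_events[0] and
 * [1] share priority 4 while [2] gets priority 0, so when [0]'s callback
 * activates both of the others, [2] is expected to be dispatched before
 * [1] even though it was activated second.  That is the ordering the
 * call_order checks below assert. */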
2287 static void
2288 prio_active_inversion_cb(evutil_socket_t fd, short what, void *arg)
2289 {
2290 int *call_order = arg;
2291 *call_order = n_pai_calls++;
2292 if (n_pai_calls == 1) {
2293 /* This should activate later, even though it shares a
2294 priority with us. */
2295 event_active(&pai_events[1], EV_READ, 1);
2296 /* This should activate next, since its priority is higher,
2297 even though we activated it second. */
2298 event_active(&pai_events[2], EV_TIMEOUT, 1);
2299 }
2300 }
2301
2302 static void
2303 test_priority_active_inversion(void *data_)
2304 {
2305 struct basic_test_data *data = data_;
2306 struct event_base *base = data->base;
2307 int call_order[3];
2308 int i;
2309 tt_int_op(event_base_priority_init(base, 8), ==, 0);
2310
2311 n_pai_calls = 0;
2312 memset(call_order, 0, sizeof(call_order));
2313
2314 for (i=0;i<3;++i) {
2315 event_assign(&pai_events[i], data->base, -1, 0,
2316 prio_active_inversion_cb, &call_order[i]);
2317 }
2318
2319 event_priority_set(&pai_events[0], 4);
2320 event_priority_set(&pai_events[1], 4);
2321 event_priority_set(&pai_events[2], 0);
2322
2323 event_active(&pai_events[0], EV_WRITE, 1);
2324
2325 event_base_dispatch(base);
2326 tt_int_op(n_pai_calls, ==, 3);
2327 tt_int_op(call_order[0], ==, 0);
2328 tt_int_op(call_order[1], ==, 2);
2329 tt_int_op(call_order[2], ==, 1);
2330 end:
2331 ;
2332 }
2333
2334
2335 static void
2336 test_multiple_cb(evutil_socket_t fd, short event, void *arg)
2337 {
2338 if (event & EV_READ)
2339 test_ok |= 1;
2340 else if (event & EV_WRITE)
2341 test_ok |= 2;
2342 }
2343
2344 static void
2345 test_multiple_events_for_same_fd(void)
2346 {
2347 struct event e1, e2;
2348
2349 setup_test("Multiple events for same fd: ");
2350
2351 event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL);
2352 event_add(&e1, NULL);
2353 event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL);
2354 event_add(&e2, NULL);
2355 event_loop(EVLOOP_ONCE);
2356 event_del(&e2);
2357
2358 if (write(pair[1], TEST1, strlen(TEST1)+1) < 0) {
2359 tt_fail_perror("write");
2360 }
2361
2362 event_loop(EVLOOP_ONCE);
2363 event_del(&e1);
2364
2365 if (test_ok != 3)
2366 test_ok = 0;
2367
2368 cleanup_test();
2369 }
2370
2371 int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
2372 int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
2373 int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t number);
2374 int evtag_decode_tag(ev_uint32_t *pnumber, struct evbuffer *evbuf);
2375
2376 static void
2377 read_once_cb(evutil_socket_t fd, short event, void *arg)
2378 {
2379 char buf[256];
2380 int len;
2381
2382 len = read(fd, buf, sizeof(buf));
2383
2384 if (called) {
2385 test_ok = 0;
2386 } else if (len) {
2387 /* Assumes global pair[0] can be used for writing */
2388 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
2389 tt_fail_perror("write");
2390 test_ok = 0;
2391 } else {
2392 test_ok = 1;
2393 }
2394 }
2395
2396 called++;
2397 }
2398
2399 static void
2400 test_want_only_once(void)
2401 {
2402 struct event ev;
2403 struct timeval tv;
2404
2405 /* Very simple read test */
2406 setup_test("Want read only once: ");
2407
2408 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
2409 tt_fail_perror("write");
2410 }
2411
2412 /* Set up the loop termination */
2413 evutil_timerclear(&tv);
2414 tv.tv_usec = 300*1000;
2415 event_loopexit(&tv);
2416
2417 event_set(&ev, pair[1], EV_READ, read_once_cb, &ev);
2418 if (event_add(&ev, NULL) == -1)
2419 exit(1);
2420 event_dispatch();
2421
2422 cleanup_test();
2423 }
2424
2425 #define TEST_MAX_INT 6
2426
2427 static void
2428 evtag_int_test(void *ptr)
2429 {
2430 struct evbuffer *tmp = evbuffer_new();
2431 ev_uint32_t integers[TEST_MAX_INT] = {
2432 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
2433 };
2434 ev_uint32_t integer;
2435 ev_uint64_t big_int;
2436 int i;
2437
2438 evtag_init();
2439
2440 for (i = 0; i < TEST_MAX_INT; i++) {
2441 int oldlen, newlen;
2442 oldlen = (int)EVBUFFER_LENGTH(tmp);
2443 evtag_encode_int(tmp, integers[i]);
2444 newlen = (int)EVBUFFER_LENGTH(tmp);
2445 TT_BLATHER(("encoded 0x%08x with %d bytes",
2446 (unsigned)integers[i], newlen - oldlen));
2447 big_int = integers[i];
2448 big_int *= 1000000000; /* 1 billion */
2449 evtag_encode_int64(tmp, big_int);
2450 }
2451
2452 for (i = 0; i < TEST_MAX_INT; i++) {
2453 tt_int_op(evtag_decode_int(&integer, tmp), !=, -1);
2454 tt_uint_op(integer, ==, integers[i]);
2455 tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1);
2456 tt_assert((big_int / 1000000000) == integers[i]);
2457 }
2458
2459 tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
2460 end:
2461 evbuffer_free(tmp);
2462 }
2463
2464 static void
2465 evtag_fuzz(void *ptr)
2466 {
2467 unsigned char buffer[4096];
2468 struct evbuffer *tmp = evbuffer_new();
2469 struct timeval tv;
2470 int i, j;
2471
2472 int not_failed = 0;
2473
2474 evtag_init();
2475
2476 for (j = 0; j < 100; j++) {
2477 for (i = 0; i < (int)sizeof(buffer); i++)
2478 buffer[i] = test_weakrand();
2479 evbuffer_drain(tmp, -1);
2480 evbuffer_add(tmp, buffer, sizeof(buffer));
2481
2482 if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1)
2483 not_failed++;
2484 }
2485
2486 /* The majority of decodes should fail */
2487 tt_int_op(not_failed, <, 10);
2488
2489 /* Now insert some corruption into the tag length field */
2490 evbuffer_drain(tmp, -1);
2491 evutil_timerclear(&tv);
2492 tv.tv_sec = 1;
2493 evtag_marshal_timeval(tmp, 0, &tv);
2494 evbuffer_add(tmp, buffer, sizeof(buffer));
2495
2496 ((char *)EVBUFFER_DATA(tmp))[1] = '\xff';
2497 if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) {
2498 tt_abort_msg("evtag_unmarshal_timeval should have failed");
2499 }
2500
2501 end:
2502 evbuffer_free(tmp);
2503 }
2504
2505 static void
2506 evtag_tag_encoding(void *ptr)
2507 {
2508 struct evbuffer *tmp = evbuffer_new();
2509 ev_uint32_t integers[TEST_MAX_INT] = {
2510 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
2511 };
2512 ev_uint32_t integer;
2513 int i;
2514
2515 evtag_init();
2516
2517 for (i = 0; i < TEST_MAX_INT; i++) {
2518 int oldlen, newlen;
2519 oldlen = (int)EVBUFFER_LENGTH(tmp);
2520 evtag_encode_tag(tmp, integers[i]);
2521 newlen = (int)EVBUFFER_LENGTH(tmp);
2522 TT_BLATHER(("encoded 0x%08x with %d bytes",
2523 (unsigned)integers[i], newlen - oldlen));
2524 }
2525
2526 for (i = 0; i < TEST_MAX_INT; i++) {
2527 tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1);
2528 tt_uint_op(integer, ==, integers[i]);
2529 }
2530
2531 tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
2532
2533 end:
2534 evbuffer_free(tmp);
2535 }
2536
2537 static void
2538 evtag_test_peek(void *ptr)
2539 {
2540 struct evbuffer *tmp = evbuffer_new();
2541 ev_uint32_t u32;
2542
2543 evtag_marshal_int(tmp, 30, 0);
2544 evtag_marshal_string(tmp, 40, "Hello world");
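/* For these small values each marshaled record is expected to be one tag
 * byte plus one length byte plus the payload: the int 30 fits in a single
 * payload byte (1+1+1), and "Hello world" is 11 payload bytes (1+1+11),
 * which is what the peek_length checks below verify. */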
2545
2546 tt_int_op(evtag_peek(tmp, &u32), ==, 1);
2547 tt_int_op(u32, ==, 30);
2548 tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
2549 tt_int_op(u32, ==, 1+1+1);
2550 tt_int_op(evtag_consume(tmp), ==, 0);
2551
2552 tt_int_op(evtag_peek(tmp, &u32), ==, 1);
2553 tt_int_op(u32, ==, 40);
2554 tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
2555 tt_int_op(u32, ==, 1+1+11);
2556 tt_int_op(evtag_payload_length(tmp, &u32), ==, 0);
2557 tt_int_op(u32, ==, 11);
2558
2559 end:
2560 evbuffer_free(tmp);
2561 }
2562
2563
2564 static void
2565 test_methods(void *ptr)
2566 {
2567 const char **methods = event_get_supported_methods();
2568 struct event_config *cfg = NULL;
2569 struct event_base *base = NULL;
2570 const char *backend;
2571 int n_methods = 0;
2572
2573 tt_assert(methods);
2574
2575 backend = methods[0];
2576 while (*methods != NULL) {
2577 TT_BLATHER(("Support method: %s", *methods));
2578 ++methods;
2579 ++n_methods;
2580 }
2581
2582 cfg = event_config_new();
2583 assert(cfg != NULL);
2584
2585 tt_int_op(event_config_avoid_method(cfg, backend), ==, 0);
2586 event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
2587
2588 base = event_base_new_with_config(cfg);
2589 if (n_methods > 1) {
2590 tt_assert(base);
2591 tt_str_op(backend, !=, event_base_get_method(base));
2592 } else {
2593 tt_assert(base == NULL);
2594 }
2595
2596 end:
2597 if (base)
2598 event_base_free(base);
2599 if (cfg)
2600 event_config_free(cfg);
2601 }
2602
2603 static void
2604 test_version(void *arg)
2605 {
2606 const char *vstr;
2607 ev_uint32_t vint;
2608 int major, minor, patch, n;
2609
2610 vstr = event_get_version();
2611 vint = event_get_version_number();
2612
2613 tt_assert(vstr);
2614 tt_assert(vint);
2615
2616 tt_str_op(vstr, ==, LIBEVENT_VERSION);
2617 tt_int_op(vint, ==, LIBEVENT_VERSION_NUMBER);
2618
2619 n = sscanf(vstr, "%d.%d.%d", &major, &minor, &patch);
2620 tt_assert(3 == n);
2621 tt_int_op((vint&0xffffff00), ==, ((major<<24)|(minor<<16)|(patch<<8)));
2622 end:
2623 ;
2624 }
2625
2626 static void
2627 test_base_features(void *arg)
2628 {
2629 struct event_base *base = NULL;
2630 struct event_config *cfg = NULL;
2631
2632 cfg = event_config_new();
2633
2634 tt_assert(0 == event_config_require_features(cfg, EV_FEATURE_ET));
2635
2636 base = event_base_new_with_config(cfg);
2637 if (base) {
2638 tt_int_op(EV_FEATURE_ET, ==,
2639 event_base_get_features(base) & EV_FEATURE_ET);
2640 } else {
2641 base = event_base_new();
2642 tt_int_op(0, ==, event_base_get_features(base) & EV_FEATURE_ET);
2643 }
2644
2645 end:
2646 if (base)
2647 event_base_free(base);
2648 if (cfg)
2649 event_config_free(cfg);
2650 }
2651
2652 #ifdef EVENT__HAVE_SETENV
2653 #define SETENV_OK
2654 #elif !defined(EVENT__HAVE_SETENV) && defined(EVENT__HAVE_PUTENV)
2655 static void setenv(const char *k, const char *v, int o_)
2656 {
2657 char b[256];
2658 evutil_snprintf(b, sizeof(b), "%s=%s",k,v);
2659 putenv(b);
2660 }
2661 #define SETENV_OK
2662 #endif
2663
2664 #ifdef EVENT__HAVE_UNSETENV
2665 #define UNSETENV_OK
2666 #elif !defined(EVENT__HAVE_UNSETENV) && defined(EVENT__HAVE_PUTENV)
2667 static void unsetenv(const char *k)
2668 {
2669 char b[256];
2670 evutil_snprintf(b, sizeof(b), "%s=",k);
2671 putenv(b);
2672 }
2673 #define UNSETENV_OK
2674 #endif
2675
2676 #if defined(SETENV_OK) && defined(UNSETENV_OK)
2677 static void
2678 methodname_to_envvar(const char *mname, char *buf, size_t buflen)
2679 {
2680 char *cp;
2681 evutil_snprintf(buf, buflen, "EVENT_NO%s", mname);
2682 for (cp = buf; *cp; ++cp) {
2683 *cp = EVUTIL_TOUPPER_(*cp);
2684 }
2685 }
2686 #endif
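/* test_base_environ below relies on libevent's environment checks: unless
 * a base is created with EVENT_BASE_FLAG_IGNORE_ENV, a backend named
 * "foo" is skipped when EVENT_NOFOO is set, which is the variable name
 * methodname_to_envvar() builds above (the "epoll (with changelist)"
 * backend is special-cased to EVENT_NOEPOLL inside the test itself). */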
2687
2688 static void
2689 test_base_environ(void *arg)
2690 {
2691 struct event_base *base = NULL;
2692 struct event_config *cfg = NULL;
2693
2694 #if defined(SETENV_OK) && defined(UNSETENV_OK)
2695 const char **basenames;
2696 int i, n_methods=0;
2697 char varbuf[128];
2698 const char *defaultname, *ignoreenvname;
2699
2700 /* See if unsetenv works before we rely on it. */
2701 setenv("EVENT_NOWAFFLES", "1", 1);
2702 unsetenv("EVENT_NOWAFFLES");
2703 if (getenv("EVENT_NOWAFFLES") != NULL) {
2704 #ifndef EVENT__HAVE_UNSETENV
2705 TT_DECLARE("NOTE", ("Can't fake unsetenv; skipping test"));
2706 #else
2707 TT_DECLARE("NOTE", ("unsetenv doesn't work; skipping test"));
2708 #endif
2709 tt_skip();
2710 }
2711
2712 basenames = event_get_supported_methods();
2713 for (i = 0; basenames[i]; ++i) {
2714 methodname_to_envvar(basenames[i], varbuf, sizeof(varbuf));
2715 unsetenv(varbuf);
2716 ++n_methods;
2717 }
2718
2719 base = event_base_new();
2720 tt_assert(base);
2721
2722 defaultname = event_base_get_method(base);
2723 TT_BLATHER(("default is <%s>", defaultname));
2724 event_base_free(base);
2725 base = NULL;
2726
2727 /* Can we disable the method with EVENT_NOfoo ? */
2728 if (!strcmp(defaultname, "epoll (with changelist)")) {
2729 setenv("EVENT_NOEPOLL", "1", 1);
2730 ignoreenvname = "epoll";
2731 } else {
2732 methodname_to_envvar(defaultname, varbuf, sizeof(varbuf));
2733 setenv(varbuf, "1", 1);
2734 ignoreenvname = defaultname;
2735 }
2736
2737 /* Use an empty cfg rather than NULL so a failure doesn't exit() */
2738 cfg = event_config_new();
2739 base = event_base_new_with_config(cfg);
2740 event_config_free(cfg);
2741 cfg = NULL;
2742 if (n_methods == 1) {
2743 tt_assert(!base);
2744 } else {
2745 tt_assert(base);
2746 tt_str_op(defaultname, !=, event_base_get_method(base));
2747 event_base_free(base);
2748 base = NULL;
2749 }
2750
2751 /* Can we disable looking at the environment with IGNORE_ENV ? */
2752 cfg = event_config_new();
2753 event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
2754 base = event_base_new_with_config(cfg);
2755 tt_assert(base);
2756 tt_str_op(ignoreenvname, ==, event_base_get_method(base));
2757 #else
2758 tt_skip();
2759 #endif
2760
2761 end:
2762 if (base)
2763 event_base_free(base);
2764 if (cfg)
2765 event_config_free(cfg);
2766 }
2767
2768 static void
2769 read_called_once_cb(evutil_socket_t fd, short event, void *arg)
2770 {
2771 tt_int_op(event, ==, EV_READ);
2772 called += 1;
2773 end:
2774 ;
2775 }
2776
2777 static void
2778 timeout_called_once_cb(evutil_socket_t fd, short event, void *arg)
2779 {
2780 tt_int_op(event, ==, EV_TIMEOUT);
2781 called += 100;
2782 end:
2783 ;
2784 }
2785
2786 static void
2787 immediate_called_twice_cb(evutil_socket_t fd, short event, void *arg)
2788 {
2789 tt_int_op(event, ==, EV_TIMEOUT);
2790 called += 1000;
2791 end:
2792 ;
2793 }
2794
2795 static void
2796 test_event_once(void *ptr)
2797 {
2798 struct basic_test_data *data = ptr;
2799 struct timeval tv;
2800 int r;
2801
2802 tv.tv_sec = 0;
2803 tv.tv_usec = 50*1000;
2804 called = 0;
2805 r = event_base_once(data->base, data->pair[0], EV_READ,
2806 read_called_once_cb, NULL, NULL);
2807 tt_int_op(r, ==, 0);
2808 r = event_base_once(data->base, -1, EV_TIMEOUT,
2809 timeout_called_once_cb, NULL, &tv);
2810 tt_int_op(r, ==, 0);
2811 r = event_base_once(data->base, -1, 0, NULL, NULL, NULL);
2812 tt_int_op(r, <, 0);
2813 r = event_base_once(data->base, -1, EV_TIMEOUT,
2814 immediate_called_twice_cb, NULL, NULL);
2815 tt_int_op(r, ==, 0);
2816 tv.tv_sec = 0;
2817 tv.tv_usec = 0;
2818 r = event_base_once(data->base, -1, EV_TIMEOUT,
2819 immediate_called_twice_cb, NULL, &tv);
2820 tt_int_op(r, ==, 0);
2821
2822 if (write(data->pair[1], TEST1, strlen(TEST1)+1) < 0) {
2823 tt_fail_perror("write");
2824 }
2825
2826 shutdown(data->pair[1], EVUTIL_SHUT_WR);
2827
2828 event_base_dispatch(data->base);
2829
2830 tt_int_op(called, ==, 2101);
2831 end:
2832 ;
2833 }
2834
2835 static void
2836 test_event_once_never(void *ptr)
2837 {
2838 struct basic_test_data *data = ptr;
2839 struct timeval tv;
2840
2841 /* Have one trigger in 10 seconds (don't worry, it never gets to fire) */
2842 tv.tv_sec = 10;
2843 tv.tv_usec = 0;
2844 called = 0;
2845 event_base_once(data->base, -1, EV_TIMEOUT,
2846 timeout_called_once_cb, NULL, &tv);
2847
2848 /* But shut down the base in 75 msec. */
2849 tv.tv_sec = 0;
2850 tv.tv_usec = 75*1000;
2851 event_base_loopexit(data->base, &tv);
2852
2853 event_base_dispatch(data->base);
2854
2855 tt_int_op(called, ==, 0);
2856 end:
2857 ;
2858 }
2859
2860 static void
2861 test_event_pending(void *ptr)
2862 {
2863 struct basic_test_data *data = ptr;
2864 struct event *r=NULL, *w=NULL, *t=NULL;
2865 struct timeval tv, now, tv2;
2866
2867 tv.tv_sec = 0;
2868 tv.tv_usec = 500 * 1000;
2869 r = event_new(data->base, data->pair[0], EV_READ, simple_read_cb,
2870 NULL);
2871 w = event_new(data->base, data->pair[1], EV_WRITE, simple_write_cb,
2872 NULL);
2873 t = evtimer_new(data->base, timeout_cb, NULL);
2874
2875 tt_assert(r);
2876 tt_assert(w);
2877 tt_assert(t);
2878
2879 evutil_gettimeofday(&now, NULL);
2880 event_add(r, NULL);
2881 event_add(t, &tv);
2882
2883 tt_assert( event_pending(r, EV_READ, NULL));
2884 tt_assert(!event_pending(w, EV_WRITE, NULL));
2885 tt_assert(!event_pending(r, EV_WRITE, NULL));
2886 tt_assert( event_pending(r, EV_READ|EV_WRITE, NULL));
2887 tt_assert(!event_pending(r, EV_TIMEOUT, NULL));
2888 tt_assert( event_pending(t, EV_TIMEOUT, NULL));
2889 tt_assert( event_pending(t, EV_TIMEOUT, &tv2));
2890
2891 tt_assert(evutil_timercmp(&tv2, &now, >));
2892
2893 test_timeval_diff_eq(&now, &tv2, 500);
2894
2895 end:
2896 if (r) {
2897 event_del(r);
2898 event_free(r);
2899 }
2900 if (w) {
2901 event_del(w);
2902 event_free(w);
2903 }
2904 if (t) {
2905 event_del(t);
2906 event_free(t);
2907 }
2908 }
2909
2910 static void
2911 dfd_cb(evutil_socket_t fd, short e, void *data)
2912 {
2913 *(int*)data = (int)e;
2914 }
2915
2916 static void
2917 test_event_closed_fd_poll(void *arg)
2918 {
2919 struct timeval tv;
2920 struct event *e;
2921 struct basic_test_data *data = (struct basic_test_data *)arg;
2922 int i = 0;
2923
2924 if (strcmp(event_base_get_method(data->base), "poll")) {
2925 tinytest_set_test_skipped_();
2926 return;
2927 }
2928
2929 e = event_new(data->base, data->pair[0], EV_READ, dfd_cb, &i);
2930 tt_assert(e);
2931
2932 tv.tv_sec = 0;
2933 tv.tv_usec = 500 * 1000;
2934 event_add(e, &tv);
2935 tt_assert(event_pending(e, EV_READ, NULL));
2936 close(data->pair[0]);
2937 data->pair[0] = -1; /** avoids double-close */
2938 event_base_loop(data->base, EVLOOP_ONCE);
2939 tt_int_op(i, ==, EV_READ);
2940
2941 end:
2942 if (e) {
2943 event_del(e);
2944 event_free(e);
2945 }
2946 }
2947
2948 #ifndef _WIN32
2949 /* You can't do this test on windows, since dup2 doesn't work on sockets */
2950
2951 /* Regression test for our workaround for a fun epoll/linux related bug
2952 * where fd2 = dup(fd1); add(fd2); close(fd2); dup2(fd1,fd2); add(fd2)
2953 * will get you an EEXIST */
2954 static void
2955 test_dup_fd(void *arg)
2956 {
2957 struct basic_test_data *data = arg;
2958 struct event_base *base = data->base;
2959 struct event *ev1=NULL, *ev2=NULL;
2960 int fd, dfd=-1;
2961 int ev1_got, ev2_got;
2962
2963 tt_int_op(write(data->pair[0], "Hello world",
2964 strlen("Hello world")), >, 0);
2965 fd = data->pair[1];
2966
2967 dfd = dup(fd);
2968 tt_int_op(dfd, >=, 0);
2969
2970 ev1 = event_new(base, fd, EV_READ|EV_PERSIST, dfd_cb, &ev1_got);
2971 ev2 = event_new(base, dfd, EV_READ|EV_PERSIST, dfd_cb, &ev2_got);
2972 ev1_got = ev2_got = 0;
2973 event_add(ev1, NULL);
2974 event_add(ev2, NULL);
2975 event_base_loop(base, EVLOOP_ONCE);
2976 tt_int_op(ev1_got, ==, EV_READ);
2977 tt_int_op(ev2_got, ==, EV_READ);
2978
2979 /* Now close and delete dfd then dispatch. We need to do the
2980 * dispatch here so that when we add it later, we think there
2981 * was an intermediate delete. */
2982 close(dfd);
2983 event_del(ev2);
2984 ev1_got = ev2_got = 0;
2985 event_base_loop(base, EVLOOP_ONCE);
2986 tt_want_int_op(ev1_got, ==, EV_READ);
2987 tt_int_op(ev2_got, ==, 0);
2988
2989 /* Re-duplicate the fd. We need to get the same duplicated
2990 * value that we closed to provoke the epoll quirk. Also, we
2991 * need to change the events to write, or else the old lingering
2992 * read event will make the test pass whether the change was
2993 * successful or not. */
2994 tt_int_op(dup2(fd, dfd), ==, dfd);
2995 event_free(ev2);
2996 ev2 = event_new(base, dfd, EV_WRITE|EV_PERSIST, dfd_cb, &ev2_got);
2997 event_add(ev2, NULL);
2998 ev1_got = ev2_got = 0;
2999 event_base_loop(base, EVLOOP_ONCE);
3000 tt_want_int_op(ev1_got, ==, EV_READ);
3001 tt_int_op(ev2_got, ==, EV_WRITE);
3002
3003 end:
3004 if (ev1)
3005 event_free(ev1);
3006 if (ev2)
3007 event_free(ev2);
3008 if (dfd >= 0)
3009 close(dfd);
3010 }
3011 #endif
3012
3013 #ifdef EVENT__DISABLE_MM_REPLACEMENT
3014 static void
3015 test_mm_functions(void *arg)
3016 {
3017 tinytest_set_test_skipped_();
3018 }
3019 #else
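/* The dummy allocator below over-allocates each request by 16 bytes,
 * stamps the magic prefix "{[<guardedram>]}" at the front, and returns a
 * pointer just past it; check_dummy_mem_ok() verifies that prefix, which
 * proves the event_base really was allocated through the replacement
 * functions installed with event_set_mem_functions(). */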
3020 static int
3021 check_dummy_mem_ok(void *mem_)
3022 {
3023 char *mem = mem_;
3024 mem -= 16;
3025 return !memcmp(mem, "{[<guardedram>]}", 16);
3026 }
3027
3028 static void *
3029 dummy_malloc(size_t len)
3030 {
3031 char *mem = malloc(len+16);
3032 memcpy(mem, "{[<guardedram>]}", 16);
3033 return mem+16;
3034 }
3035
3036 static void *
3037 dummy_realloc(void *mem_, size_t len)
3038 {
3039 char *mem = mem_;
3040 if (!mem)
3041 return dummy_malloc(len);
3042 tt_want(check_dummy_mem_ok(mem_));
3043 mem -= 16;
3044 mem = realloc(mem, len+16);
3045 return mem+16;
3046 }
3047
3048 static void
3049 dummy_free(void *mem_)
3050 {
3051 char *mem = mem_;
3052 tt_want(check_dummy_mem_ok(mem_));
3053 mem -= 16;
3054 free(mem);
3055 }
3056
3057 static void
3058 test_mm_functions(void *arg)
3059 {
3060 struct event_base *b = NULL;
3061 struct event_config *cfg = NULL;
3062 event_set_mem_functions(dummy_malloc, dummy_realloc, dummy_free);
3063 cfg = event_config_new();
3064 event_config_avoid_method(cfg, "Nonesuch");
3065 b = event_base_new_with_config(cfg);
3066 tt_assert(b);
3067 tt_assert(check_dummy_mem_ok(b));
3068 end:
3069 if (cfg)
3070 event_config_free(cfg);
3071 if (b)
3072 event_base_free(b);
3073 }
3074 #endif
3075
3076 static void
3077 many_event_cb(evutil_socket_t fd, short event, void *arg)
3078 {
3079 int *calledp = arg;
3080 *calledp += 1;
3081 }
3082
3083 static void
3084 test_many_events(void *arg)
3085 {
3086 /* Try 70 events that should all be ready at once. This will
3087 * exercise the "resize" code on most of the backends, and will make
3088 * sure that we can get past the 64-handle limit of some windows
3089 * functions. */
3090 #define MANY 70
3091
3092 struct basic_test_data *data = arg;
3093 struct event_base *base = data->base;
3094 int one_at_a_time = data->setup_data != NULL;
3095 evutil_socket_t sock[MANY];
3096 struct event *ev[MANY];
3097 int called[MANY];
3098 int i;
3099 int loopflags = EVLOOP_NONBLOCK, evflags=0;
3100 if (one_at_a_time) {
3101 loopflags |= EVLOOP_ONCE;
3102 evflags = EV_PERSIST;
3103 }
3104
3105 memset(sock, 0xff, sizeof(sock));
3106 memset(ev, 0, sizeof(ev));
3107 memset(called, 0, sizeof(called));
3108
3109 for (i = 0; i < MANY; ++i) {
3110 /* We need an event that will hit the backend, and that will
3111 * be ready immediately. "Send a datagram" is an easy
3112 * instance of that. */
3113 sock[i] = socket(AF_INET, SOCK_DGRAM, 0);
3114 tt_assert(sock[i] >= 0);
3115 tt_assert(!evutil_make_socket_nonblocking(sock[i]));
3116 called[i] = 0;
3117 ev[i] = event_new(base, sock[i], EV_WRITE|evflags,
3118 many_event_cb, &called[i]);
3119 event_add(ev[i], NULL);
3120 if (one_at_a_time)
3121 event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE);
3122 }
3123
3124 event_base_loop(base, loopflags);
3125
3126 for (i = 0; i < MANY; ++i) {
3127 if (one_at_a_time)
3128 tt_int_op(called[i], ==, MANY - i + 1);
3129 else
3130 tt_int_op(called[i], ==, 1);
3131 }
3132
3133 end:
3134 for (i = 0; i < MANY; ++i) {
3135 if (ev[i])
3136 event_free(ev[i]);
3137 if (sock[i] >= 0)
3138 evutil_closesocket(sock[i]);
3139 }
3140 #undef MANY
3141 }
3142
3143 static void
3144 test_struct_event_size(void *arg)
3145 {
3146 tt_int_op(event_get_struct_event_size(), <=, sizeof(struct event));
3147 end:
3148 ;
3149 }
3150
3151 static void
3152 test_get_assignment(void *arg)
3153 {
3154 struct basic_test_data *data = arg;
3155 struct event_base *base = data->base;
3156 struct event *ev1 = NULL;
3157 const char *str = "foo";
3158
3159 struct event_base *b;
3160 evutil_socket_t s;
3161 short what;
3162 event_callback_fn cb;
3163 void *cb_arg;
3164
3165 ev1 = event_new(base, data->pair[1], EV_READ, dummy_read_cb, (void*)str);
3166 event_get_assignment(ev1, &b, &s, &what, &cb, &cb_arg);
3167
3168 tt_ptr_op(b, ==, base);
3169 tt_fd_op(s, ==, data->pair[1]);
3170 tt_int_op(what, ==, EV_READ);
3171 tt_ptr_op(cb, ==, dummy_read_cb);
3172 tt_ptr_op(cb_arg, ==, str);
3173
3174 /* Now make sure this doesn't crash. */
3175 event_get_assignment(ev1, NULL, NULL, NULL, NULL, NULL);
3176
3177 end:
3178 if (ev1)
3179 event_free(ev1);
3180 }
3181
3182 struct foreach_helper {
3183 int count;
3184 const struct event *ev;
3185 };
3186
3187 static int
3188 foreach_count_cb(const struct event_base *base, const struct event *ev, void *arg)
3189 {
3190 struct foreach_helper *h = event_get_callback_arg(ev);
3191 struct timeval *tv = arg;
3192 if (event_get_callback(ev) != timeout_cb)
3193 return 0;
3194 tt_ptr_op(event_get_base(ev), ==, base);
3195 tt_int_op(tv->tv_sec, ==, 10);
3196 h->ev = ev;
3197 h->count++;
3198 return 0;
3199 end:
3200 return -1;
3201 }
3202
3203 static int
3204 foreach_find_cb(const struct event_base *base, const struct event *ev, void *arg)
3205 {
3206 const struct event **ev_out = arg;
3207 struct foreach_helper *h = event_get_callback_arg(ev);
3208 if (event_get_callback(ev) != timeout_cb)
3209 return 0;
3210 if (h->count == 99) {
3211 *ev_out = ev;
3212 return 101;
3213 }
3214 return 0;
3215 }
3216
3217 static void
3218 test_event_foreach(void *arg)
3219 {
3220 struct basic_test_data *data = arg;
3221 struct event_base *base = data->base;
3222 struct event *ev[5];
3223 struct foreach_helper visited[5];
3224 int i;
3225 struct timeval ten_sec = {10,0};
3226 const struct event *ev_found = NULL;
3227
3228 for (i = 0; i < 5; ++i) {
3229 visited[i].count = 0;
3230 visited[i].ev = NULL;
3231 ev[i] = event_new(base, -1, 0, timeout_cb, &visited[i]);
3232 }
3233
3234 tt_int_op(-1, ==, event_base_foreach_event(NULL, foreach_count_cb, NULL));
3235 tt_int_op(-1, ==, event_base_foreach_event(base, NULL, NULL));
3236
3237 event_add(ev[0], &ten_sec);
3238 event_add(ev[1], &ten_sec);
3239 event_active(ev[1], EV_TIMEOUT, 1);
3240 event_active(ev[2], EV_TIMEOUT, 1);
3241 event_add(ev[3], &ten_sec);
3242 /* Don't touch ev[4]. */
3243
3244 tt_int_op(0, ==, event_base_foreach_event(base, foreach_count_cb,
3245 &ten_sec));
3246 tt_int_op(1, ==, visited[0].count);
3247 tt_int_op(1, ==, visited[1].count);
3248 tt_int_op(1, ==, visited[2].count);
3249 tt_int_op(1, ==, visited[3].count);
3250 tt_ptr_op(ev[0], ==, visited[0].ev);
3251 tt_ptr_op(ev[1], ==, visited[1].ev);
3252 tt_ptr_op(ev[2], ==, visited[2].ev);
3253 tt_ptr_op(ev[3], ==, visited[3].ev);
3254
3255 visited[2].count = 99;
3256 tt_int_op(101, ==, event_base_foreach_event(base, foreach_find_cb,
3257 &ev_found));
3258 tt_ptr_op(ev_found, ==, ev[2]);
3259
3260 end:
3261 for (i=0; i<5; ++i) {
3262 event_free(ev[i]);
3263 }
3264 }
3265
3266 static struct event_base *cached_time_base = NULL;
3267 static int cached_time_reset = 0;
3268 static int cached_time_sleep = 0;
3269 static void
3270 cache_time_cb(evutil_socket_t fd, short what, void *arg)
3271 {
3272 struct timeval *tv = arg;
3273 tt_int_op(0, ==, event_base_gettimeofday_cached(cached_time_base, tv));
3274 if (cached_time_sleep) {
3275 struct timeval delay = { 0, 30*1000 };
3276 evutil_usleep_(&delay);
3277 }
3278 if (cached_time_reset) {
3279 event_base_update_cache_time(cached_time_base);
3280 }
3281 end:
3282 ;
3283 }
3284
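/* The "sleep", "reset", and "disable" tokens in setup_data select the
 * variants below: "sleep" makes each callback busy-wait ~30ms, "reset"
 * calls event_base_update_cache_time() at the end of each callback, and
 * "disable" builds the base with EVENT_BASE_FLAG_NO_CACHE_TIME, so the
 * recorded timestamps are expected to drift apart instead of matching
 * exactly. */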
3285 static void
3286 test_gettimeofday_cached(void *arg)
3287 {
3288 struct basic_test_data *data = arg;
3289 struct event_config *cfg = NULL;
3290 struct event_base *base = NULL;
3291 struct timeval tv1, tv2, tv3, now;
3292 struct event *ev1=NULL, *ev2=NULL, *ev3=NULL;
3293 int cached_time_disable = strstr(data->setup_data, "disable") != NULL;
3294
3295 cfg = event_config_new();
3296 if (cached_time_disable) {
3297 event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME);
3298 }
3299 cached_time_base = base = event_base_new_with_config(cfg);
3300 tt_assert(base);
3301
3302 /* Try gettimeofday_cached outside of an event loop. */
3303 evutil_gettimeofday(&now, NULL);
3304 tt_int_op(0, ==, event_base_gettimeofday_cached(NULL, &tv1));
3305 tt_int_op(0, ==, event_base_gettimeofday_cached(base, &tv2));
3306 tt_int_op(timeval_msec_diff(&tv1, &tv2), <, 10);
3307 tt_int_op(timeval_msec_diff(&tv1, &now), <, 10);
3308
3309 cached_time_reset = strstr(data->setup_data, "reset") != NULL;
3310 cached_time_sleep = strstr(data->setup_data, "sleep") != NULL;
3311
3312 ev1 = event_new(base, -1, 0, cache_time_cb, &tv1);
3313 ev2 = event_new(base, -1, 0, cache_time_cb, &tv2);
3314 ev3 = event_new(base, -1, 0, cache_time_cb, &tv3);
3315
3316 event_active(ev1, EV_TIMEOUT, 1);
3317 event_active(ev2, EV_TIMEOUT, 1);
3318 event_active(ev3, EV_TIMEOUT, 1);
3319
3320 event_base_dispatch(base);
3321
3322 if (cached_time_reset && cached_time_sleep) {
3323 tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
3324 tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
3325 } else if (cached_time_disable && cached_time_sleep) {
3326 tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
3327 tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
3328 } else if (! cached_time_disable) {
3329 tt_assert(evutil_timercmp(&tv1, &tv2, ==));
3330 tt_assert(evutil_timercmp(&tv2, &tv3, ==));
3331 }
3332
3333 end:
3334 if (ev1)
3335 event_free(ev1);
3336 if (ev2)
3337 event_free(ev2);
3338 if (ev3)
3339 event_free(ev3);
3340 if (base)
3341 event_base_free(base);
3342 if (cfg)
3343 event_config_free(cfg);
3344 }
3345
3346 static void
3347 tabf_cb(evutil_socket_t fd, short what, void *arg)
3348 {
3349 int *ptr = arg;
3350 *ptr = what;
3351 *ptr += 0x10000;
3352 }
3353
3354 static void
3355 test_evmap_invalid_slots(void *arg)
3356 {
3357 struct basic_test_data *data = arg;
3358 struct event_base *base = data->base;
3359 struct event *ev1 = NULL, *ev2 = NULL;
3360 int e1, e2;
3361 #ifndef _WIN32
3362 struct event *ev3 = NULL, *ev4 = NULL;
3363 int e3, e4;
3364 #endif
3365
3366 ev1 = evsignal_new(base, -1, dummy_read_cb, (void *)base);
3367 ev2 = evsignal_new(base, NSIG, dummy_read_cb, (void *)base);
3368 tt_assert(ev1);
3369 tt_assert(ev2);
3370 e1 = event_add(ev1, NULL);
3371 e2 = event_add(ev2, NULL);
3372 tt_int_op(e1, !=, 0);
3373 tt_int_op(e2, !=, 0);
3374 #ifndef _WIN32
3375 ev3 = event_new(base, INT_MAX, EV_READ, dummy_read_cb, (void *)base);
3376 ev4 = event_new(base, INT_MAX / 2, EV_READ, dummy_read_cb, (void *)base);
3377 tt_assert(ev3);
3378 tt_assert(ev4);
3379 e3 = event_add(ev3, NULL);
3380 e4 = event_add(ev4, NULL);
3381 tt_int_op(e3, !=, 0);
3382 tt_int_op(e4, !=, 0);
3383 #endif
3384
3385 end:
3386 event_free(ev1);
3387 event_free(ev2);
3388 #ifndef _WIN32
3389 event_free(ev3);
3390 event_free(ev4);
3391 #endif
3392 }
3393
3394 static void
3395 test_active_by_fd(void *arg)
3396 {
3397 struct basic_test_data *data = arg;
3398 struct event_base *base = data->base;
3399 struct event *ev1 = NULL, *ev2 = NULL, *ev3 = NULL, *ev4 = NULL;
3400 int e1,e2,e3,e4;
3401 #ifndef _WIN32
3402 struct event *evsig = NULL;
3403 int es;
3404 #endif
3405 struct timeval tenmin = { 600, 0 };
3406
3407 /* Ensure no crash on nonexistent FD. */
3408 event_base_active_by_fd(base, 1000, EV_READ);
3409
3410 /* Ensure no crash on bogus FD. */
3411 event_base_active_by_fd(base, -1, EV_READ);
3412
3413 /* Ensure no crash on nonexistent/bogus signal. */
3414 event_base_active_by_signal(base, 1000);
3415 event_base_active_by_signal(base, -1);
3416
3417 event_base_assert_ok_(base);
3418
3419 e1 = e2 = e3 = e4 = 0;
3420 ev1 = event_new(base, data->pair[0], EV_READ, tabf_cb, &e1);
3421 ev2 = event_new(base, data->pair[0], EV_WRITE, tabf_cb, &e2);
3422 ev3 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e3);
3423 ev4 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e4);
3424 tt_assert(ev1);
3425 tt_assert(ev2);
3426 tt_assert(ev3);
3427 tt_assert(ev4);
3428 #ifndef _WIN32
3429 evsig = event_new(base, SIGHUP, EV_SIGNAL, tabf_cb, &es);
3430 tt_assert(evsig);
3431 event_add(evsig, &tenmin);
3432 #endif
3433
3434 event_add(ev1, &tenmin);
3435 event_add(ev2, NULL);
3436 event_add(ev3, NULL);
3437 event_add(ev4, &tenmin);
3438
3439
3440 event_base_assert_ok_(base);
3441
3442 /* Trigger 2, 3, 4 */
3443 event_base_active_by_fd(base, data->pair[0], EV_WRITE);
3444 event_base_active_by_fd(base, data->pair[1], EV_READ);
3445 event_base_active_by_fd(base, data->pair[1], EV_TIMEOUT);
3446 #ifndef _WIN32
3447 event_base_active_by_signal(base, SIGHUP);
3448 #endif
3449
3450 event_base_assert_ok_(base);
3451
3452 event_base_loop(base, EVLOOP_ONCE);
3453
3454 tt_int_op(e1, ==, 0);
3455 tt_int_op(e2, ==, EV_WRITE | 0x10000);
3456 tt_int_op(e3, ==, EV_READ | 0x10000);
3457 /* Mask out EV_WRITE here, since it could be genuinely writeable. */
3458 tt_int_op((e4 & ~EV_WRITE), ==, EV_READ | EV_TIMEOUT | 0x10000);
3459 #ifndef _WIN32
3460 tt_int_op(es, ==, EV_SIGNAL | 0x10000);
3461 #endif
3462
3463 end:
3464 if (ev1)
3465 event_free(ev1);
3466 if (ev2)
3467 event_free(ev2);
3468 if (ev3)
3469 event_free(ev3);
3470 if (ev4)
3471 event_free(ev4);
3472 #ifndef _WIN32
3473 if (evsig)
3474 event_free(evsig);
3475 #endif
3476 }
3477
3478 struct testcase_t main_testcases[] = {
3479 /* Some converted-over tests */
3480 { "methods", test_methods, TT_FORK, NULL, NULL },
3481 { "version", test_version, 0, NULL, NULL },
3482 BASIC(base_features, TT_FORK|TT_NO_LOGS),
3483 { "base_environ", test_base_environ, TT_FORK, NULL, NULL },
3484
3485 BASIC(event_base_new, TT_FORK|TT_NEED_SOCKETPAIR),
3486 BASIC(free_active_base, TT_FORK|TT_NEED_SOCKETPAIR),
3487
3488 BASIC(manipulate_active_events, TT_FORK|TT_NEED_BASE),
3489 BASIC(event_new_selfarg, TT_FORK|TT_NEED_BASE),
3490 BASIC(event_assign_selfarg, TT_FORK|TT_NEED_BASE),
3491 BASIC(event_base_get_num_events, TT_FORK|TT_NEED_BASE),
3492 BASIC(event_base_get_max_events, TT_FORK|TT_NEED_BASE),
3493 BASIC(evmap_invalid_slots, TT_FORK|TT_NEED_BASE),
3494
3495 BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
3496 BASIC(bad_reentrant, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
3497 BASIC(active_later, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR|TT_RETRIABLE),
3498 BASIC(event_remove_timeout, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3499
3500 /* These are still using the old API */
3501 LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE),
3502 { "persistent_timeout_jump", test_persistent_timeout_jump, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
3503 { "persistent_active_timeout", test_persistent_active_timeout,
3504 TT_FORK|TT_NEED_BASE|TT_RETRIABLE, &basic_setup, NULL },
3505 LEGACY(priorities, TT_FORK|TT_NEED_BASE),
3506 BASIC(priority_active_inversion, TT_FORK|TT_NEED_BASE),
3507 { "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE,
3508 &basic_setup, NULL },
3509
3510 /* These legacy tests may not all need all of these flags. */
3511 LEGACY(simpleread, TT_ISOLATED),
3512 LEGACY(simpleread_multiple, TT_ISOLATED),
3513 LEGACY(simplewrite, TT_ISOLATED),
3514 { "simpleclose_rw", test_simpleclose_rw, TT_FORK, &basic_setup, NULL },
3515 /* simpleclose */
3516 { "simpleclose_close", test_simpleclose,
3517 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3518 &basic_setup, (void *)"close" },
3519 { "simpleclose_shutdown", test_simpleclose,
3520 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3521 &basic_setup, (void *)"shutdown" },
3522 /* simpleclose_*_persist */
3523 { "simpleclose_close_persist", test_simpleclose,
3524 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3525 &basic_setup, (void *)"close_persist" },
3526 { "simpleclose_shutdown_persist", test_simpleclose,
3527 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3528 &basic_setup, (void *)"shutdown_persist" },
3529 /* simpleclose_*_et */
3530 { "simpleclose_close_et", test_simpleclose,
3531 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3532 &basic_setup, (void *)"close_ET" },
3533 { "simpleclose_shutdown_et", test_simpleclose,
3534 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3535 &basic_setup, (void *)"shutdown_ET" },
3536 /* simpleclose_*_persist_et */
3537 { "simpleclose_close_persist_et", test_simpleclose,
3538 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3539 &basic_setup, (void *)"close_persist_ET" },
3540 { "simpleclose_shutdown_persist_et", test_simpleclose,
3541 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3542 &basic_setup, (void *)"shutdown_persist_ET" },
3543 LEGACY(multiple, TT_ISOLATED),
3544 LEGACY(persistent, TT_ISOLATED),
3545 LEGACY(combined, TT_ISOLATED),
3546 LEGACY(simpletimeout, TT_ISOLATED),
3547 LEGACY(loopbreak, TT_ISOLATED),
3548 LEGACY(loopexit, TT_ISOLATED),
3549 LEGACY(loopexit_multiple, TT_ISOLATED),
3550 { "nonpersist_readd", test_nonpersist_readd, TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE, &basic_setup, NULL },
3551 LEGACY(multiple_events_for_same_fd, TT_ISOLATED),
3552 LEGACY(want_only_once, TT_ISOLATED),
3553 { "event_once", test_event_once, TT_ISOLATED, &basic_setup, NULL },
3554 { "event_once_never", test_event_once_never, TT_ISOLATED, &basic_setup, NULL },
3555 { "event_pending", test_event_pending, TT_ISOLATED, &basic_setup,
3556 NULL },
3557 { "event_closed_fd_poll", test_event_closed_fd_poll, TT_ISOLATED, &basic_setup,
3558 NULL },
3559
3560 #ifndef _WIN32
3561 { "dup_fd", test_dup_fd, TT_ISOLATED, &basic_setup, NULL },
3562 #endif
3563 { "mm_functions", test_mm_functions, TT_FORK, NULL, NULL },
3564 { "many_events", test_many_events, TT_ISOLATED, &basic_setup, NULL },
3565 { "many_events_slow_add", test_many_events, TT_ISOLATED, &basic_setup, (void*)1 },
3566
3567 { "struct_event_size", test_struct_event_size, 0, NULL, NULL },
3568 BASIC(get_assignment, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3569
3570 BASIC(event_foreach, TT_FORK|TT_NEED_BASE),
3571 { "gettimeofday_cached", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"" },
3572 { "gettimeofday_cached_sleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep" },
3573 { "gettimeofday_cached_reset", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep reset" },
3574 { "gettimeofday_cached_disabled", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep disable" },
3575 { "gettimeofday_cached_disabled_nosleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"disable" },
3576
3577 BASIC(active_by_fd, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3578
3579 #ifndef _WIN32
3580 LEGACY(fork, TT_ISOLATED),
3581 #endif
3582
3583 #ifdef EVTHREAD_USE_PTHREADS_IMPLEMENTED
3584 LEGACY(del_wait, TT_ISOLATED|TT_NEED_THREADS|TT_RETRIABLE),
3585 LEGACY(del_notify, TT_ISOLATED|TT_NEED_THREADS),
3586 #endif
3587
3588 END_OF_TESTCASES
3589 };
3590
3591 struct testcase_t evtag_testcases[] = {
3592 { "int", evtag_int_test, TT_FORK, NULL, NULL },
3593 { "fuzz", evtag_fuzz, TT_FORK, NULL, NULL },
3594 { "encoding", evtag_tag_encoding, TT_FORK, NULL, NULL },
3595 { "peek", evtag_test_peek, 0, NULL, NULL },
3596
3597 END_OF_TESTCASES
3598 };
3599
3600 struct testcase_t signal_testcases[] = {
3601 #ifndef _WIN32
3602 LEGACY(simplestsignal, TT_ISOLATED),
3603 LEGACY(simplesignal, TT_ISOLATED),
3604 LEGACY(multiplesignal, TT_ISOLATED),
3605 LEGACY(immediatesignal, TT_ISOLATED),
3606 LEGACY(signal_dealloc, TT_ISOLATED),
3607 LEGACY(signal_pipeloss, TT_ISOLATED),
3608 LEGACY(signal_switchbase, TT_ISOLATED|TT_NO_LOGS),
3609 LEGACY(signal_restore, TT_ISOLATED),
3610 LEGACY(signal_assert, TT_ISOLATED),
3611 LEGACY(signal_while_processing, TT_ISOLATED),
3612 #endif
3613 END_OF_TESTCASES
3614 };
3615
3616