/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"
#include "private-lib-event-libs-libevent.h"

#define pt_to_priv_event(_pt) ((struct lws_pt_eventlibs_libevent *)(_pt)->evlib_pt)
#define wsi_to_priv_event(_w) ((struct lws_wsi_eventlibs_libevent *)(_w)->evlib_wsi)

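/*
 * High-resolution timer callback: service any scheduled events (suls) that
 * have become ripe and, if any remain, re-arm the libevent timer for the
 * next deadline returned by __lws_sul_service_ripe().
 */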
static void
lws_event_hrtimer_cb(evutil_socket_t fd, short event, void *p)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)p;
	struct lws_pt_eventlibs_libevent *ptpr = pt_to_priv_event(pt);
	struct timeval tv;
	lws_usec_t us;

	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us) {
#if defined(__APPLE__)
		tv.tv_sec = (int)(us / LWS_US_PER_SEC);
		tv.tv_usec = (int)(us - (tv.tv_sec * LWS_US_PER_SEC));
#else
		tv.tv_sec = (long)(us / LWS_US_PER_SEC);
		tv.tv_usec = (long)(us - (tv.tv_sec * LWS_US_PER_SEC));
#endif
		evtimer_add(ptpr->hrtimer, &tv);
	}
	lws_pt_unlock(pt);
}

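/*
 * Idle timer callback: flush pending rx-flow data, do any forced service
 * connections still need (re-arming ourselves 1ms ahead if some remains),
 * then re-arm the hrtimer for the next scheduled event and honour any
 * deferred request to destroy the context.
 */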
static void
lws_event_idle_timer_cb(evutil_socket_t fd, short event, void *p)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)p;
	struct lws_pt_eventlibs_libevent *ptpr = pt_to_priv_event(pt);
	struct timeval tv;
	lws_usec_t us;

	if (pt->is_destroyed)
		return;

	lws_service_do_ripe_rxflow(pt);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
			/* yes... come back again later */

			tv.tv_sec = 0;
			tv.tv_usec = 1000;
			evtimer_add(ptpr->idle_timer, &tv);

			return;
		}
	}

	/* account for hrtimer */

	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us) {
		tv.tv_sec = (suseconds_t)(us / LWS_US_PER_SEC);
		tv.tv_usec = (suseconds_t)(us - (tv.tv_sec * LWS_US_PER_SEC));
		evtimer_add(ptpr->hrtimer, &tv);
	}
	lws_pt_unlock(pt);

	if (pt->destroy_self)
		lws_context_destroy(pt->context);
}

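/*
 * Per-fd io callback: translate the libevent revents into an lws_pollfd and
 * pass it to lws_service_fd_tsi() for the wsi bound to this fd, then kick the
 * idle timer 1ms ahead so deferred work gets a chance to run.
 */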
static void
lws_event_cb(evutil_socket_t sock_fd, short revents, void *ctx)
{
	struct lws_signal_watcher_libevent *lws_io =
			(struct lws_signal_watcher_libevent *)ctx;
	struct lws_context *context = lws_io->context;
	struct lws_context_per_thread *pt;
	struct lws_pollfd eventfd;
	struct timeval tv;
	struct lws *wsi;

	if (revents & EV_TIMEOUT)
		return;

	/* !!! EV_CLOSED doesn't exist in libevent2 */
#if LIBEVENT_VERSION_NUMBER < 0x02000000
	if (revents & EV_CLOSED) {
		event_del(lws_io->event.watcher);
		event_free(lws_io->event.watcher);
		return;
	}
#endif

	eventfd.fd = sock_fd;
	eventfd.events = 0;
	eventfd.revents = 0;
	if (revents & EV_READ) {
		eventfd.events |= LWS_POLLIN;
		eventfd.revents |= LWS_POLLIN;
	}
	if (revents & EV_WRITE) {
		eventfd.events |= LWS_POLLOUT;
		eventfd.revents |= LWS_POLLOUT;
	}

	wsi = wsi_from_fd(context, sock_fd);
	if (!wsi)
		return;

	pt = &context->pt[(int)wsi->tsi];
	if (pt->is_destroyed)
		return;

	lws_service_fd_tsi(context, &eventfd, wsi->tsi);

	if (pt->destroy_self) {
		lwsl_cx_notice(context, "pt destroy self coming true");
		lws_context_destroy(pt->context);
		return;
	}

	/* set the idle timer for 1ms ahead */

	tv.tv_sec = 0;
	tv.tv_usec = 1000;
	evtimer_add(pt_to_priv_event(pt)->idle_timer, &tv);
}

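/*
 * SIGINT watcher callback: defer to the application-provided signal handler
 * if one was given at context creation, otherwise break out of our own
 * (non-foreign) event loop.
 */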
void
lws_event_sigint_cb(evutil_socket_t sock_fd, short revents, void *ctx)
{
	struct lws_context_per_thread *pt = ctx;
	struct event *signal = pt_to_priv_event(pt)->w_sigint.watcher;

	if (pt->context->eventlib_signal_cb) {
		pt->context->eventlib_signal_cb((void *)(lws_intptr_t)sock_fd,
						event_get_signal(signal));

		return;
	}
	if (!pt->event_loop_foreign)
		event_base_loopbreak(pt_to_priv_event(pt)->io_loop);
}

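/*
 * Attach a persistent EV_READ watcher to a vhost listen socket, so incoming
 * connections wake lws_event_cb().
 */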
static int
elops_listen_init_event(struct lws_dll2 *d, void *user)
{
	struct lws *wsi = lws_container_of(d, struct lws, listen_list);
	struct lws_context *context = (struct lws_context *)user;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libevent *ptpr = pt_to_priv_event(pt);
	struct lws_io_watcher_libevent *w_read =
			&(wsi_to_priv_event(wsi)->w_read);

	w_read->context = context;
	w_read->watcher = event_new(ptpr->io_loop, wsi->desc.sockfd,
				    (EV_READ | EV_PERSIST), lws_event_cb, w_read);
	event_add(w_read->watcher, NULL);
	w_read->set = 1;

	return 0;
}

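/*
 * Per service-thread init: adopt the foreign event_base if one was passed in,
 * otherwise create our own; then hook up the listen sockets, the hrtimer and
 * idle timer and, for our own loop only, a SIGINT watcher.
 */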
static int
elops_init_pt_event(struct lws_context *context, void *_loop, int tsi)
{
	struct event_base *loop = (struct event_base *)_loop;
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_libevent *ptpr = pt_to_priv_event(pt);

	lwsl_cx_info(context, "loop %p", _loop);

	if (!loop)
		loop = event_base_new();
	else
		context->pt[tsi].event_loop_foreign = 1;

	if (!loop) {
		lwsl_cx_err(context, "creating event base failed");

		return -1;
	}

	ptpr->io_loop = loop;

	lws_vhost_foreach_listen_wsi(context, context, elops_listen_init_event);

	/* static event loop objects */

	ptpr->hrtimer = event_new(loop, -1, EV_PERSIST,
				  lws_event_hrtimer_cb, pt);

	ptpr->idle_timer = event_new(loop, -1, 0,
				     lws_event_idle_timer_cb, pt);
	{
		struct timeval tv;
		tv.tv_sec = (long)0;
		tv.tv_usec = (long)1000;
		evtimer_add(ptpr->hrtimer, &tv);
	}

	/* Register the signal watcher unless it's a foreign loop */

	if (pt->event_loop_foreign)
		return 0;

	ptpr->w_sigint.watcher = evsignal_new(loop, SIGINT,
					      lws_event_sigint_cb, pt);
	event_add(ptpr->w_sigint.watcher, NULL);

	return 0;
}

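/*
 * Context-level init: stash the optional application signal callback and
 * point each service thread's sigint watcher back at the context.
 */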
static int
elops_init_context_event(struct lws_context *context,
			 const struct lws_context_creation_info *info)
{
	int n;

	context->eventlib_signal_cb = info->signal_cb;

	for (n = 0; n < context->count_threads; n++)
		pt_to_priv_event(&context->pt[n])->w_sigint.context = context;

	return 0;
}

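/*
 * A wsi (socket or file) is being adopted: create its persistent read and
 * write watchers on this service thread's event_base; they stay disarmed
 * until elops_io_event() is asked to start them.
 */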
static int
elops_accept_event(struct lws *wsi)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt;
	struct lws_pt_eventlibs_libevent *ptpr;
	struct lws_wsi_eventlibs_libevent *wpr = wsi_to_priv_event(wsi);
	evutil_socket_t fd;

	wpr->w_read.context = context;
	wpr->w_write.context = context;

	/* set up the (not yet armed) read / write watchers for this wsi */
	pt = &context->pt[(int)wsi->tsi];
	ptpr = pt_to_priv_event(pt);

	if (wsi->role_ops->file_handle)
		fd = (evutil_socket_t)(ev_intptr_t) wsi->desc.filefd;
	else
		fd = wsi->desc.sockfd;

	wpr->w_read.watcher = event_new(ptpr->io_loop, fd,
				(EV_READ | EV_PERSIST), lws_event_cb, &wpr->w_read);
	wpr->w_write.watcher = event_new(ptpr->io_loop, fd,
				(EV_WRITE | EV_PERSIST), lws_event_cb, &wpr->w_write);

	return 0;
}

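/*
 * Start or stop rx / tx interest for a wsi by adding or removing its libevent
 * watchers, tracking the armed state in the ->set flags so we never add or
 * delete the same watcher twice.
 */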
static void
elops_io_event(struct lws *wsi, unsigned int flags)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libevent *ptpr = pt_to_priv_event(pt);
	struct lws_wsi_eventlibs_libevent *wpr = wsi_to_priv_event(wsi);

	if (!ptpr->io_loop || wsi->a.context->being_destroyed ||
	    pt->is_destroyed)
		return;

	assert((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	       (flags & (LWS_EV_READ | LWS_EV_WRITE)));

	if (flags & LWS_EV_START) {
		if ((flags & LWS_EV_WRITE) && !wpr->w_write.set) {
			event_add(wpr->w_write.watcher, NULL);
			wpr->w_write.set = 1;
		}

		if ((flags & LWS_EV_READ) && !wpr->w_read.set) {
			event_add(wpr->w_read.watcher, NULL);
			wpr->w_read.set = 1;
		}
	} else {
		if ((flags & LWS_EV_WRITE) && wpr->w_write.set) {
			event_del(wpr->w_write.watcher);
			wpr->w_write.set = 0;
		}

		if ((flags & LWS_EV_READ) && wpr->w_read.set) {
			event_del(wpr->w_read.watcher);
			wpr->w_read.set = 0;
		}
	}
}

static void
elops_run_pt_event(struct lws_context *context, int tsi)
{
	/* Run / Dispatch the event_base loop */
	if (pt_to_priv_event(&context->pt[tsi])->io_loop)
		event_base_dispatch(
			pt_to_priv_event(&context->pt[tsi])->io_loop);
}

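/* Free the read / write watchers attached to a vhost listen wsi */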
static int
elops_listen_destroy_event(struct lws_dll2 *d, void *user)
{
	struct lws *wsi = lws_container_of(d, struct lws, listen_list);
	struct lws_wsi_eventlibs_libevent *w = wsi_to_priv_event(wsi);

	event_free(w->w_read.watcher);
	w->w_read.watcher = NULL;
	event_free(w->w_write.watcher);
	w->w_write.watcher = NULL;

	return 0;
}

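/*
 * Per service-thread teardown: drop the listen watchers and static timers;
 * for a loop we created ourselves, also remove the SIGINT watcher and ask the
 * loop to exit (the event_base itself is freed later, in
 * elops_destroy_context2_event()).
 */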
static void
elops_destroy_pt_event(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_libevent *ptpr = pt_to_priv_event(pt);

	if (!ptpr->io_loop)
		return;

	lws_vhost_foreach_listen_wsi(context, context, elops_listen_destroy_event);

	event_free(ptpr->hrtimer);
	event_free(ptpr->idle_timer);

	if (!pt->event_loop_foreign) {
		event_del(ptpr->w_sigint.watcher);
		event_free(ptpr->w_sigint.watcher);
		event_base_loopexit(ptpr->io_loop, NULL);
		// event_base_free(pt->event.io_loop);
		// pt->event.io_loop = NULL;
		lwsl_cx_notice(context, "set to exit loop");
	}
}

static void
elops_destroy_wsi_event(struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	struct lws_wsi_eventlibs_libevent *w;

	if (!wsi)
		return;

	pt = &wsi->a.context->pt[(int)wsi->tsi];
	if (pt->is_destroyed)
		return;

	w = wsi_to_priv_event(wsi);

	if (w->w_read.watcher) {
		event_free(w->w_read.watcher);
		w->w_read.watcher = NULL;
	}

	if (w->w_write.watcher) {
		event_free(w->w_write.watcher);
		w->w_write.watcher = NULL;
	}
}

static int
elops_wsi_logical_close_event(struct lws *wsi)
{
	elops_destroy_wsi_event(wsi);

	return 0;
}

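/*
 * Create read / write watchers for a vhost listen wsi and immediately enable
 * the read side, so the listen socket is serviced as soon as the loop runs.
 */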
static int
elops_init_vhost_listen_wsi_event(struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	struct lws_pt_eventlibs_libevent *ptpr;
	struct lws_wsi_eventlibs_libevent *w;
	evutil_socket_t fd;

	if (!wsi) {
		assert(0);
		return 0;
	}

	w = wsi_to_priv_event(wsi);

	w->w_read.context = wsi->a.context;
	w->w_write.context = wsi->a.context;

	pt = &wsi->a.context->pt[(int)wsi->tsi];
	ptpr = pt_to_priv_event(pt);

	if (wsi->role_ops->file_handle)
		fd = (evutil_socket_t) wsi->desc.filefd;
	else
		fd = wsi->desc.sockfd;

	w->w_read.watcher = event_new(ptpr->io_loop, fd, (EV_READ | EV_PERSIST),
				      lws_event_cb, &w->w_read);
	w->w_write.watcher = event_new(ptpr->io_loop, fd,
				       (EV_WRITE | EV_PERSIST),
				       lws_event_cb, &w->w_write);

	elops_io_event(wsi, LWS_EV_START | LWS_EV_READ);

	return 0;
}

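/*
 * Final context teardown: for loops we created ourselves, either just ask
 * them to exit or, once the internal loops have stopped, spin them dry
 * (bounded by a budget) and free the event_base.
 */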
static int
elops_destroy_context2_event(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	struct lws_pt_eventlibs_libevent *ptpr;
	int n, m;

	for (n = 0; n < context->count_threads; n++) {
		int budget = 1000;

		pt = &context->pt[n];
		ptpr = pt_to_priv_event(pt);

		/* only for internal loops... */

		if (pt->event_loop_foreign || !ptpr->io_loop)
			continue;

		if (!context->evlib_finalize_destroy_after_int_loops_stop) {
			event_base_loopexit(ptpr->io_loop, NULL);
			continue;
		}
		while (budget-- &&
		       (m = event_base_loop(ptpr->io_loop, EVLOOP_NONBLOCK)))
			;

		lwsl_cx_info(context, "event_base_free");

		event_base_free(ptpr->io_loop);
		ptpr->io_loop = NULL;
	}

	return 0;
}

static const struct lws_event_loop_ops event_loop_ops_event = {
	/* name */			"libevent",
	/* init_context */		elops_init_context_event,
	/* destroy_context1 */		NULL,
	/* destroy_context2 */		elops_destroy_context2_event,
	/* init_vhost_listen_wsi */	elops_init_vhost_listen_wsi_event,
	/* init_pt */			elops_init_pt_event,
	/* wsi_logical_close */		elops_wsi_logical_close_event,
	/* check_client_connect_ok */	NULL,
	/* close_handle_manually */	NULL,
	/* accept */			elops_accept_event,
	/* io */			elops_io_event,
	/* run_pt */			elops_run_pt_event,
	/* destroy_pt */		elops_destroy_pt_event,
	/* destroy wsi */		elops_destroy_wsi_event,
	/* foreign_thread */		NULL,

	/* flags */			0,

	/* evlib_size_ctx */		0,
	/* evlib_size_pt */		sizeof(struct lws_pt_eventlibs_libevent),
	/* evlib_size_vh */		0,
	/* evlib_size_wsi */		sizeof(struct lws_wsi_eventlibs_libevent),
};

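/*
 * Illustrative usage sketch (application-side code, not part of this file,
 * only an assumption about how a typical app wires it up): the libevent event
 * lib is selected at context creation time, optionally handing lws an
 * existing event_base as a "foreign" loop the application keeps running
 * itself:
 *
 *	struct lws_context_creation_info info;
 *	struct event_base *eb = event_base_new();
 *	void *foreign_loops[1] = { eb };
 *
 *	memset(&info, 0, sizeof(info));
 *	info.options	   = LWS_SERVER_OPTION_LIBEVENT;
 *	info.foreign_loops = foreign_loops;   (omit to let lws own the loop)
 *	context = lws_create_context(&info);
 *
 *	event_base_dispatch(eb);	      (the app drives the foreign loop)
 *
 * Without a foreign loop, the event_base created in elops_init_pt_event() is
 * dispatched by elops_run_pt_event() via the normal lws service apis.
 */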
#if defined(LWS_WITH_EVLIB_PLUGINS)
LWS_VISIBLE
#endif
const lws_plugin_evlib_t evlib_event = {
	.hdr = {
		"libevent event loop",
		"lws_evlib_plugin",
		LWS_BUILD_HASH,
		LWS_PLUGIN_API_MAGIC
	},

	.ops = &event_loop_ops_event
};