/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

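/*
 * High-resolution timer callback: service any ripe scheduled (sul) events
 * under the pt lock and, if another scheduled event remains, re-arm the
 * hrtimer with the interval until it becomes due.
 */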
static void
lws_event_hrtimer_cb(int fd, short event, void *p)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)p;
	struct timeval tv;
	lws_usec_t us;

	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
	if (us) {
		tv.tv_sec = us / LWS_US_PER_SEC;
		tv.tv_usec = us - (tv.tv_sec * LWS_US_PER_SEC);
		evtimer_add(pt->event.hrtimer, &tv);
	}
	lws_pt_unlock(pt);
}

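/*
 * Idle timer callback: flush pending rx flow-control work and perform forced
 * service for connections that still need it.  If forced service is still
 * outstanding afterwards, come back in 1ms; otherwise re-arm the hrtimer for
 * the next scheduled event and handle any pending context destroy.
 */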
static void
lws_event_idle_timer_cb(int fd, short event, void *p)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)p;
	struct timeval tv;
	lws_usec_t us;

	if (pt->is_destroyed)
		return;

	lws_service_do_ripe_rxflow(pt);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
			/* yes... come back again later */

			tv.tv_sec = 0;
			tv.tv_usec = 1000;
			evtimer_add(pt->event.idle_timer, &tv);

			return;
		}
	}

	lwsl_debug("%s: wait\n", __func__);

	/* account for hrtimer */

	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
	if (us) {
		tv.tv_sec = us / LWS_US_PER_SEC;
		tv.tv_usec = us - (tv.tv_sec * LWS_US_PER_SEC);
		evtimer_add(pt->event.hrtimer, &tv);
	}
	lws_pt_unlock(pt);

	if (pt->destroy_self)
		lws_context_destroy(pt->context);
}

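/*
 * Per-fd I/O callback: translate the libevent revents into an lws_pollfd,
 * hand it to lws_service_fd_tsi(), and then schedule the idle timer 1ms
 * ahead so any deferred work is picked up shortly afterwards.
 */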
static void
lws_event_cb(evutil_socket_t sock_fd, short revents, void *ctx)
{
	struct lws_io_watcher *lws_io = (struct lws_io_watcher *)ctx;
	struct lws_context *context = lws_io->context;
	struct lws_context_per_thread *pt;
	struct lws_pollfd eventfd;
	struct timeval tv;
	struct lws *wsi;

	if (revents & EV_TIMEOUT)
		return;

	/* !!! EV_CLOSED doesn't exist in libevent2 */
#if LIBEVENT_VERSION_NUMBER < 0x02000000
	if (revents & EV_CLOSED) {
		event_del(lws_io->event.watcher);
		event_free(lws_io->event.watcher);
		return;
	}
#endif

	eventfd.fd = sock_fd;
	eventfd.events = 0;
	eventfd.revents = 0;
	if (revents & EV_READ) {
		eventfd.events |= LWS_POLLIN;
		eventfd.revents |= LWS_POLLIN;
	}
	if (revents & EV_WRITE) {
		eventfd.events |= LWS_POLLOUT;
		eventfd.revents |= LWS_POLLOUT;
	}

	wsi = wsi_from_fd(context, sock_fd);
	if (!wsi)
		return;

	pt = &context->pt[(int)wsi->tsi];
	if (pt->is_destroyed)
		return;

	lws_service_fd_tsi(context, &eventfd, wsi->tsi);

	if (pt->destroy_self) {
		lws_context_destroy(pt->context);
		return;
	}

	/* set the idle timer for 1ms ahead */

	tv.tv_sec = 0;
	tv.tv_usec = 1000;
	evtimer_add(pt->event.idle_timer, &tv);
}

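/*
 * SIGINT watcher: defer to a user-provided signal handler if one was given
 * at context creation time, otherwise break out of the event loop, but only
 * when lws owns the loop (a foreign loop is left running).
 */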
void
lws_event_sigint_cb(evutil_socket_t sock_fd, short revents, void *ctx)
{
	struct lws_context_per_thread *pt = ctx;
	struct event *signal = (struct event *)ctx;

	if (pt->context->eventlib_signal_cb) {
		pt->context->eventlib_signal_cb((void *)(lws_intptr_t)sock_fd,
						event_get_signal(signal));

		return;
	}
	if (!pt->event_loop_foreign)
		event_base_loopbreak(pt->event.io_loop);
}

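/*
 * Per service thread init: adopt the foreign event_base (or create our own),
 * attach read watchers for any vhost listen sockets, create the hrtimer and
 * idle timer events, and add a SIGINT watcher when the loop belongs to lws.
 */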
static int
elops_init_pt_event(struct lws_context *context, void *_loop, int tsi)
{
	struct lws_vhost *vh = context->vhost_list;
	struct event_base *loop = (struct event_base *)_loop;
	struct lws_context_per_thread *pt = &context->pt[tsi];

	lwsl_info("%s: loop %p\n", __func__, _loop);

	if (!loop)
		loop = event_base_new();
	else
		context->pt[tsi].event_loop_foreign = 1;

	if (!loop) {
		lwsl_err("%s: creating event base failed\n", __func__);

		return -1;
	}

	pt->event.io_loop = loop;

	/*
	 * Initialize all events with the listening sockets
	 * and register a callback for read operations
	 */

	while (vh) {
		if (vh->lserv_wsi) {
			vh->lserv_wsi->w_read.context = context;
			vh->lserv_wsi->w_read.event.watcher = event_new(
					loop, vh->lserv_wsi->desc.sockfd,
					(EV_READ | EV_PERSIST), lws_event_cb,
					&vh->lserv_wsi->w_read);
			event_add(vh->lserv_wsi->w_read.event.watcher, NULL);
		}
		vh = vh->vhost_next;
	}

	/* static event loop objects */

	pt->event.hrtimer = event_new(loop, -1, EV_PERSIST,
				      lws_event_hrtimer_cb, pt);

	pt->event.idle_timer = event_new(loop, -1, 0,
					 lws_event_idle_timer_cb, pt);

	/* Register the signal watcher unless it's a foreign loop */

	if (pt->event_loop_foreign)
		return 0;

	pt->w_sigint.event.watcher = evsignal_new(loop, SIGINT,
						  lws_event_sigint_cb, pt);
	event_add(pt->w_sigint.event.watcher, NULL);

	return 0;
}

static int
elops_init_context_event(struct lws_context *context,
			 const struct lws_context_creation_info *info)
{
	int n;

	context->eventlib_signal_cb = info->signal_cb;

	for (n = 0; n < context->count_threads; n++)
		context->pt[n].w_sigint.context = context;

	return 0;
}

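/*
 * A new connection (or file) wsi is being adopted: create the libevent read
 * and write watchers for its descriptor.  They are only event_add()ed later,
 * via elops_io_event().
 */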
static int
elops_accept_event(struct lws *wsi)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt;
	int fd;

	wsi->w_read.context = context;
	wsi->w_write.context = context;

	/* initialize the events */
	pt = &context->pt[(int)wsi->tsi];

	if (wsi->role_ops->file_handle)
		fd = wsi->desc.filefd;
	else
		fd = wsi->desc.sockfd;

	wsi->w_read.event.watcher = event_new(pt->event.io_loop, fd,
			(EV_READ | EV_PERSIST), lws_event_cb, &wsi->w_read);
	wsi->w_write.event.watcher = event_new(pt->event.io_loop, fd,
			(EV_WRITE | EV_PERSIST), lws_event_cb, &wsi->w_write);

	return 0;
}

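/*
 * Enable or disable the read / write watchers for a wsi according to the
 * LWS_EV_* flags requested by the lws core.
 */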
static void
elops_io_event(struct lws *wsi, int flags)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	if (!pt->event.io_loop || wsi->context->being_destroyed ||
	    pt->is_destroyed)
		return;

	assert((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	       (flags & (LWS_EV_READ | LWS_EV_WRITE)));

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			event_add(wsi->w_write.event.watcher, NULL);

		if (flags & LWS_EV_READ)
			event_add(wsi->w_read.event.watcher, NULL);
	} else {
		if (flags & LWS_EV_WRITE)
			event_del(wsi->w_write.event.watcher);

		if (flags & LWS_EV_READ)
			event_del(wsi->w_read.event.watcher);
	}
}

static void
elops_run_pt_event(struct lws_context *context, int tsi)
{
	/* Run / Dispatch the event_base loop */
	if (context->pt[tsi].event.io_loop)
		event_base_dispatch(context->pt[tsi].event.io_loop);
}

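/*
 * Tear down the per-thread event objects: the listen-socket watchers, the
 * hrtimer and idle timer, and, when lws created the loop itself, the SIGINT
 * watcher and the event_base.
 */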
static void
elops_destroy_pt_event(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_vhost *vh = context->vhost_list;

	lwsl_info("%s\n", __func__);

	if (!pt->event.io_loop)
		return;

	/*
	 * Free all events with the listening sockets
	 */
	while (vh) {
		if (vh->lserv_wsi) {
			event_free(vh->lserv_wsi->w_read.event.watcher);
			vh->lserv_wsi->w_read.event.watcher = NULL;
			event_free(vh->lserv_wsi->w_write.event.watcher);
			vh->lserv_wsi->w_write.event.watcher = NULL;
		}
		vh = vh->vhost_next;
	}

	event_free(pt->event.hrtimer);
	event_free(pt->event.idle_timer);

	if (!pt->event_loop_foreign) {
		event_del(pt->w_sigint.event.watcher);
		event_free(pt->w_sigint.event.watcher);

		event_base_free(pt->event.io_loop);
	}
}

static void
elops_destroy_wsi_event(struct lws *wsi)
{
	struct lws_context_per_thread *pt;

	if (!wsi)
		return;

	pt = &wsi->context->pt[(int)wsi->tsi];
	if (pt->is_destroyed)
		return;

	if (wsi->w_read.event.watcher) {
		event_free(wsi->w_read.event.watcher);
		wsi->w_read.event.watcher = NULL;
	}

	if (wsi->w_write.event.watcher) {
		event_free(wsi->w_write.event.watcher);
		wsi->w_write.event.watcher = NULL;
	}
}

static int
elops_wsi_logical_close_event(struct lws *wsi)
{
	elops_destroy_wsi_event(wsi);

	return 0;
}

static int
elops_init_vhost_listen_wsi_event(struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	int fd;

	if (!wsi) {
		assert(0);
		return 0;
	}

	wsi->w_read.context = wsi->context;
	wsi->w_write.context = wsi->context;

	pt = &wsi->context->pt[(int)wsi->tsi];

	if (wsi->role_ops->file_handle)
		fd = wsi->desc.filefd;
	else
		fd = wsi->desc.sockfd;

	wsi->w_read.event.watcher = event_new(pt->event.io_loop, fd,
					      (EV_READ | EV_PERSIST),
					      lws_event_cb, &wsi->w_read);
	wsi->w_write.event.watcher = event_new(pt->event.io_loop, fd,
					       (EV_WRITE | EV_PERSIST),
					       lws_event_cb, &wsi->w_write);

	elops_io_event(wsi, LWS_EV_START | LWS_EV_READ);

	return 0;
}

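/*
 * Second-phase context destroy: for loops lws created itself, either ask the
 * event_base to exit, or, when finalize_destroy_after_internal_loops_stopped
 * is set, run a bounded number of non-blocking loop passes and then free the
 * event_base.  Foreign loops are left untouched.
 */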
static int
elops_destroy_context2_event(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m;

	lwsl_debug("%s: in\n", __func__);

	for (n = 0; n < context->count_threads; n++) {
		int budget = 1000;

		pt = &context->pt[n];

		/* only for internal loops... */

		if (pt->event_loop_foreign || !pt->event.io_loop)
			continue;

		if (!context->finalize_destroy_after_internal_loops_stopped) {
			event_base_loopexit(pt->event.io_loop, NULL);
			continue;
		}
		while (budget-- &&
		       (m = event_base_loop(pt->event.io_loop, EVLOOP_NONBLOCK)))
			;
#if 0
		if (m) {
			lwsl_err("%s: tsi %d: NOT everything closed\n",
				 __func__, n);
			event_base_dump_events(pt->event.io_loop, stderr);
		} else
			lwsl_debug("%s: %d: everything closed OK\n", __func__, n);
#endif
		event_base_free(pt->event.io_loop);
	}

	lwsl_debug("%s: out\n", __func__);

	return 0;
}

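/*
 * The event-loop ops vtable the lws core uses to drive this libevent
 * integration.
 */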
struct lws_event_loop_ops event_loop_ops_event = {
	/* name */			"libevent",
	/* init_context */		elops_init_context_event,
	/* destroy_context1 */		NULL,
	/* destroy_context2 */		elops_destroy_context2_event,
	/* init_vhost_listen_wsi */	elops_init_vhost_listen_wsi_event,
	/* init_pt */			elops_init_pt_event,
	/* wsi_logical_close */		elops_wsi_logical_close_event,
	/* check_client_connect_ok */	NULL,
	/* close_handle_manually */	NULL,
	/* accept */			elops_accept_event,
	/* io */			elops_io_event,
	/* run_pt */			elops_run_pt_event,
	/* destroy_pt */		elops_destroy_pt_event,
	/* destroy wsi */		elops_destroy_wsi_event,

	/* flags */			0,
};