1 /*
2 * libwebsockets - small server side websockets and web server implementation
3 *
4 * Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "private-lib-core.h"
26 #include "private-lib-event-libs-libuv.h"
27
28 #define pt_to_priv_uv(_pt) ((struct lws_pt_eventlibs_libuv *)(_pt)->evlib_pt)
29 #define wsi_to_priv_uv(_w) ((struct lws_wsi_eventlibs_libuv *)(_w)->evlib_wsi)
30
/*
 * Timer callback for the per-pt "sul" (scheduled events) timer: service any
 * events that became ripe and, if more remain, re-arm the timer for the
 * next soonest one.
 */
static void
lws_uv_sultimer_cb(uv_timer_t *timer
#if UV_VERSION_MAJOR == 0
		, int status
#endif
)
{
	struct lws_pt_eventlibs_libuv *ptpr = lws_container_of(timer,
				struct lws_pt_eventlibs_libuv, sultimer);
	struct lws_context_per_thread *pt = ptpr->pt;
	lws_usec_t us;

	/* take context lock before pt lock, consistent with the other paths */
	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us)
		/* nonzero means another event is pending in us microseconds */
		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS((uint64_t)us), 0);
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}
53
/*
 * Idle callback: runs when the loop has nothing else to do.  Used to perform
 * "forced service" for wsi with buffered rx that needs service regardless of
 * fd readiness, to drain rxflow, and to keep the sul timer armed.  Stops
 * itself when nobody needs forcing, so the loop can sleep.
 */
static void
lws_uv_idle(uv_idle_t *handle
#if UV_VERSION_MAJOR == 0
		, int status
#endif
)
{
	struct lws_pt_eventlibs_libuv *ptpr = lws_container_of(handle,
				struct lws_pt_eventlibs_libuv, idle);
	struct lws_context_per_thread *pt = ptpr->pt;
	lws_usec_t us;

	lws_service_do_ripe_rxflow(pt);

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);

	/* account for sultimer */

	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us)
		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS((uint64_t)us), 0);

	/* if there is nobody who needs service forcing, shut down idle */
	if (lws_service_adjust_timeout(pt->context, 1, pt->tid))
		uv_idle_stop(handle);

	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}
92
/*
 * Per-wsi poll watcher callback: translate the libuv readiness report (or
 * error status) into an lws_pollfd and hand it to the normal lws fd service
 * path.  Also records the loop's service thread on first event, and kicks
 * the idle handler afterwards so forced service can run.
 */
static void
lws_io_cb(uv_poll_t *watcher, int status, int revents)
{
	struct lws *wsi = (struct lws *)((uv_handle_t *)watcher)->data;
	struct lws_context *context = wsi->a.context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_pollfd eventfd;

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	if (pt->is_destroyed)
		goto bail;

	if (!ptpriv->thread_valid) {
		/* record the thread id that gave us our first event */
		ptpriv->uv_thread = uv_thread_self();
		ptpriv->thread_valid = 1;
	}

#if defined(WIN32) || defined(_WIN32)
	eventfd.fd = watcher->socket;
#else
	eventfd.fd = watcher->io_watcher.fd;
#endif
	eventfd.events = 0;
	eventfd.revents = 0;

	if (status < 0) {
		/*
		 * At this point status will be an UV error, like UV_EBADF,
		 * we treat all errors as LWS_POLLHUP
		 *
		 * You might want to return; instead of servicing the fd in
		 * some cases */
		if (status == UV_EAGAIN)
			goto bail;

		eventfd.events |= LWS_POLLHUP;
		eventfd.revents |= LWS_POLLHUP;
	} else {
		if (revents & UV_READABLE) {
			eventfd.events |= LWS_POLLIN;
			eventfd.revents |= LWS_POLLIN;
		}
		if (revents & UV_WRITABLE) {
			eventfd.events |= LWS_POLLOUT;
			eventfd.revents |= LWS_POLLOUT;
		}
	}

	/* drop the locks before service... servicing may close the wsi */
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);

	lws_service_fd_tsi(context, &eventfd, wsi->tsi);

	if (pt->destroy_self) {
		lws_context_destroy(pt->context);
		return;
	}

	/* restart idle so any deferred / forced service can happen */
	uv_idle_start(&ptpriv->idle, lws_uv_idle);
	return;

bail:
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}
162
163 /*
164 * This does not actually stop the event loop. The reason is we have to pass
165 * libuv handle closures through its event loop. So this tries to close all
166 * wsi, and set a flag; when all the wsi closures are finalized then we
167 * actually stop the libuv event loops.
168 */
169 static void
lws_libuv_stop(struct lws_context * context)170 lws_libuv_stop(struct lws_context *context)
171 {
172 if (context->requested_stop_internal_loops) {
173 lwsl_cx_err(context, "ignoring");
174 return;
175 }
176
177 context->requested_stop_internal_loops = 1;
178 lws_context_destroy(context);
179 }
180
181 static void
lws_uv_signal_handler(uv_signal_t * watcher,int signum)182 lws_uv_signal_handler(uv_signal_t *watcher, int signum)
183 {
184 struct lws_context_per_thread *pt = (struct lws_context_per_thread *)
185 watcher->data;
186
187 if (pt->context->eventlib_signal_cb) {
188 pt->context->eventlib_signal_cb((void *)watcher, signum);
189
190 return;
191 }
192
193 lwsl_cx_err(pt->context, "internal signal handler caught signal %d",
194 signum);
195 lws_libuv_stop(pt->context);
196 }
197
/*
 * Called when a pt has no lws handles left on its loop.  Accounts for the
 * emptied pt; when the last pt empties, starts vhost destruction and, for
 * foreign loops, resumes context destroy.
 *
 * Returns 1 if it re-entered lws_context_destroy() (caller must return to
 * the foreign loop cleanly), else 0.
 */
static int
lws_uv_finalize_pt(struct lws_context_per_thread *pt)
{
	pt->event_loop_pt_unused = 1;

	lwsl_cx_info(pt->context, "thr %d", (int)(pt - pt->context->pt));

	lws_context_lock(pt->context, __func__);

	if (!--pt->context->undestroyed_threads) {
		struct lws_vhost *vh = pt->context->vhost_list;

		/*
		 * eventually, we emptied all the pts...
		 */

		lwsl_cx_debug(pt->context, "all pts down now");

		/* protocols may have initialized libuv objects */

		while (vh) {
			lws_vhost_destroy1(vh);
			vh = vh->vhost_next;
		}

		if (!pt->count_event_loop_static_asset_handles &&
		    pt->event_loop_foreign) {
			lwsl_cx_info(pt->context, "resuming context_destroy");
			/* must unlock before re-entering context destroy */
			lws_context_unlock(pt->context);
			lws_context_destroy(pt->context);
			/*
			 * For foreign, we're being called from the foreign
			 * thread context the loop is associated with, we must
			 * return to it cleanly even though we are done with it.
			 */
			return 1;
		}
	} else
		lwsl_cx_debug(pt->context, "still %d undestroyed",
			      pt->context->undestroyed_threads);

	lws_context_unlock(pt->context);

	return 0;
}
243
244 // static void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
245 // {
246 // if (!uv_is_closing(handle))
247 // lwsl_err("%s: handle %p still alive on loop\n", __func__, handle);
248 // }
249
250
/*
 * Signals trapped on internal loops.  Only the first two are used when
 * LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN is given (ns is clamped to 2
 * at the use sites).
 */
static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };
252
/*
 * Closing Phase 2: Close callback for a static UV asset
 *
 * Decrements the pt's static-asset handle refcount; when the last static
 * asset AND the last dynamic (wsi) handle are gone, nothing of ours remains
 * on the loop, so we can finalize the pt (and for internal loops, continue
 * context destroy).
 */
static void
lws_uv_close_cb_sa(uv_handle_t *handle)
{
	struct lws_context_per_thread *pt =
			LWS_UV_REFCOUNT_STATIC_HANDLE_TO_PT(handle);
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_context *context = pt->context;
#if !defined(LWS_WITH_NO_LOGS) && defined(_DEBUG)
	int tsi = (int)(pt - &context->pt[0]);
#endif

	lwsl_cx_info(context, "thr %d: sa left %d: dyn left: %d (rk %d)",
		     tsi,
		     pt->count_event_loop_static_asset_handles - 1,
		     ptpriv->extant_handles,
		     context->requested_stop_internal_loops);

	/* any static assets left? */

	if (LWS_UV_REFCOUNT_STATIC_HANDLE_DESTROYED(handle) ||
	    ptpriv->extant_handles)
		return;

	/*
	 * So we believe nothing of ours left on the loop. Let's sanity
	 * check it to count what's still on the loop
	 */

	// uv_walk(pt_to_priv_uv(pt)->io_loop, lws_uv_walk_cb, NULL);

	/*
	 * That's it... all wsi were down, and now every
	 * static asset lws had a UV handle for is down.
	 *
	 * Stop the loop so we can get out of here.
	 */

	lwsl_cx_info(context, "thr %d: seen final static handle gone", tsi);

	if (!pt->event_loop_foreign)
		lws_context_destroy(context);

	lws_uv_finalize_pt(pt);

	lwsl_cx_info(context, "all done");
}
303
304 /*
305 * These must be called by protocols that want to use libuv objects directly...
306 *
307 * .... when the libuv object is created...
308 */
309
310 void
lws_libuv_static_refcount_add(uv_handle_t * h,struct lws_context * context,int tsi)311 lws_libuv_static_refcount_add(uv_handle_t *h, struct lws_context *context,
312 int tsi)
313 {
314 struct lws_context_per_thread *pt = &context->pt[tsi];
315
316 LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(h, pt);
317 }
318
/*
 * ... and in the close callback when the object is closed.
 *
 * Thin public wrapper over the internal static-asset close accounting.
 */

void
lws_libuv_static_refcount_del(uv_handle_t *h)
{
	lws_uv_close_cb_sa(h);
}
328
329 void
lws_libuv_stop_without_kill(const struct lws_context * context,int tsi)330 lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
331 {
332 if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
333 uv_stop(pt_to_priv_uv(&context->pt[tsi])->io_loop);
334 }
335
336 uv_loop_t *
lws_uv_getloop(struct lws_context * context,int tsi)337 lws_uv_getloop(struct lws_context *context, int tsi)
338 {
339 if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
340 return pt_to_priv_uv(&context->pt[tsi])->io_loop;
341
342 return NULL;
343 }
344
345 int
lws_libuv_check_watcher_active(struct lws * wsi)346 lws_libuv_check_watcher_active(struct lws *wsi)
347 {
348 uv_handle_t *h = (uv_handle_t *)wsi_to_priv_uv(wsi)->w_read.pwatcher;
349
350 if (!h)
351 return 0;
352
353 return uv_is_active(h);
354 }
355
356 static int
elops_init_context_uv(struct lws_context * context,const struct lws_context_creation_info * info)357 elops_init_context_uv(struct lws_context *context,
358 const struct lws_context_creation_info *info)
359 {
360 int n;
361
362 context->eventlib_signal_cb = info->signal_cb;
363
364 for (n = 0; n < context->count_threads; n++)
365 pt_to_priv_uv(&context->pt[n])->w_sigint.context = context;
366
367 return 0;
368 }
369
/*
 * First-phase context destroy: for each internally-owned loop, spin
 * uv_run(UV_RUN_NOWAIT) (bounded by a budget) to let pending handle
 * closures complete.  Returns nonzero if destroy2 should also run
 * (i.e. pts use internal loops).
 */
static int
elops_destroy_context1_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m = 0;

	for (n = 0; n < context->count_threads; n++) {
		int budget = 10000;	/* cap the drain so we cannot spin forever */
		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign) {

			while (budget-- && (m = uv_run(pt_to_priv_uv(pt)->io_loop,
						       UV_RUN_NOWAIT)))
				;
			/* m nonzero here means handles were still live at budget end */
			if (m)
				lwsl_cx_info(context, "tsi %d: unclosed", n);

		}
	}

	/* call destroy2 if internal loop */
	return !context->pt[0].event_loop_foreign;
}
396
/*
 * Second-phase context destroy for internal loops: either request the loop
 * to stop, or (when the final-destroy flag is set) close the loop itself
 * and free the allocation made in elops_init_pt_uv.  Returns nonzero if any
 * internal loop was found.
 */
static int
elops_destroy_context2_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, internal = 0;

	for (n = 0; n < context->count_threads; n++) {
		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign && pt_to_priv_uv(pt)->io_loop) {
			internal = 1;
			if (!context->evlib_finalize_destroy_after_int_loops_stop)
				uv_stop(pt_to_priv_uv(pt)->io_loop);
			else {
#if UV_VERSION_MAJOR > 0
				uv_loop_close(pt_to_priv_uv(pt)->io_loop);
#endif
				lws_free_set_NULL(pt_to_priv_uv(pt)->io_loop);
			}
		}
	}

	return internal;
}
423
/*
 * Begin async close of a wsi's libuv watcher.  Returns 1 if the close was
 * handed to libuv (the uv close callback completes the wsi close later),
 * or 0 if there is nothing for the event lib to do.
 */
static int
elops_wsi_logical_close_uv(struct lws *wsi)
{
	/* nothing to do for invalid-fd, non-raw-file wsi with no watcher */
	if (!lws_socket_is_valid(wsi->desc.sockfd) &&
	    wsi->role_ops && strcmp(wsi->role_ops->name, "raw-file") &&
	    !wsi_to_priv_uv(wsi)->w_read.pwatcher)
		return 0;

	if (wsi->listener || wsi->event_pipe) {
		lwsl_wsi_debug(wsi, "%d %d stop listener / pipe poll",
			       wsi->listener,
			       wsi->event_pipe);
		if (wsi_to_priv_uv(wsi)->w_read.pwatcher)
			uv_poll_stop(wsi_to_priv_uv(wsi)->w_read.pwatcher);
	}
	lwsl_wsi_debug(wsi, "lws_libuv_closehandle");
	/*
	 * libuv has to do his own close handle processing asynchronously
	 */
	lws_libuv_closehandle(wsi);

	return 1; /* do not complete the wsi close, uv close cb will do it */
}
447
/* Refuse a client connect while a previous watcher is still closing:
 * returns nonzero to say "not ok yet" */
static int
elops_check_client_connect_ok_uv(struct lws *wsi)
{
	if (!lws_libuv_check_watcher_active(wsi))
		return 0;

	lwsl_wsi_warn(wsi, "Waiting for libuv watcher to close");

	return 1;
}
458
459 static void
lws_libuv_closewsi_m(uv_handle_t * handle)460 lws_libuv_closewsi_m(uv_handle_t* handle)
461 {
462 lws_sockfd_type sockfd = (lws_sockfd_type)(lws_intptr_t)handle->data;
463
464 lwsl_debug("%s: sockfd %d\n", __func__, sockfd);
465 compatible_close(sockfd);
466 lws_free(handle);
467 }
468
/*
 * "Manual" close: detach the watcher and fd from the wsi and hand both to
 * libuv for async closure; lws_libuv_closewsi_m closes the fd and frees
 * the handle, without any wsi close processing.
 */
static void
elops_close_handle_manually_uv(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi_to_priv_uv(wsi)->w_read.pwatcher;

	lwsl_wsi_debug(wsi, "lws_libuv_closehandle");

	/*
	 * the "manual" variant only closes the handle itself and the
	 * related fd. handle->data is the fd.
	 */
	h->data = (void *)(lws_intptr_t)wsi->desc.sockfd;

	/*
	 * We take responsibility to close / destroy these now.
	 * Remove any trace from the wsi.
	 */

	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi_to_priv_uv(wsi)->w_read.pwatcher = NULL;
	wsi->told_event_loop_closed = 1;

	uv_close(h, lws_libuv_closewsi_m);
}
493
/*
 * Adopt a new wsi onto the pt's loop: allocate and init its uv poll
 * watcher (socket or file fd as the role requires) and bump the pt's
 * dynamic handle count.  Returns 0 on success, -1 on OOM / init failure.
 */
static int
elops_accept_uv(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_io_watcher_libuv *w_read = &wsi_to_priv_uv(wsi)->w_read;
	int n;

	if (!ptpriv->thread_valid) {
		/* record the thread id that gave us our first event */
		ptpriv->uv_thread = uv_thread_self();
		ptpriv->thread_valid = 1;
	}

	w_read->context = wsi->a.context;

	w_read->pwatcher = lws_malloc(sizeof(*w_read->pwatcher), "uvh");
	if (!w_read->pwatcher)
		return -1;

	if (wsi->role_ops->file_handle)
		/* file-based roles poll a plain fd, not a socket */
		n = uv_poll_init(pt_to_priv_uv(pt)->io_loop, w_read->pwatcher,
				 (int)(lws_intptr_t)wsi->desc.filefd);
	else
		n = uv_poll_init_socket(pt_to_priv_uv(pt)->io_loop,
					w_read->pwatcher, wsi->desc.sockfd);

	if (n) {
		lwsl_wsi_err(wsi, "uv_poll_init failed %d, sockfd=%p", n,
			     (void *)(lws_intptr_t)wsi->desc.sockfd);
		lws_free(w_read->pwatcher);
		w_read->pwatcher = NULL;
		return -1;
	}

	/* the poll callback recovers the wsi via handle->data */
	((uv_handle_t *)w_read->pwatcher)->data = (void *)wsi;

	ptpriv->extant_handles++;

	lwsl_wsi_debug(wsi, "thr %d: sa left %d: dyn left: %d",
		       (int)(pt - &pt->context->pt[0]),
		       pt->count_event_loop_static_asset_handles,
		       ptpriv->extant_handles);

	return 0;
}
540
/*
 * Change the wsi's poll interest: flags combine LWS_EV_START or LWS_EV_STOP
 * with LWS_EV_READ / LWS_EV_WRITE.  Maintains the cached actual_events so
 * we only ask libuv for the delta, and stops polling entirely when no
 * events remain wanted.
 */
static void
elops_io_uv(struct lws *wsi, unsigned int flags)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_io_watcher_libuv *w = &(wsi_to_priv_uv(wsi)->w_read);
	int current_events = w->actual_events & (UV_READABLE | UV_WRITABLE);

	lwsl_wsi_debug(wsi, "%d", flags);

	/* w->context is set after the loop is initialized */

	if (!pt_to_priv_uv(pt)->io_loop || !w->context) {
		lwsl_wsi_info(wsi, "no io loop yet");
		return;
	}

	/* callers must pass one of START/STOP plus at least one of READ/WRITE */
	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_wsi_err(wsi, "assert: flags %d", flags);
		assert(0);
	}

	if (!w->pwatcher || wsi->told_event_loop_closed) {
		lwsl_wsi_info(wsi, "no watcher");

		return;
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(w->pwatcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		/* nothing left wanted -> stop polling altogether */
		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(w->pwatcher);
		else
			uv_poll_start(w->pwatcher, current_events, lws_io_cb);
	}

	w->actual_events = (uint8_t)current_events;
}
592
593 static int
elops_init_vhost_listen_wsi_uv(struct lws * wsi)594 elops_init_vhost_listen_wsi_uv(struct lws *wsi)
595 {
596 struct lws_context_per_thread *pt;
597 struct lws_pt_eventlibs_libuv *ptpriv;
598 struct lws_io_watcher_libuv *w_read;
599 int n;
600
601 if (!wsi)
602 return 0;
603
604 w_read = &wsi_to_priv_uv(wsi)->w_read;
605
606 if (w_read->context)
607 return 0;
608
609 pt = &wsi->a.context->pt[(int)wsi->tsi];
610 ptpriv = pt_to_priv_uv(pt);
611 if (!ptpriv->io_loop)
612 return 0;
613
614 w_read->context = wsi->a.context;
615
616 w_read->pwatcher = lws_malloc(sizeof(*w_read->pwatcher), "uvh");
617 if (!w_read->pwatcher)
618 return -1;
619
620 n = uv_poll_init_socket(pt_to_priv_uv(pt)->io_loop,
621 w_read->pwatcher, wsi->desc.sockfd);
622 if (n) {
623 lwsl_wsi_err(wsi, "uv_poll_init failed %d, sockfd=%p", n,
624 (void *)(lws_intptr_t)wsi->desc.sockfd);
625
626 return -1;
627 }
628
629 ptpriv->extant_handles++;
630
631 lwsl_wsi_debug(wsi, "thr %d: sa left %d: dyn left: %d",
632 (int)(pt - &pt->context->pt[0]),
633 pt->count_event_loop_static_asset_handles,
634 ptpriv->extant_handles);
635
636 ((uv_handle_t *)w_read->pwatcher)->data = (void *)wsi;
637
638 elops_io_uv(wsi, LWS_EV_START | LWS_EV_READ);
639
640 return 0;
641 }
642
643 static void
elops_run_pt_uv(struct lws_context * context,int tsi)644 elops_run_pt_uv(struct lws_context *context, int tsi)
645 {
646 if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
647 uv_run(pt_to_priv_uv(&context->pt[tsi])->io_loop, 0);
648 }
649
/*
 * Tear down the pt's event-lib pieces: stop and async-close the signal
 * watchers (internal loops only), the sul timer and the idle handle.  On a
 * second call after destroy processing completed, just stops an internal
 * loop.  All the uv_close() completions are accounted via
 * lws_uv_close_cb_sa.
 */
static void
elops_destroy_pt_uv(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	int m, ns;

	if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
		return;

	if (!ptpriv->io_loop)
		return;

	if (pt->event_loop_destroy_processing_done) {
		/* second pass: just get an internal loop to exit uv_run() */
		if (!pt->event_loop_foreign) {
			lwsl_warn("%s: stopping event loop\n", __func__);
			uv_stop(pt_to_priv_uv(pt)->io_loop);
		}
		return;
	}

	pt->event_loop_destroy_processing_done = 1;
	// lwsl_cx_debug(context, "%d", tsi);

	if (!pt->event_loop_foreign) {

		uv_signal_stop(&pt_to_priv_uv(pt)->w_sigint.watcher);

		/* same clamping as at signal setup time */
		ns = LWS_ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		for (m = 0; m < ns; m++) {
			uv_signal_stop(&pt_to_priv_uv(pt)->signals[m]);
			uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->signals[m],
				 lws_uv_close_cb_sa);
		}
	} else
		lwsl_cx_debug(context, "not closing pt signals");

	uv_timer_stop(&pt_to_priv_uv(pt)->sultimer);
	uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->sultimer, lws_uv_close_cb_sa);

	uv_idle_stop(&pt_to_priv_uv(pt)->idle);
	uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->idle, lws_uv_close_cb_sa);
}
697
698 static int
elops_listen_init_uv(struct lws_dll2 * d,void * user)699 elops_listen_init_uv(struct lws_dll2 *d, void *user)
700 {
701 struct lws *wsi = lws_container_of(d, struct lws, listen_list);
702
703 if (elops_init_vhost_listen_wsi_uv(wsi) == -1)
704 return -1;
705
706 return 0;
707 }
708
709 /*
710 * This needs to be called after vhosts have been defined.
711 *
712 * If later, after server start, another vhost is added, this must be
713 * called again to bind the vhost
714 */
715
716 int
elops_init_pt_uv(struct lws_context * context,void * _loop,int tsi)717 elops_init_pt_uv(struct lws_context *context, void *_loop, int tsi)
718 {
719 struct lws_context_per_thread *pt = &context->pt[tsi];
720 struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
721 int status = 0, n, ns, first = 1;
722 uv_loop_t *loop = (uv_loop_t *)_loop;
723
724 ptpriv->pt = pt;
725
726 if (!ptpriv->io_loop) {
727 if (!loop) {
728 loop = lws_malloc(sizeof(*loop), "libuv loop");
729 if (!loop) {
730 lwsl_cx_err(context, "OOM");
731 return -1;
732 }
733 #if UV_VERSION_MAJOR > 0
734 uv_loop_init(loop);
735 #else
736 lwsl_cx_err(context, "This libuv is too old to work...");
737 return 1;
738 #endif
739 pt->event_loop_foreign = 0;
740 } else {
741 lwsl_cx_notice(context, " Using foreign event loop...");
742 pt->event_loop_foreign = 1;
743 }
744
745 ptpriv->io_loop = loop;
746 uv_idle_init(loop, &ptpriv->idle);
747 LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->idle, pt);
748 uv_idle_start(&ptpriv->idle, lws_uv_idle);
749
750 ns = LWS_ARRAY_SIZE(sigs);
751 if (lws_check_opt(context->options,
752 LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
753 ns = 2;
754
755 if (!pt->event_loop_foreign) {
756 assert(ns <= (int)LWS_ARRAY_SIZE(ptpriv->signals));
757 for (n = 0; n < ns; n++) {
758 uv_signal_init(loop, &ptpriv->signals[n]);
759 LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(
760 &ptpriv->signals[n], pt);
761 ptpriv->signals[n].data = pt;
762 uv_signal_start(&ptpriv->signals[n],
763 lws_uv_signal_handler, sigs[n]);
764 }
765 }
766 } else
767 first = 0;
768
769 /*
770 * Initialize the accept wsi read watcher with all the listening sockets
771 * and register a callback for read operations
772 *
773 * We have to do it here because the uv loop(s) are not
774 * initialized until after context creation.
775 */
776 lws_vhost_foreach_listen_wsi(context, context, elops_listen_init_uv);
777
778 if (!first)
779 return status;
780
781 uv_timer_init(ptpriv->io_loop, &ptpriv->sultimer);
782 LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->sultimer, pt);
783
784 return status;
785 }
786
/*
 * uv close callback for a wsi's poll watcher: finalize the wsi close,
 * account for the gone dynamic handle, free the handle allocation, and if
 * a stop was requested and this pt is now empty of lws handles, finalize
 * the pt.
 */
static void
lws_libuv_closewsi(uv_handle_t *handle)
{
	struct lws *wsi = (struct lws *)handle->data;
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
#if defined(LWS_WITH_SERVER)
	int lspd = 0;
#endif

	// lwsl_wsi_notice(wsi, "in");

	lws_context_lock(context, __func__);

	/*
	 * We get called back here for every wsi that closes
	 */

#if defined(LWS_WITH_SERVER)
	/* deprecated-context listen sockets get counted down as they close */
	if (wsi->role_ops && !strcmp(wsi->role_ops->name, "listen") &&
	    wsi->a.context->deprecated) {
		lspd = 1;
		context->deprecation_pending_listen_close_count--;
		if (!context->deprecation_pending_listen_close_count)
			lspd = 2;	/* that was the last one */
	}
#endif

	lws_pt_lock(pt, __func__);

	lwsl_wsi_info(wsi, "thr %d: sa left %d: dyn left: %d (rk %d)",
		      (int)(pt - &pt->context->pt[0]),
		      pt->count_event_loop_static_asset_handles,
		      ptpriv->extant_handles - 1,
		      context->requested_stop_internal_loops);

	__lws_close_free_wsi_final(wsi);
	assert(ptpriv->extant_handles);
	ptpriv->extant_handles--;
	lws_pt_unlock(pt);

	/* it's our job to close the handle finally */
	lws_free(handle);

#if defined(LWS_WITH_SERVER)
	if (lspd == 2 && context->deprecation_cb) {
		lwsl_cx_notice(context, "calling deprecation callback");
		context->deprecation_cb();
	}
#endif

	/*
	 * eventually, we closed all the wsi...
	 */

	if (context->requested_stop_internal_loops &&
	    !ptpriv->extant_handles &&
	    !pt->count_event_loop_static_asset_handles) {

		/*
		 * we closed everything on this pt
		 */

		lws_context_unlock(context);
		lws_uv_finalize_pt(pt);

		return;
	}

	lws_context_unlock(context);
}
859
860 void
lws_libuv_closehandle(struct lws * wsi)861 lws_libuv_closehandle(struct lws *wsi)
862 {
863 uv_handle_t* handle;
864 struct lws_io_watcher_libuv *w_read = &wsi_to_priv_uv(wsi)->w_read;
865
866 if (!w_read->pwatcher)
867 return;
868
869 if (wsi->told_event_loop_closed)
870 return;
871
872 // lwsl_wsi_debug(wsi, "in");
873
874 wsi->told_event_loop_closed = 1;
875
876 /*
877 * The normal close path attaches the related wsi as the
878 * handle->data.
879 */
880
881 handle = (uv_handle_t *)w_read->pwatcher;
882
883 /* ensure we can only do this once */
884
885 w_read->pwatcher = NULL;
886
887 uv_close(handle, lws_libuv_closewsi);
888 }
889
890 static int
elops_foreign_thread_uv(struct lws_context * cx,int tsi)891 elops_foreign_thread_uv(struct lws_context *cx, int tsi)
892 {
893 struct lws_context_per_thread *pt = &cx->pt[tsi];
894 struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
895 uv_thread_t th = uv_thread_self();
896
897 if (!ptpriv->thread_valid)
898 /*
899 * We can't judge it until we get the first event from the loop
900 */
901 return 0;
902
903 /*
904 * This is the same thread that gave us the first event on this loop?
905 * Return 0 if so.
906 */
907
908 return !uv_thread_equal(&th, &ptpriv->uv_thread);
909 }
910
/* vtable wiring the libuv implementations into the lws core event-lib
 * abstraction; the evlib_size_* members tell core how much private storage
 * to allocate per context / pt / vhost / wsi */
static const struct lws_event_loop_ops event_loop_ops_uv = {
	/* name */			"libuv",
	/* init_context */		elops_init_context_uv,
	/* destroy_context1 */		elops_destroy_context1_uv,
	/* destroy_context2 */		elops_destroy_context2_uv,
	/* init_vhost_listen_wsi */	elops_init_vhost_listen_wsi_uv,
	/* init_pt */			elops_init_pt_uv,
	/* wsi_logical_close */		elops_wsi_logical_close_uv,
	/* check_client_connect_ok */	elops_check_client_connect_ok_uv,
	/* close_handle_manually */	elops_close_handle_manually_uv,
	/* accept */			elops_accept_uv,
	/* io */			elops_io_uv,
	/* run_pt */			elops_run_pt_uv,
	/* destroy_pt */		elops_destroy_pt_uv,
	/* destroy wsi */		NULL,
	/* foreign_thread */		elops_foreign_thread_uv,

	/* flags */			0,

	/* evlib_size_ctx */	sizeof(struct lws_context_eventlibs_libuv),
	/* evlib_size_pt */	sizeof(struct lws_pt_eventlibs_libuv),
	/* evlib_size_vh */	0,
	/* evlib_size_wsi */	sizeof(struct lws_io_watcher_libuv),
};
935
/* Plugin export: when event libs are built as plugins, this symbol is what
 * the plugin loader resolves to discover the libuv ops table */
#if defined(LWS_WITH_EVLIB_PLUGINS)
LWS_VISIBLE
#endif
const lws_plugin_evlib_t evlib_uv = {
	.hdr = {
		"libuv event loop",
		"lws_evlib_plugin",
		LWS_BUILD_HASH,
		LWS_PLUGIN_API_MAGIC
	},

	.ops = &event_loop_ops_uv
};
949
950