/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"
#include "private-lib-event-libs-libuv.h"

#define pt_to_priv_uv(_pt) ((struct lws_pt_eventlibs_libuv *)(_pt)->evlib_pt)
#define wsi_to_priv_uv(_w) ((struct lws_wsi_eventlibs_libuv *)(_w)->evlib_wsi)

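/*
 * Fires when the pt's scheduled-event (sul) timer expires: service whatever
 * is ripe and, if anything is still pending, re-arm the timer for the next
 * deadline.
 */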
static void
lws_uv_sultimer_cb(uv_timer_t *timer
#if UV_VERSION_MAJOR == 0
		, int status
#endif
)
{
	struct lws_pt_eventlibs_libuv *ptpr = lws_container_of(timer,
				struct lws_pt_eventlibs_libuv, sultimer);
	struct lws_context_per_thread *pt = ptpr->pt;
	lws_usec_t us;

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us)
		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS((uint64_t)us), 0);
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}

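/*
 * Idle callback: runs once the loop has nothing else to do.  It forces
 * service for any connections with pending work, re-arms the sul timer,
 * and then stops itself so the loop can sleep again.
 */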
static void
lws_uv_idle(uv_idle_t *handle
#if UV_VERSION_MAJOR == 0
		, int status
#endif
)
{
	struct lws_pt_eventlibs_libuv *ptpr = lws_container_of(handle,
				struct lws_pt_eventlibs_libuv, idle);
	struct lws_context_per_thread *pt = ptpr->pt;
	lws_usec_t us;

	lws_service_do_ripe_rxflow(pt);

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);

	/* account for sultimer */

	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us)
		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS((uint64_t)us), 0);

	/* there is nobody who needs service forcing, shut down idle */
	uv_idle_stop(handle);

	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}

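/*
 * Per-connection poll callback: translate the libuv status / revents into an
 * lws_pollfd and hand it to the core service loop, then kick the idle handler
 * so any forced service happens once libuv returns control to us.
 */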
static void
lws_io_cb(uv_poll_t *watcher, int status, int revents)
{
	struct lws *wsi = (struct lws *)((uv_handle_t *)watcher)->data;
	struct lws_context *context = wsi->a.context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_pollfd eventfd;

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	if (pt->is_destroyed)
		goto bail;

	if (!ptpriv->thread_valid) {
		/* record the thread id that gave us our first event */
		ptpriv->uv_thread = uv_thread_self();
		ptpriv->thread_valid = 1;
	}

#if defined(WIN32) || defined(_WIN32)
	eventfd.fd = watcher->socket;
#else
	eventfd.fd = watcher->io_watcher.fd;
#endif
	eventfd.events = 0;
	eventfd.revents = 0;

	if (status < 0) {
		/*
		 * At this point status is a libuv error, like UV_EBADF; we
		 * treat all errors as LWS_POLLHUP.
		 *
		 * In some cases you might want to return here instead of
		 * servicing the fd.
		 */
		if (status == UV_EAGAIN)
			goto bail;

		eventfd.events |= LWS_POLLHUP;
		eventfd.revents |= LWS_POLLHUP;
	} else {
		if (revents & UV_READABLE) {
			eventfd.events |= LWS_POLLIN;
			eventfd.revents |= LWS_POLLIN;
		}
		if (revents & UV_WRITABLE) {
			eventfd.events |= LWS_POLLOUT;
			eventfd.revents |= LWS_POLLOUT;
		}
	}

	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);

	lws_service_fd_tsi(context, &eventfd, wsi->tsi);

	if (pt->destroy_self) {
		lws_context_destroy(pt->context);
		return;
	}

	uv_idle_start(&ptpriv->idle, lws_uv_idle);
	return;

bail:
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}

/*
 * This does not actually stop the event loop, because libuv handle closures
 * must themselves be passed through the event loop.  Instead it tries to
 * close all the wsi and sets a flag; once all the wsi closures have been
 * finalized, we actually stop the libuv event loops.
 */
static void
lws_libuv_stop(struct lws_context *context)
{
	if (context->requested_stop_internal_loops) {
		lwsl_cx_err(context, "ignoring");
		return;
	}

	context->requested_stop_internal_loops = 1;
	lws_context_destroy(context);
}

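/*
 * Signal handler attached to internal loops: defer to the user's signal
 * callback if one was given at context creation, otherwise log the signal
 * and begin an orderly shutdown of the lws-managed loops.
 */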
static void
lws_uv_signal_handler(uv_signal_t *watcher, int signum)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)
							watcher->data;

	if (pt->context->eventlib_signal_cb) {
		pt->context->eventlib_signal_cb((void *)watcher, signum);

		return;
	}

	lwsl_cx_err(pt->context, "internal signal handler caught signal %d",
				 signum);
	lws_libuv_stop(pt->context);
}

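/*
 * Mark this pt's event loop as no longer used by lws.  When the last pt goes
 * down we also start destroying the vhosts; for a foreign loop we may resume
 * the deferred context destroy from here, returning 1 if we did so.
 */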
static int
lws_uv_finalize_pt(struct lws_context_per_thread *pt)
{
	pt->event_loop_pt_unused = 1;

	lwsl_cx_info(pt->context, "thr %d", (int)(pt - pt->context->pt));

	lws_context_lock(pt->context, __func__);

	if (!--pt->context->undestroyed_threads) {
		struct lws_vhost *vh = pt->context->vhost_list;

		/*
		 * eventually, we emptied all the pts...
		 */

		lwsl_cx_debug(pt->context, "all pts down now");

		/* protocols may have initialized libuv objects */

		while (vh) {
			lws_vhost_destroy1(vh);
			vh = vh->vhost_next;
		}

		if (!pt->count_event_loop_static_asset_handles &&
		    pt->event_loop_foreign) {
			lwsl_cx_info(pt->context, "resuming context_destroy");
			lws_context_unlock(pt->context);
			lws_context_destroy(pt->context);
			/*
			 * For foreign, we're being called from the foreign
			 * thread context the loop is associated with, we must
			 * return to it cleanly even though we are done with it.
			 */
			return 1;
		}
	} else
		lwsl_cx_debug(pt->context, "still %d undestroyed",
					   pt->context->undestroyed_threads);

	lws_context_unlock(pt->context);

	return 0;
}

// static void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
// {
//      if (!uv_is_closing(handle))
//	      lwsl_err("%s: handle %p still alive on loop\n", __func__, handle);
// }


static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };

/*
 * Closing Phase 2: Close callback for a static UV asset
 */

static void
lws_uv_close_cb_sa(uv_handle_t *handle)
{
	struct lws_context_per_thread *pt =
			LWS_UV_REFCOUNT_STATIC_HANDLE_TO_PT(handle);
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_context *context = pt->context;
#if !defined(LWS_WITH_NO_LOGS) && defined(_DEBUG)
	int tsi = (int)(pt - &context->pt[0]);
#endif

	lwsl_cx_info(context, "thr %d: sa left %d: dyn left: %d (rk %d)",
			      tsi,
			      pt->count_event_loop_static_asset_handles - 1,
			      ptpriv->extant_handles,
			      context->requested_stop_internal_loops);

	/* any static assets left? */

	if (LWS_UV_REFCOUNT_STATIC_HANDLE_DESTROYED(handle) ||
	    ptpriv->extant_handles)
		return;

	/*
	 * So we believe nothing of ours is left on the loop.  We can sanity-
	 * check that by walking the loop to see what is still on it.
	 */

	// uv_walk(pt_to_priv_uv(pt)->io_loop, lws_uv_walk_cb, NULL);

	/*
	 * That's it... all wsi were down, and now every
	 * static asset lws had a UV handle for is down.
	 *
	 * Stop the loop so we can get out of here.
	 */

	lwsl_cx_info(context, "thr %d: seen final static handle gone", tsi);

	if (!pt->event_loop_foreign)
		lws_context_destroy(context);

	lws_uv_finalize_pt(pt);

	lwsl_cx_info(context, "all done");
}

/*
 * These must be called by protocols that want to use libuv objects directly...
 *
 * .... when the libuv object is created...
 */

void
lws_libuv_static_refcount_add(uv_handle_t *h, struct lws_context *context,
				int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];

	LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(h, pt);
}

/*
 * ... and in the close callback when the object is closed.
 */

void
lws_libuv_static_refcount_del(uv_handle_t *h)
{
	lws_uv_close_cb_sa(h);
}
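
/*
 * Illustrative sketch (not part of the original source): a protocol that
 * wants its own uv_timer_t on an lws loop would typically pair the refcount
 * helpers above like this, assuming a hypothetical my_timer_cb and a timer
 * the protocol allocates itself:
 *
 *	uv_timer_t *t = lws_malloc(sizeof(*t), "my timer");
 *
 *	uv_timer_init(lws_uv_getloop(context, 0), t);
 *	lws_libuv_static_refcount_add((uv_handle_t *)t, context, 0);
 *	uv_timer_start(t, my_timer_cb, 1000, 1000);
 *
 * ...and at teardown, close it via the refcount-aware close callback so the
 * event lib knows when lws may finally be destroyed:
 *
 *	uv_timer_stop(t);
 *	uv_close((uv_handle_t *)t, lws_libuv_static_refcount_del);
 */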

void
lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
{
	if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
		uv_stop(pt_to_priv_uv(&context->pt[tsi])->io_loop);
}

uv_loop_t *
lws_uv_getloop(struct lws_context *context, int tsi)
{
	if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
		return pt_to_priv_uv(&context->pt[tsi])->io_loop;

	return NULL;
}

int
lws_libuv_check_watcher_active(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi_to_priv_uv(wsi)->w_read.pwatcher;

	if (!h)
		return 0;

	return uv_is_active(h);
}

static int
elops_init_context_uv(struct lws_context *context,
		      const struct lws_context_creation_info *info)
{
	int n;

	context->eventlib_signal_cb = info->signal_cb;

	for (n = 0; n < context->count_threads; n++)
		pt_to_priv_uv(&context->pt[n])->w_sigint.context = context;

	return 0;
}

static int
elops_destroy_context1_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m = 0;

	for (n = 0; n < context->count_threads; n++) {
		int budget = 10000;
		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign) {

			while (budget-- && (m = uv_run(pt_to_priv_uv(pt)->io_loop,
						  UV_RUN_NOWAIT)))
					;
			if (m)
				lwsl_cx_info(context, "tsi %d: unclosed", n);

		}
	}

	/* call destroy2 if internal loop */
	return !context->pt[0].event_loop_foreign;
}

static int
elops_destroy_context2_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, internal = 0;

	for (n = 0; n < context->count_threads; n++) {
		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign && pt_to_priv_uv(pt)->io_loop) {
			internal = 1;
			if (!context->evlib_finalize_destroy_after_int_loops_stop)
				uv_stop(pt_to_priv_uv(pt)->io_loop);
			else {
#if UV_VERSION_MAJOR > 0
				uv_loop_close(pt_to_priv_uv(pt)->io_loop);
#endif
				lws_free_set_NULL(pt_to_priv_uv(pt)->io_loop);
			}
		}
	}

	return internal;
}

static int
elops_wsi_logical_close_uv(struct lws *wsi)
{
	if (!lws_socket_is_valid(wsi->desc.sockfd) &&
	    wsi->role_ops && strcmp(wsi->role_ops->name, "raw-file") &&
	    !wsi_to_priv_uv(wsi)->w_read.pwatcher)
		return 0;

	if (wsi->listener || wsi->event_pipe) {
		lwsl_wsi_debug(wsi, "%d %d stop listener / pipe poll",
				    wsi->listener,
				    wsi->event_pipe);
		if (wsi_to_priv_uv(wsi)->w_read.pwatcher)
			uv_poll_stop(wsi_to_priv_uv(wsi)->w_read.pwatcher);
	}
	lwsl_wsi_debug(wsi, "lws_libuv_closehandle");
	/*
	 * libuv has to do its own close handle processing asynchronously
	 */
	lws_libuv_closehandle(wsi);

	return 1; /* do not complete the wsi close, uv close cb will do it */
}

static int
elops_check_client_connect_ok_uv(struct lws *wsi)
{
	if (lws_libuv_check_watcher_active(wsi)) {
		lwsl_wsi_warn(wsi, "Waiting for libuv watcher to close");
		return 1;
	}

	return 0;
}

static void
lws_libuv_closewsi_m(uv_handle_t *handle)
{
	lws_sockfd_type sockfd = (lws_sockfd_type)(lws_intptr_t)handle->data;

	lwsl_debug("%s: sockfd %d\n", __func__, sockfd);
	compatible_close(sockfd);
	lws_free(handle);
}

static void
elops_close_handle_manually_uv(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi_to_priv_uv(wsi)->w_read.pwatcher;

	lwsl_wsi_debug(wsi, "lws_libuv_closehandle");

	/*
	 * the "manual" variant only closes the handle itself and the
	 * related fd.  handle->data is the fd.
	 */
	h->data = (void *)(lws_intptr_t)wsi->desc.sockfd;

	/*
	 * We take responsibility to close / destroy these now.
	 * Remove any trace from the wsi.
	 */

	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi_to_priv_uv(wsi)->w_read.pwatcher = NULL;
	wsi->told_event_loop_closed = 1;

	uv_close(h, lws_libuv_closewsi_m);
}

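/*
 * Attach a newly-accepted (or adopted) wsi to the pt's loop: allocate a
 * uv_poll_t for its socket or file handle, point the handle back at the wsi,
 * and account for it in the pt's count of extant dynamic handles.
 */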
static int
elops_accept_uv(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_io_watcher_libuv *w_read = &wsi_to_priv_uv(wsi)->w_read;
	int n;

	if (!ptpriv->thread_valid) {
		/* record the thread id that gave us our first event */
		ptpriv->uv_thread = uv_thread_self();
		ptpriv->thread_valid = 1;
	}

	w_read->context = wsi->a.context;

	w_read->pwatcher = lws_malloc(sizeof(*w_read->pwatcher), "uvh");
	if (!w_read->pwatcher)
		return -1;

	if (wsi->role_ops->file_handle)
		n = uv_poll_init(pt_to_priv_uv(pt)->io_loop, w_read->pwatcher,
			     (int)(lws_intptr_t)wsi->desc.filefd);
	else
		n = uv_poll_init_socket(pt_to_priv_uv(pt)->io_loop,
				    w_read->pwatcher, wsi->desc.sockfd);

	if (n) {
		lwsl_wsi_err(wsi, "uv_poll_init failed %d, sockfd=%p", n,
				  (void *)(lws_intptr_t)wsi->desc.sockfd);
		lws_free(w_read->pwatcher);
		w_read->pwatcher = NULL;
		return -1;
	}

	((uv_handle_t *)w_read->pwatcher)->data = (void *)wsi;

	ptpriv->extant_handles++;

	lwsl_wsi_debug(wsi, "thr %d: sa left %d: dyn left: %d",
			    (int)(pt - &pt->context->pt[0]),
			    pt->count_event_loop_static_asset_handles,
			    ptpriv->extant_handles);

	return 0;
}

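/*
 * Enable or disable read / write interest for a wsi by translating the
 * LWS_EV_* flags into UV_READABLE / UV_WRITABLE and restarting (or stopping)
 * its uv_poll watcher accordingly.
 */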
static void
elops_io_uv(struct lws *wsi, unsigned int flags)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_io_watcher_libuv *w = &(wsi_to_priv_uv(wsi)->w_read);
	int current_events = w->actual_events & (UV_READABLE | UV_WRITABLE);

	lwsl_wsi_debug(wsi, "%d", flags);

	/* w->context is set after the loop is initialized */

	if (!pt_to_priv_uv(pt)->io_loop || !w->context) {
		lwsl_wsi_info(wsi, "no io loop yet");
		return;
	}

	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_wsi_err(wsi, "assert: flags %d", flags);
		assert(0);
	}

	if (!w->pwatcher || wsi->told_event_loop_closed) {
		lwsl_wsi_info(wsi, "no watcher");

		return;
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(w->pwatcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(w->pwatcher);
		else
			uv_poll_start(w->pwatcher, current_events, lws_io_cb);
	}

	w->actual_events = (uint8_t)current_events;
}

static int
elops_init_vhost_listen_wsi_uv(struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	struct lws_pt_eventlibs_libuv *ptpriv;
	struct lws_io_watcher_libuv *w_read;
	int n;

	if (!wsi)
		return 0;

	w_read = &wsi_to_priv_uv(wsi)->w_read;

	if (w_read->context)
		return 0;

	pt = &wsi->a.context->pt[(int)wsi->tsi];
	ptpriv = pt_to_priv_uv(pt);
	if (!ptpriv->io_loop)
		return 0;

	w_read->context = wsi->a.context;

	w_read->pwatcher = lws_malloc(sizeof(*w_read->pwatcher), "uvh");
	if (!w_read->pwatcher)
		return -1;

	n = uv_poll_init_socket(pt_to_priv_uv(pt)->io_loop,
				w_read->pwatcher, wsi->desc.sockfd);
	if (n) {
		lwsl_wsi_err(wsi, "uv_poll_init failed %d, sockfd=%p", n,
				  (void *)(lws_intptr_t)wsi->desc.sockfd);

		return -1;
	}

	ptpriv->extant_handles++;

	lwsl_wsi_debug(wsi, "thr %d: sa left %d: dyn left: %d",
			    (int)(pt - &pt->context->pt[0]),
			    pt->count_event_loop_static_asset_handles,
			    ptpriv->extant_handles);

	((uv_handle_t *)w_read->pwatcher)->data = (void *)wsi;

	elops_io_uv(wsi, LWS_EV_START | LWS_EV_READ);

	return 0;
}

static void
elops_run_pt_uv(struct lws_context *context, int tsi)
{
	if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
		uv_run(pt_to_priv_uv(&context->pt[tsi])->io_loop, 0);
}

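/*
 * Tear down the per-thread static libuv assets (signal handlers, sul timer,
 * idle handler).  Their close callbacks then drive the rest of the shutdown
 * sequence; for an internal loop, a second call stops the loop itself.
 */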
static void
elops_destroy_pt_uv(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	int m, ns;

	if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
		return;

	if (!ptpriv->io_loop)
		return;

	if (pt->event_loop_destroy_processing_done) {
		if (!pt->event_loop_foreign) {
			lwsl_warn("%s: stopping event loop\n", __func__);
			uv_stop(pt_to_priv_uv(pt)->io_loop);
		}
		return;
	}

	pt->event_loop_destroy_processing_done = 1;
	// lwsl_cx_debug(context, "%d", tsi);

	if (!pt->event_loop_foreign) {

		uv_signal_stop(&pt_to_priv_uv(pt)->w_sigint.watcher);

		ns = LWS_ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		for (m = 0; m < ns; m++) {
			uv_signal_stop(&pt_to_priv_uv(pt)->signals[m]);
			uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->signals[m],
				 lws_uv_close_cb_sa);
		}
	} else
		lwsl_cx_debug(context, "not closing pt signals");

	uv_timer_stop(&pt_to_priv_uv(pt)->sultimer);
	uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->sultimer, lws_uv_close_cb_sa);

	uv_idle_stop(&pt_to_priv_uv(pt)->idle);
	uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->idle, lws_uv_close_cb_sa);
}

static int
elops_listen_init_uv(struct lws_dll2 *d, void *user)
{
	struct lws *wsi = lws_container_of(d, struct lws, listen_list);

	if (elops_init_vhost_listen_wsi_uv(wsi) == -1)
		return -1;

	return 0;
}

/*
 * This needs to be called after vhosts have been defined.
 *
 * If later, after server start, another vhost is added, this must be
 * called again to bind the vhost
 */

int
elops_init_pt_uv(struct lws_context *context, void *_loop, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	int status = 0, n, ns, first = 1;
	uv_loop_t *loop = (uv_loop_t *)_loop;

	ptpriv->pt = pt;

	if (!ptpriv->io_loop) {
		if (!loop) {
			loop = lws_malloc(sizeof(*loop), "libuv loop");
			if (!loop) {
				lwsl_cx_err(context, "OOM");
				return -1;
			}
#if UV_VERSION_MAJOR > 0
			uv_loop_init(loop);
#else
			lwsl_cx_err(context, "This libuv is too old to work...");
			return 1;
#endif
			pt->event_loop_foreign = 0;
		} else {
			lwsl_cx_notice(context, " Using foreign event loop...");
			pt->event_loop_foreign = 1;
		}

		ptpriv->io_loop = loop;
		uv_idle_init(loop, &ptpriv->idle);
		LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->idle, pt);
		uv_idle_start(&ptpriv->idle, lws_uv_idle);

		ns = LWS_ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		if (!pt->event_loop_foreign) {
			assert(ns <= (int)LWS_ARRAY_SIZE(ptpriv->signals));
			for (n = 0; n < ns; n++) {
				uv_signal_init(loop, &ptpriv->signals[n]);
				LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(
						&ptpriv->signals[n], pt);
				ptpriv->signals[n].data = pt;
				uv_signal_start(&ptpriv->signals[n],
						lws_uv_signal_handler, sigs[n]);
			}
		}
	} else
		first = 0;

	/*
	 * Initialize the accept wsi read watcher with all the listening sockets
	 * and register a callback for read operations
	 *
	 * We have to do it here because the uv loop(s) are not
	 * initialized until after context creation.
	 */
	lws_vhost_foreach_listen_wsi(context, context, elops_listen_init_uv);

	if (!first)
		return status;

	uv_timer_init(ptpriv->io_loop, &ptpriv->sultimer);
	LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->sultimer, pt);

	return status;
}
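
/*
 * Illustrative sketch (not part of the original source): an application that
 * wants lws to run on a libuv loop it owns would typically select this event
 * lib and hand the loop over at context creation, something like:
 *
 *	uv_loop_t loop;
 *	void *foreign_loops[1];
 *	struct lws_context_creation_info info;
 *
 *	uv_loop_init(&loop);
 *	memset(&info, 0, sizeof(info));
 *	info.options = LWS_SERVER_OPTION_LIBUV;
 *	foreign_loops[0] = &loop;
 *	info.foreign_loops = foreign_loops;
 *
 *	context = lws_create_context(&info);
 *	uv_run(&loop, UV_RUN_DEFAULT);
 *
 * elops_init_pt_uv() above then receives the per-thread loop pointer via
 * _loop and takes the pt->event_loop_foreign path.
 */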

static void
lws_libuv_closewsi(uv_handle_t *handle)
{
	struct lws *wsi = (struct lws *)handle->data;
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
#if defined(LWS_WITH_SERVER)
	int lspd = 0;
#endif

	// lwsl_wsi_notice(wsi, "in");

	lws_context_lock(context, __func__);

	/*
	 * We get called back here for every wsi that closes
	 */

#if defined(LWS_WITH_SERVER)
	if (wsi->role_ops && !strcmp(wsi->role_ops->name, "listen") &&
	    wsi->a.context->deprecated) {
		lspd = 1;
		context->deprecation_pending_listen_close_count--;
		if (!context->deprecation_pending_listen_close_count)
			lspd = 2;
	}
#endif

	lws_pt_lock(pt, __func__);

	lwsl_wsi_info(wsi, "thr %d: sa left %d: dyn left: %d (rk %d)",
			   (int)(pt - &pt->context->pt[0]),
			   pt->count_event_loop_static_asset_handles,
			   ptpriv->extant_handles - 1,
			   context->requested_stop_internal_loops);

	__lws_close_free_wsi_final(wsi);
	assert(ptpriv->extant_handles);
	ptpriv->extant_handles--;
	lws_pt_unlock(pt);

	/* it's our job to close the handle finally */
	lws_free(handle);

#if defined(LWS_WITH_SERVER)
	if (lspd == 2 && context->deprecation_cb) {
		lwsl_cx_notice(context, "calling deprecation callback");
		context->deprecation_cb();
	}
#endif

	/*
	 * eventually, we closed all the wsi...
	 */

	if (context->requested_stop_internal_loops &&
	    !ptpriv->extant_handles &&
	    !pt->count_event_loop_static_asset_handles) {

		/*
		 * we closed everything on this pt
		 */

		lws_context_unlock(context);
		lws_uv_finalize_pt(pt);

		return;
	}

	lws_context_unlock(context);
}

void
lws_libuv_closehandle(struct lws *wsi)
{
	uv_handle_t *handle;
	struct lws_io_watcher_libuv *w_read = &wsi_to_priv_uv(wsi)->w_read;

	if (!w_read->pwatcher)
		return;

	if (wsi->told_event_loop_closed)
		return;

//	lwsl_wsi_debug(wsi, "in");

	wsi->told_event_loop_closed = 1;

	/*
	 * The normal close path attaches the related wsi as the
	 * handle->data.
	 */

	handle = (uv_handle_t *)w_read->pwatcher;

	/* ensure we can only do this once */

	w_read->pwatcher = NULL;

	uv_close(handle, lws_libuv_closewsi);
}

static int
elops_foreign_thread_uv(struct lws_context *cx, int tsi)
{
	struct lws_context_per_thread *pt = &cx->pt[tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	uv_thread_t th = uv_thread_self();

	if (!ptpriv->thread_valid)
		/*
		 * We can't judge it until we get the first event from the loop
		 */
		return 0;

	/*
	 * This is the same thread that gave us the first event on this loop?
	 * Return 0 if so.
	 */

	return !uv_thread_equal(&th, &ptpriv->uv_thread);
}

static const struct lws_event_loop_ops event_loop_ops_uv = {
	/* name */			"libuv",
	/* init_context */		elops_init_context_uv,
	/* destroy_context1 */		elops_destroy_context1_uv,
	/* destroy_context2 */		elops_destroy_context2_uv,
	/* init_vhost_listen_wsi */	elops_init_vhost_listen_wsi_uv,
	/* init_pt */			elops_init_pt_uv,
	/* wsi_logical_close */		elops_wsi_logical_close_uv,
	/* check_client_connect_ok */	elops_check_client_connect_ok_uv,
	/* close_handle_manually */	elops_close_handle_manually_uv,
	/* accept */			elops_accept_uv,
	/* io */			elops_io_uv,
	/* run_pt */			elops_run_pt_uv,
	/* destroy_pt */		elops_destroy_pt_uv,
	/* destroy wsi */		NULL,
	/* foreign_thread */		elops_foreign_thread_uv,

	/* flags */			0,

	/* evlib_size_ctx */	sizeof(struct lws_context_eventlibs_libuv),
	/* evlib_size_pt */	sizeof(struct lws_pt_eventlibs_libuv),
	/* evlib_size_vh */	0,
	/* evlib_size_wsi */	sizeof(struct lws_io_watcher_libuv),
};

#if defined(LWS_WITH_EVLIB_PLUGINS)
LWS_VISIBLE
#endif
const lws_plugin_evlib_t evlib_uv = {
	.hdr = {
		"libuv event loop",
		"lws_evlib_plugin",
		LWS_BUILD_HASH,
		LWS_PLUGIN_API_MAGIC
	},

	.ops	= &event_loop_ops_uv
};