/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

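/*
 * The "sultimer" below is the single uv_timer_t each service thread (pt)
 * uses to dispatch lws' Sorted Usec List ("sul") scheduled events: each time
 * it fires, ripe events are serviced and the timer is re-armed for the next
 * pending one.  libuv timers are ms-resolution, hence the LWS_US_TO_MS
 * conversion.
 */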
static void
lws_uv_sultimer_cb(uv_timer_t *timer
#if UV_VERSION_MAJOR == 0
		, int status
#endif
)
{
	struct lws_context_per_thread *pt = lws_container_of(timer,
				struct lws_context_per_thread, uv.sultimer);
	lws_usec_t us;

	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
	if (us)
		uv_timer_start(&pt->uv.sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS(us), 0);
	lws_pt_unlock(pt);
}

static void
lws_uv_idle(uv_idle_t *handle
#if UV_VERSION_MAJOR == 0
		, int status
#endif
)
{
	struct lws_context_per_thread *pt = lws_container_of(handle,
					struct lws_context_per_thread, uv.idle);
	lws_usec_t us;

	lws_service_do_ripe_rxflow(pt);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);

	/* account for sultimer */

	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
	if (us)
		uv_timer_start(&pt->uv.sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS(us), 0);
	lws_pt_unlock(pt);

	/* there is nobody who needs service forcing, shut down idle */
	uv_idle_stop(handle);
}

static void
lws_io_cb(uv_poll_t *watcher, int status, int revents)
{
	struct lws *wsi = (struct lws *)((uv_handle_t *)watcher)->data;
	struct lws_context *context = wsi->context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pollfd eventfd;

	if (pt->is_destroyed)
		return;

#if defined(WIN32) || defined(_WIN32)
	eventfd.fd = watcher->socket;
#else
	eventfd.fd = watcher->io_watcher.fd;
#endif
	eventfd.events = 0;
	eventfd.revents = 0;

	if (status < 0) {
		/*
		 * At this point status will be a UV error, like UV_EBADF;
		 * we treat all errors as LWS_POLLHUP.
		 *
		 * You might want to return; instead of servicing the fd in
		 * some cases.
		 */
		if (status == UV_EAGAIN)
			return;

		eventfd.events |= LWS_POLLHUP;
		eventfd.revents |= LWS_POLLHUP;
	} else {
		if (revents & UV_READABLE) {
			eventfd.events |= LWS_POLLIN;
			eventfd.revents |= LWS_POLLIN;
		}
		if (revents & UV_WRITABLE) {
			eventfd.events |= LWS_POLLOUT;
			eventfd.revents |= LWS_POLLOUT;
		}
	}
	lws_service_fd_tsi(context, &eventfd, wsi->tsi);

	if (pt->destroy_self) {
		lws_context_destroy(pt->context);
		return;
	}

	uv_idle_start(&pt->uv.idle, lws_uv_idle);
}

/*
 * This does not actually stop the event loop.  The reason is we have to pass
 * libuv handle closures through its event loop.  So this tries to close all
 * wsi, and sets a flag; when all the wsi closures are finalized then we
 * actually stop the libuv event loops.
 */
static void
lws_libuv_stop(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m;

	lwsl_err("%s\n", __func__);

	if (context->requested_kill) {
		lwsl_err("%s: ignoring\n", __func__);
		return;
	}

	context->requested_kill = 1;

	m = context->count_threads;
	context->being_destroyed = 1;

	/*
	 * Phase 1: start the close of every dynamic uv handle
	 */

	while (m--) {
		pt = &context->pt[m];

		if (pt->pipe_wsi) {
			uv_poll_stop(pt->pipe_wsi->w_read.uv.pwatcher);
			lws_destroy_event_pipe(pt->pipe_wsi);
			pt->pipe_wsi = NULL;
		}

		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);

			if (!wsi)
				continue;
			lws_close_free_wsi(wsi,
				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
				__func__ /* no protocol close */);
			n--;
		}
	}

	lwsl_info("%s: started closing all wsi\n", __func__);

	/* we cannot have completed... there are at least the cancel pipes */
}

static void
lws_uv_signal_handler(uv_signal_t *watcher, int signum)
{
	struct lws_context *context = watcher->data;

	if (context->eventlib_signal_cb) {
		context->eventlib_signal_cb((void *)watcher, signum);

		return;
	}

	lwsl_err("internal signal handler caught signal %d\n", signum);
	lws_libuv_stop(watcher->data);
}
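
/*
 * An application can override the default handling above by setting the
 * signal_cb member of struct lws_context_creation_info at context creation
 * time.  A minimal sketch ("my_signal_cb" is a hypothetical user function,
 * not part of lws):
 *
 *	static void my_signal_cb(void *handle, int signum)
 *	{
 *		// handle is the uv_signal_t *; do app-specific cleanup,
 *		// then stop however the application prefers
 *	}
 *
 *	struct lws_context_creation_info info;
 *
 *	memset(&info, 0, sizeof info);
 *	info.signal_cb = my_signal_cb;
 */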

static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };

/*
 * Closing Phase 2: Close callback for a static UV asset
 */

static void
lws_uv_close_cb_sa(uv_handle_t *handle)
{
	struct lws_context *context =
			LWS_UV_REFCOUNT_STATIC_HANDLE_TO_CONTEXT(handle);
	int n;

	lwsl_info("%s: sa left %d: dyn left: %d\n", __func__,
		    context->count_event_loop_static_asset_handles,
		    context->count_wsi_allocated);

	/* any static assets left? */

	if (LWS_UV_REFCOUNT_STATIC_HANDLE_DESTROYED(handle) ||
	    context->count_wsi_allocated)
		return;

	/*
	 * That's it... all wsi were down, and now every
	 * static asset lws had a UV handle for is down.
	 *
	 * Stop the loop so we can get out of here.
	 */

	for (n = 0; n < context->count_threads; n++) {
		struct lws_context_per_thread *pt = &context->pt[n];

		if (pt->uv.io_loop && !pt->event_loop_foreign)
			uv_stop(pt->uv.io_loop);
	}

	if (!context->pt[0].event_loop_foreign) {
		lwsl_info("%s: calling lws_context_destroy2\n", __func__);
		lws_context_destroy2(context);
	}

	lwsl_info("%s: all done\n", __func__);
}

/*
 * These must be called by protocols that want to use libuv objects directly...
 *
 * .... when the libuv object is created...
 */

void
lws_libuv_static_refcount_add(uv_handle_t *h, struct lws_context *context)
{
	LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(h, context);
}

/*
 * ... and in the close callback when the object is closed.
 */

void
lws_libuv_static_refcount_del(uv_handle_t *h)
{
	lws_uv_close_cb_sa(h);
}
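
/*
 * Sketch of the pairing described above, for a protocol creating its own
 * uv_timer_t on the lws loop ("my_timer_close_cb" is a hypothetical user
 * function, not part of lws):
 *
 *	static void my_timer_close_cb(uv_handle_t *h)
 *	{
 *		lws_libuv_static_refcount_del(h);
 *	}
 *
 *	uv_timer_t t;
 *
 *	uv_timer_init(lws_uv_getloop(context, 0), &t);
 *	lws_libuv_static_refcount_add((uv_handle_t *)&t, context);
 *	...
 *	uv_close((uv_handle_t *)&t, my_timer_close_cb);
 *
 * Without the add / del pair, context destruction cannot know when the
 * protocol's handles have finished closing.
 */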

static void lws_uv_close_cb(uv_handle_t *handle)
{
}

static void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
{
	if (!uv_is_closing(handle))
		uv_close(handle, lws_uv_close_cb);
}

void
lws_close_all_handles_in_loop(uv_loop_t *loop)
{
	uv_walk(loop, lws_uv_walk_cb, NULL);
}
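
/*
 * This helper is mainly useful at shutdown with a foreign loop: after lws
 * has been destroyed, the application can walk its own loop to close
 * anything remaining before closing the loop itself.  A minimal sketch,
 * assuming "loop" is the application's own uv_loop_t:
 *
 *	lws_context_destroy(context);
 *	lws_close_all_handles_in_loop(&loop);
 *	uv_run(&loop, UV_RUN_DEFAULT);
 *	uv_loop_close(&loop);
 */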

void
lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
{
	if (context->pt[tsi].uv.io_loop)
		uv_stop(context->pt[tsi].uv.io_loop);
}

uv_loop_t *
lws_uv_getloop(struct lws_context *context, int tsi)
{
	if (context->pt[tsi].uv.io_loop)
		return context->pt[tsi].uv.io_loop;

	return NULL;
}

int
lws_libuv_check_watcher_active(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi->w_read.uv.pwatcher;

	if (!h)
		return 0;

	return uv_is_active(h);
}

#if defined(LWS_WITH_PLUGINS) && (UV_VERSION_MAJOR > 0)

int
lws_uv_plugins_init(struct lws_context *context, const char * const *d)
{
	struct lws_plugin_capability lcaps;
	struct lws_plugin *plugin;
	lws_plugin_init_func initfunc;
	int m, ret = 0;
	void *v;
	uv_dirent_t dent;
	uv_fs_t req;
	char path[256];
	uv_lib_t lib;
	int pofs = 0;

#if defined(__MINGW32__) || !defined(WIN32)
	pofs = 3;
#endif

	lib.errmsg = NULL;
	lib.handle = NULL;

	uv_loop_init(&context->uv.loop);

	lwsl_notice("  Plugins:\n");

	while (d && *d) {

		lwsl_notice("  Scanning %s\n", *d);
		m = uv_fs_scandir(&context->uv.loop, &req, *d, 0, NULL);
		if (m < 1) {
			lwsl_err("Scandir on %s failed\n", *d);
			return 1;
		}

		while (uv_fs_scandir_next(&req, &dent) != UV_EOF) {
			if (strlen(dent.name) < 7)
				continue;

			lwsl_notice("   %s\n", dent.name);

			lws_snprintf(path, sizeof(path) - 1, "%s/%s", *d,
				     dent.name);
			if (uv_dlopen(path, &lib)) {
				uv_dlerror(&lib);
				lwsl_err("Error loading DSO: %s\n", lib.errmsg);
				uv_dlclose(&lib);
				goto bail;
			}

			/* we could open it, can we get its init function? */

#if !defined(WIN32) && !defined(__MINGW32__)
			m = lws_snprintf(path, sizeof(path) - 1, "init_%s",
				     dent.name + pofs /* snip lib... */);
			path[m - 3] = '\0'; /* snip the .so */
#else
			m = lws_snprintf(path, sizeof(path) - 1, "init_%s",
				     dent.name + pofs);
			path[m - 4] = '\0'; /* snip the .dll */
#endif
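			/*
			 * Worked example of the derivation above: for a DSO
			 * named "libprotocol_example.so", the "lib" prefix is
			 * skipped (pofs == 3) and the ".so" suffix snipped,
			 * so we look up the symbol "init_protocol_example"
			 * ("protocol_example" is an illustrative plugin name,
			 * not one shipped with lws).
			 */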
			if (uv_dlsym(&lib, path, &v)) {
				uv_dlerror(&lib);
				lwsl_err("%s: Failed to get '%s' on %s: %s\n",
					 __func__, path, dent.name, lib.errmsg);
				uv_dlclose(&lib);
				goto bail;
			}
			initfunc = (lws_plugin_init_func)v;
			lcaps.api_magic = LWS_PLUGIN_API_MAGIC;
			m = initfunc(context, &lcaps);
			if (m) {
				lwsl_err("Init %s failed %d\n", dent.name, m);
				goto skip;
			}

			plugin = lws_malloc(sizeof(*plugin), "plugin");
			if (!plugin) {
				uv_dlclose(&lib);
				lwsl_err("OOM\n");
				goto bail;
			}
			plugin->list = context->plugin_list;
			context->plugin_list = plugin;
			lws_strncpy(plugin->name, dent.name,
				    sizeof(plugin->name));
			plugin->lib = lib;
			plugin->caps = lcaps;
			context->plugin_protocol_count += lcaps.count_protocols;
			context->plugin_extension_count += lcaps.count_extensions;

			continue;

skip:
			uv_dlclose(&lib);
		}
bail:
		uv_fs_req_cleanup(&req);
		d++;
	}

	return ret;
}

int
lws_uv_plugins_destroy(struct lws_context *context)
{
	struct lws_plugin *plugin = context->plugin_list, *p;
	lws_plugin_destroy_func func;
	char path[256];
	int pofs = 0;
	void *v;
	int m;

#if defined(__MINGW32__) || !defined(WIN32)
	pofs = 3;
#endif

	if (!plugin)
		return 0;

	while (plugin) {
		p = plugin;

#if !defined(WIN32) && !defined(__MINGW32__)
		m = lws_snprintf(path, sizeof(path) - 1, "destroy_%s",
				 plugin->name + pofs);
		path[m - 3] = '\0';
#else
		m = lws_snprintf(path, sizeof(path) - 1, "destroy_%s",
				 plugin->name + pofs);
		path[m - 4] = '\0';
#endif

		if (uv_dlsym(&plugin->lib, path, &v)) {
			uv_dlerror(&plugin->lib);
			lwsl_err("Failed to get %s on %s: %s\n", path,
					plugin->name, plugin->lib.errmsg);
		} else {
			func = (lws_plugin_destroy_func)v;
			m = func(context);
			if (m)
				lwsl_err("Destroying %s failed %d\n",
						plugin->name, m);
		}

		uv_dlclose(&p->lib);
		plugin = p->list;
		p->list = NULL;
		free(p);
	}

	context->plugin_list = NULL;

	while (uv_loop_close(&context->uv.loop))
		;

	return 0;
}

#endif

static int
elops_init_context_uv(struct lws_context *context,
		      const struct lws_context_creation_info *info)
{
	int n;

	context->eventlib_signal_cb = info->signal_cb;

	for (n = 0; n < context->count_threads; n++)
		context->pt[n].w_sigint.context = context;

	return 0;
}

static int
elops_destroy_context1_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m = 0;

	for (n = 0; n < context->count_threads; n++) {
		int budget = 10000;

		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign) {

			while (budget-- && (m = uv_run(pt->uv.io_loop,
						       UV_RUN_NOWAIT)))
				;

			if (m)
				lwsl_info("%s: tsi %d: not all closed\n",
					  __func__, n);
		}
	}

	/* call destroy2 if internal loop */
	return !context->pt[0].event_loop_foreign;
}

static int
elops_destroy_context2_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, internal = 0;

	for (n = 0; n < context->count_threads; n++) {
		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign && pt->uv.io_loop) {
			internal = 1;
			if (!context->finalize_destroy_after_internal_loops_stopped)
				uv_stop(pt->uv.io_loop);
			else {
#if UV_VERSION_MAJOR > 0
				uv_loop_close(pt->uv.io_loop);
#endif
				lws_free_set_NULL(pt->uv.io_loop);
			}
		}
	}

	return internal;
}

static int
elops_wsi_logical_close_uv(struct lws *wsi)
{
	if (!lws_socket_is_valid(wsi->desc.sockfd))
		return 0;

	if (wsi->listener || wsi->event_pipe) {
		lwsl_debug("%s: %p: %d %d stop listener / pipe poll\n",
			   __func__, wsi, wsi->listener, wsi->event_pipe);
		if (wsi->w_read.uv.pwatcher)
			uv_poll_stop(wsi->w_read.uv.pwatcher);
	}
	lwsl_debug("%s: lws_libuv_closehandle: wsi %p\n", __func__, wsi);
	/*
	 * libuv has to do its own close handle processing asynchronously
	 */
	lws_libuv_closehandle(wsi);

	return 1; /* do not complete the wsi close, uv close cb will do it */
}

static int
elops_check_client_connect_ok_uv(struct lws *wsi)
{
	if (lws_libuv_check_watcher_active(wsi)) {
		lwsl_warn("Waiting for libuv watcher to close\n");
		return 1;
	}

	return 0;
}

static void
lws_libuv_closewsi_m(uv_handle_t *handle)
{
	lws_sockfd_type sockfd = (lws_sockfd_type)(lws_intptr_t)handle->data;

	lwsl_debug("%s: sockfd %d\n", __func__, sockfd);
	compatible_close(sockfd);
	lws_free(handle);
}

static void
elops_close_handle_manually_uv(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi->w_read.uv.pwatcher;

	lwsl_debug("%s: lws_libuv_closehandle: wsi %p\n", __func__, wsi);

	/*
	 * the "manual" variant only closes the handle itself and the
	 * related fd.  handle->data is the fd.
	 */
	h->data = (void *)(lws_intptr_t)wsi->desc.sockfd;

	/*
	 * We take responsibility to close / destroy these now.
	 * Remove any trace from the wsi.
	 */

	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi->w_read.uv.pwatcher = NULL;
	wsi->told_event_loop_closed = 1;

	uv_close(h, lws_libuv_closewsi_m);
}

static int
elops_accept_uv(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	wsi->w_read.context = wsi->context;

	wsi->w_read.uv.pwatcher =
		lws_malloc(sizeof(*wsi->w_read.uv.pwatcher), "uvh");
	if (!wsi->w_read.uv.pwatcher)
		return -1;

	if (wsi->role_ops->file_handle)
		uv_poll_init(pt->uv.io_loop, wsi->w_read.uv.pwatcher,
			     (int)(long long)wsi->desc.filefd);
	else
		uv_poll_init_socket(pt->uv.io_loop,
				    wsi->w_read.uv.pwatcher,
				    wsi->desc.sockfd);

	((uv_handle_t *)wsi->w_read.uv.pwatcher)->data = (void *)wsi;

	return 0;
}

static void
elops_io_uv(struct lws *wsi, int flags)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	struct lws_io_watcher *w = &wsi->w_read;
	int current_events = w->actual_events & (UV_READABLE | UV_WRITABLE);

	lwsl_debug("%s: %p: %d\n", __func__, wsi, flags);

	/* w->context is set after the loop is initialized */

	if (!pt->uv.io_loop || !w->context) {
		lwsl_info("%s: no io loop yet\n", __func__);
		return;
	}

	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_err("%s: assert: flags %d\n", __func__, flags);
		assert(0);
	}

	if (!w->uv.pwatcher || wsi->told_event_loop_closed) {
		lwsl_err("%s: no watcher\n", __func__);

		return;
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(w->uv.pwatcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(w->uv.pwatcher);
		else
			uv_poll_start(w->uv.pwatcher, current_events,
				      lws_io_cb);
	}

	w->actual_events = current_events;
}
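
/*
 * For illustration, internal callers compose flags from one of LWS_EV_START /
 * LWS_EV_STOP plus at least one of LWS_EV_READ / LWS_EV_WRITE; anything else
 * trips the assert above.  E.g.:
 *
 *	elops_io_uv(wsi, LWS_EV_START | LWS_EV_WRITE);	// want writeable cb
 *	elops_io_uv(wsi, LWS_EV_STOP | LWS_EV_WRITE);	// done writing
 */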

static int
elops_init_vhost_listen_wsi_uv(struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!wsi)
		return 0;
	if (wsi->w_read.context)
		return 0;

	pt = &wsi->context->pt[(int)wsi->tsi];
	if (!pt->uv.io_loop)
		return 0;

	wsi->w_read.context = wsi->context;

	wsi->w_read.uv.pwatcher =
		lws_malloc(sizeof(*wsi->w_read.uv.pwatcher), "uvh");
	if (!wsi->w_read.uv.pwatcher)
		return -1;

	n = uv_poll_init_socket(pt->uv.io_loop, wsi->w_read.uv.pwatcher,
				wsi->desc.sockfd);
	if (n) {
		lwsl_err("uv_poll_init failed %d, sockfd=%p\n", n,
			 (void *)(lws_intptr_t)wsi->desc.sockfd);

		return -1;
	}

	((uv_handle_t *)wsi->w_read.uv.pwatcher)->data = (void *)wsi;

	elops_io_uv(wsi, LWS_EV_START | LWS_EV_READ);

	return 0;
}

static void
elops_run_pt_uv(struct lws_context *context, int tsi)
{
	if (context->pt[tsi].uv.io_loop)
		uv_run(context->pt[tsi].uv.io_loop, 0);
}

static void
elops_destroy_pt_uv(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	int m, ns;

	lwsl_info("%s: %d\n", __func__, tsi);

	if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
		return;

	if (!pt->uv.io_loop)
		return;

	if (pt->event_loop_destroy_processing_done)
		return;

	pt->event_loop_destroy_processing_done = 1;

	if (!pt->event_loop_foreign) {
		uv_signal_stop(&pt->w_sigint.uv.watcher);

		ns = LWS_ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		for (m = 0; m < ns; m++) {
			uv_signal_stop(&pt->uv.signals[m]);
			uv_close((uv_handle_t *)&pt->uv.signals[m],
				 lws_uv_close_cb_sa);
		}
	} else
		lwsl_debug("%s: not closing pt signals\n", __func__);

	uv_timer_stop(&pt->uv.sultimer);
	uv_close((uv_handle_t *)&pt->uv.sultimer, lws_uv_close_cb_sa);

	uv_idle_stop(&pt->uv.idle);
	uv_close((uv_handle_t *)&pt->uv.idle, lws_uv_close_cb_sa);
}

/*
 * This needs to be called after vhosts have been defined.
 *
 * If later, after server start, another vhost is added, this must be
 * called again to bind the vhost.
 */

int
elops_init_pt_uv(struct lws_context *context, void *_loop, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_vhost *vh = context->vhost_list;
	int status = 0, n, ns, first = 1;
	uv_loop_t *loop = (uv_loop_t *)_loop;

	if (!pt->uv.io_loop) {
		if (!loop) {
			loop = lws_malloc(sizeof(*loop), "libuv loop");
			if (!loop) {
				lwsl_err("OOM\n");
				return -1;
			}
#if UV_VERSION_MAJOR > 0
			uv_loop_init(loop);
#else
			lwsl_err("This libuv is too old to work...\n");
			return 1;
#endif
			pt->event_loop_foreign = 0;
		} else {
			lwsl_notice(" Using foreign event loop...\n");
			pt->event_loop_foreign = 1;
		}

		pt->uv.io_loop = loop;
		uv_idle_init(loop, &pt->uv.idle);
		LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&pt->uv.idle, context);

		ns = LWS_ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		if (!pt->event_loop_foreign) {
			assert(ns <= (int)LWS_ARRAY_SIZE(pt->uv.signals));
			for (n = 0; n < ns; n++) {
				uv_signal_init(loop, &pt->uv.signals[n]);
				LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&pt->uv.signals[n],
								  context);
				pt->uv.signals[n].data = pt->context;
				uv_signal_start(&pt->uv.signals[n],
						lws_uv_signal_handler, sigs[n]);
			}
		}
	} else
		first = 0;

	/*
	 * Initialize the accept wsi read watcher with all the listening
	 * sockets and register a callback for read operations.
	 *
	 * We have to do it here because the uv loop(s) are not
	 * initialized until after context creation.
	 */
	while (vh) {
		if (elops_init_vhost_listen_wsi_uv(vh->lserv_wsi) == -1)
			return -1;
		vh = vh->vhost_next;
	}

	if (!first)
		return status;

	uv_timer_init(pt->uv.io_loop, &pt->uv.sultimer);
	LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&pt->uv.sultimer, context);

	return status;
}
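
/*
 * A minimal sketch of driving lws from a foreign (application-owned) libuv
 * loop, which is what makes pt->event_loop_foreign true above.  The options
 * and foreign_loops members belong to the public lws_context_creation_info
 * API; treat the exact sequence here as illustrative only:
 *
 *	struct lws_context_creation_info info;
 *	struct lws_context *context;
 *	void *foreign_loops[1];
 *	uv_loop_t loop;
 *
 *	uv_loop_init(&loop);
 *	memset(&info, 0, sizeof info);
 *	info.options = LWS_SERVER_OPTION_LIBUV;
 *	foreign_loops[0] = &loop;
 *	info.foreign_loops = foreign_loops;
 *
 *	context = lws_create_context(&info);
 *	uv_run(&loop, UV_RUN_DEFAULT);
 */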

static void
lws_libuv_closewsi(uv_handle_t *handle)
{
	struct lws *wsi = (struct lws *)handle->data;
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
#if defined(LWS_WITH_SERVER)
	int lspd = 0;
#endif

	lwsl_info("%s: %p\n", __func__, wsi);

	/*
	 * We get called back here for every wsi that closes
	 */

#if defined(LWS_WITH_SERVER)
	if (wsi->role_ops == &role_ops_listen && wsi->context->deprecated) {
		lspd = 1;
		context->deprecation_pending_listen_close_count--;
		if (!context->deprecation_pending_listen_close_count)
			lspd = 2;
	}
#endif

	lws_pt_lock(pt, __func__);
	__lws_close_free_wsi_final(wsi);
	lws_pt_unlock(pt);

	/* it's our job to close the handle finally */
	lws_free(handle);

#if defined(LWS_WITH_SERVER)
	if (lspd == 2 && context->deprecation_cb) {
		lwsl_notice("calling deprecation callback\n");
		context->deprecation_cb();
	}
#endif

	lwsl_info("%s: sa left %d: dyn left: %d (rk %d)\n", __func__,
		    context->count_event_loop_static_asset_handles,
		    context->count_wsi_allocated, context->requested_kill);

	/*
	 * eventually, we closed all the wsi...
	 */

	if (context->requested_kill && !context->count_wsi_allocated) {
		struct lws_vhost *vh = context->vhost_list;
		int m;

		/*
		 * Start Closing Phase 2: close of static handles
		 */

		lwsl_info("%s: all lws dynamic handles down, closing static\n",
			    __func__);

		for (m = 0; m < context->count_threads; m++)
			elops_destroy_pt_uv(context, m);

		/* protocols may have initialized libuv objects */

		while (vh) {
			lws_vhost_destroy1(vh);
			vh = vh->vhost_next;
		}

		if (!context->count_event_loop_static_asset_handles &&
		    context->pt[0].event_loop_foreign) {
			lwsl_info("%s: call lws_context_destroy2\n", __func__);
			lws_context_destroy2(context);
		}
	}
}

void
lws_libuv_closehandle(struct lws *wsi)
{
	uv_handle_t *handle;

	if (!wsi->w_read.uv.pwatcher)
		return;

	if (wsi->told_event_loop_closed) {
	//	assert(0);
		return;
	}

	lwsl_debug("%s: %p\n", __func__, wsi);

	wsi->told_event_loop_closed = 1;

	/*
	 * The normal close path attaches the related wsi as the
	 * handle->data.
	 */

	handle = (uv_handle_t *)wsi->w_read.uv.pwatcher;

	/* ensure we can only do this once */

	wsi->w_read.uv.pwatcher = NULL;

	uv_close(handle, lws_libuv_closewsi);
}

struct lws_event_loop_ops event_loop_ops_uv = {
	/* name */			"libuv",
	/* init_context */		elops_init_context_uv,
	/* destroy_context1 */		elops_destroy_context1_uv,
	/* destroy_context2 */		elops_destroy_context2_uv,
	/* init_vhost_listen_wsi */	elops_init_vhost_listen_wsi_uv,
	/* init_pt */			elops_init_pt_uv,
	/* wsi_logical_close */		elops_wsi_logical_close_uv,
	/* check_client_connect_ok */	elops_check_client_connect_ok_uv,
	/* close_handle_manually */	elops_close_handle_manually_uv,
	/* accept */			elops_accept_uv,
	/* io */			elops_io_uv,
	/* run_pt */			elops_run_pt_uv,
	/* destroy_pt */		elops_destroy_pt_uv,
	/* destroy wsi */		NULL,

	/* flags */			0,
};