1 /*
2  * libwebsockets - small server side websockets and web server implementation
3  *
4  * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #include "private-lib-core.h"
26 
27 static void
lws_ev_hrtimer_cb(struct ev_loop * loop,struct ev_timer * watcher,int revents)28 lws_ev_hrtimer_cb(struct ev_loop *loop, struct ev_timer *watcher, int revents)
29 {
30 	struct lws_context_per_thread *pt =
31 			(struct lws_context_per_thread *)watcher->data;
32 	lws_usec_t us;
33 
34 	lws_pt_lock(pt, __func__);
35 	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
36 	if (us) {
37 		ev_timer_set(&pt->ev.hrtimer, ((float)us) / 1000000.0, 0);
38 		ev_timer_start(pt->ev.io_loop, &pt->ev.hrtimer);
39 	}
40 	lws_pt_unlock(pt);
41 }
42 
43 static void
lws_ev_idle_cb(struct ev_loop * loop,struct ev_idle * handle,int revents)44 lws_ev_idle_cb(struct ev_loop *loop, struct ev_idle *handle, int revents)
45 {
46 	struct lws_context_per_thread *pt = lws_container_of(handle,
47 					struct lws_context_per_thread, ev.idle);
48 	lws_usec_t us;
49 	int reschedule = 0;
50 
51 	lws_service_do_ripe_rxflow(pt);
52 
53 	/*
54 	 * is there anybody with pending stuff that needs service forcing?
55 	 */
56 	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
57 		/* -1 timeout means just do forced service */
58 		reschedule = _lws_plat_service_forced_tsi(pt->context, pt->tid);
59 
60 	/* account for hrtimer */
61 
62 	lws_pt_lock(pt, __func__);
63 	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
64 	if (us) {
65 		ev_timer_set(&pt->ev.hrtimer, ((float)us) / 1000000.0, 0);
66 		ev_timer_start(pt->ev.io_loop, &pt->ev.hrtimer);
67 	}
68 	lws_pt_unlock(pt);
69 
70 	/* there is nobody who needs service forcing, shut down idle */
71 	if (!reschedule)
72 		ev_idle_stop(loop, handle);
73 
74 	if (pt->destroy_self)
75 		lws_context_destroy(pt->context);
76 }
77 
78 static void
lws_accept_cb(struct ev_loop * loop,struct ev_io * watcher,int revents)79 lws_accept_cb(struct ev_loop *loop, struct ev_io *watcher, int revents)
80 {
81 	struct lws_io_watcher *lws_io = lws_container_of(watcher,
82 					struct lws_io_watcher, ev.watcher);
83 	struct lws_context *context = lws_io->context;
84 	struct lws_context_per_thread *pt;
85 	struct lws_pollfd eventfd;
86 	struct lws *wsi;
87 
88 	if (revents & EV_ERROR)
89 		return;
90 
91 	eventfd.fd = watcher->fd;
92 	eventfd.events = 0;
93 	eventfd.revents = EV_NONE;
94 
95 	if (revents & EV_READ) {
96 		eventfd.events |= LWS_POLLIN;
97 		eventfd.revents |= LWS_POLLIN;
98 	}
99 	if (revents & EV_WRITE) {
100 		eventfd.events |= LWS_POLLOUT;
101 		eventfd.revents |= LWS_POLLOUT;
102 	}
103 
104 	wsi = wsi_from_fd(context, watcher->fd);
105 	pt = &context->pt[(int)wsi->tsi];
106 
107 	lws_service_fd_tsi(context, &eventfd, (int)wsi->tsi);
108 
109 	ev_idle_start(pt->ev.io_loop, &pt->ev.idle);
110 }
111 
112 void
lws_ev_sigint_cb(struct ev_loop * loop,struct ev_signal * watcher,int revents)113 lws_ev_sigint_cb(struct ev_loop *loop, struct ev_signal *watcher, int revents)
114 {
115 	struct lws_context *context = watcher->data;
116 
117 	if (context->eventlib_signal_cb) {
118 		context->eventlib_signal_cb((void *)watcher, watcher->signum);
119 
120 		return;
121 	}
122 	ev_break(loop, EVBREAK_ALL);
123 }
124 
125 static int
elops_init_pt_ev(struct lws_context * context,void * _loop,int tsi)126 elops_init_pt_ev(struct lws_context *context, void *_loop, int tsi)
127 {
128 	struct lws_context_per_thread *pt = &context->pt[tsi];
129 	struct ev_signal *w_sigint = &context->pt[tsi].w_sigint.ev.watcher;
130 	struct ev_loop *loop = (struct ev_loop *)_loop;
131 	struct lws_vhost *vh = context->vhost_list;
132 	const char *backend_name;
133 	int status = 0;
134 	int backend;
135 
136 	lwsl_info("%s: loop %p\n", __func__, _loop);
137 
138 	if (!loop)
139 		loop = ev_loop_new(0);
140 	else
141 		context->pt[tsi].event_loop_foreign = 1;
142 
143 	if (!loop) {
144 		lwsl_err("%s: creating event base failed\n", __func__);
145 
146 		return -1;
147 	}
148 
149 	pt->ev.io_loop = loop;
150 
151 	/*
152 	 * Initialize the accept w_accept with all the listening sockets
153 	 * and register a callback for read operations
154 	 */
155 	while (vh) {
156 		if (vh->lserv_wsi) {
157 			vh->lserv_wsi->w_read.context = context;
158 			vh->w_accept.context = context;
159 
160 			ev_io_init(&vh->w_accept.ev.watcher, lws_accept_cb,
161 				   vh->lserv_wsi->desc.sockfd, EV_READ);
162 			ev_io_start(loop, &vh->w_accept.ev.watcher);
163 
164 		}
165 		vh = vh->vhost_next;
166 	}
167 
168 	/* Register the signal watcher unless it's a foreign loop */
169 	if (!context->pt[tsi].event_loop_foreign) {
170 		ev_signal_init(w_sigint, lws_ev_sigint_cb, SIGINT);
171 		w_sigint->data = context;
172 		ev_signal_start(loop, w_sigint);
173 	}
174 
175 	backend = ev_backend(loop);
176 	switch (backend) {
177 	case EVBACKEND_SELECT:
178 		backend_name = "select";
179 		break;
180 	case EVBACKEND_POLL:
181 		backend_name = "poll";
182 		break;
183 	case EVBACKEND_EPOLL:
184 		backend_name = "epoll";
185 		break;
186 #if defined(LWS_HAVE_EVBACKEND_LINUXAIO)
187        case EVBACKEND_LINUXAIO:
188                backend_name = "Linux AIO";
189                break;
190 #endif
191 #if defined(LWS_HAVE_EVBACKEND_IOURING)
192        case EVBACKEND_IOURING:
193                backend_name = "Linux io_uring";
194                break;
195 #endif
196        case EVBACKEND_KQUEUE:
197 		backend_name = "kqueue";
198 		break;
199 	case EVBACKEND_DEVPOLL:
200 		backend_name = "/dev/poll";
201 		break;
202 	case EVBACKEND_PORT:
203 		backend_name = "Solaris 10 \"port\"";
204 		break;
205 	default:
206 		backend_name = "Unknown libev backend";
207 		break;
208 	}
209 
210 	lwsl_info(" libev backend: %s\n", backend_name);
211 	(void)backend_name;
212 
213 	ev_timer_init(&pt->ev.hrtimer, lws_ev_hrtimer_cb, 0, 0);
214 	pt->ev.hrtimer.data = pt;
215 
216 	ev_idle_init(&pt->ev.idle, lws_ev_idle_cb);
217 
218 	return status;
219 }
220 
221 static void
elops_destroy_pt_ev(struct lws_context * context,int tsi)222 elops_destroy_pt_ev(struct lws_context *context, int tsi)
223 {
224 	struct lws_context_per_thread *pt = &context->pt[tsi];
225 	struct lws_vhost *vh = context->vhost_list;
226 
227 	while (vh) {
228 		if (vh->lserv_wsi)
229 			ev_io_stop(pt->ev.io_loop, &vh->w_accept.ev.watcher);
230 		vh = vh->vhost_next;
231 	}
232 
233 	/* static assets */
234 
235 	ev_timer_stop(pt->ev.io_loop, &pt->ev.hrtimer);
236 	ev_idle_stop(pt->ev.io_loop, &pt->ev.idle);
237 
238 	if (!pt->event_loop_foreign)
239 		ev_signal_stop(pt->ev.io_loop, &pt->w_sigint.ev.watcher);
240 }
241 
242 static int
elops_init_context_ev(struct lws_context * context,const struct lws_context_creation_info * info)243 elops_init_context_ev(struct lws_context *context,
244 		      const struct lws_context_creation_info *info)
245 {
246 	int n;
247 
248 	context->eventlib_signal_cb = info->signal_cb;
249 
250 	for (n = 0; n < context->count_threads; n++)
251 		context->pt[n].w_sigint.context = context;
252 
253 	return 0;
254 }
255 
256 static int
elops_accept_ev(struct lws * wsi)257 elops_accept_ev(struct lws *wsi)
258 {
259 	int fd;
260 
261 	if (wsi->role_ops->file_handle)
262 		fd = wsi->desc.filefd;
263 	else
264 		fd = wsi->desc.sockfd;
265 
266 	wsi->w_read.context = wsi->context;
267 	wsi->w_write.context = wsi->context;
268 
269 	ev_io_init(&wsi->w_read.ev.watcher, lws_accept_cb, fd, EV_READ);
270 	ev_io_init(&wsi->w_write.ev.watcher, lws_accept_cb, fd, EV_WRITE);
271 
272 	return 0;
273 }
274 
275 static void
elops_io_ev(struct lws * wsi,int flags)276 elops_io_ev(struct lws *wsi, int flags)
277 {
278 	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
279 
280 	if (!pt->ev.io_loop || pt->is_destroyed)
281 		return;
282 
283 	assert((flags & (LWS_EV_START | LWS_EV_STOP)) &&
284 	       (flags & (LWS_EV_READ | LWS_EV_WRITE)));
285 
286 	if (flags & LWS_EV_START) {
287 		if (flags & LWS_EV_WRITE)
288 			ev_io_start(pt->ev.io_loop, &wsi->w_write.ev.watcher);
289 		if (flags & LWS_EV_READ)
290 			ev_io_start(pt->ev.io_loop, &wsi->w_read.ev.watcher);
291 	} else {
292 		if (flags & LWS_EV_WRITE)
293 			ev_io_stop(pt->ev.io_loop, &wsi->w_write.ev.watcher);
294 		if (flags & LWS_EV_READ)
295 			ev_io_stop(pt->ev.io_loop, &wsi->w_read.ev.watcher);
296 	}
297 
298 	if (pt->destroy_self)
299 		lws_context_destroy(pt->context);
300 }
301 
302 static void
elops_run_pt_ev(struct lws_context * context,int tsi)303 elops_run_pt_ev(struct lws_context *context, int tsi)
304 {
305 	if (context->pt[tsi].ev.io_loop)
306 		ev_run(context->pt[tsi].ev.io_loop, 0);
307 }
308 
309 static int
elops_destroy_context2_ev(struct lws_context * context)310 elops_destroy_context2_ev(struct lws_context *context)
311 {
312 	struct lws_context_per_thread *pt;
313 	int n, m;
314 
315 	lwsl_debug("%s\n", __func__);
316 
317 	for (n = 0; n < context->count_threads; n++) {
318 		int budget = 1000;
319 
320 		pt = &context->pt[n];
321 
322 		/* only for internal loops... */
323 
324 		if (pt->event_loop_foreign || !pt->ev.io_loop)
325 			continue;
326 
327 		if (!context->finalize_destroy_after_internal_loops_stopped) {
328 			ev_break(pt->ev.io_loop, EVBREAK_ONE);
329 			continue;
330 		}
331 		while (budget-- &&
332 		       (m = ev_run(pt->ev.io_loop, 0)))
333 			;
334 
335 		ev_loop_destroy(pt->ev.io_loop);
336 	}
337 
338 	return 0;
339 }
340 
341 static int
elops_init_vhost_listen_wsi_ev(struct lws * wsi)342 elops_init_vhost_listen_wsi_ev(struct lws *wsi)
343 {
344 	int fd;
345 
346 	if (!wsi) {
347 		assert(0);
348 		return 0;
349 	}
350 
351 	wsi->w_read.context = wsi->context;
352 	wsi->w_write.context = wsi->context;
353 
354 	if (wsi->role_ops->file_handle)
355 		fd = wsi->desc.filefd;
356 	else
357 		fd = wsi->desc.sockfd;
358 
359 	ev_io_init(&wsi->w_read.ev.watcher, lws_accept_cb, fd, EV_READ);
360 	ev_io_init(&wsi->w_write.ev.watcher, lws_accept_cb, fd, EV_WRITE);
361 
362 	elops_io_ev(wsi, LWS_EV_START | LWS_EV_READ);
363 
364 	return 0;
365 }
366 
367 static void
elops_destroy_wsi_ev(struct lws * wsi)368 elops_destroy_wsi_ev(struct lws *wsi)
369 {
370 	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
371 
372 	ev_io_stop(pt->ev.io_loop, &wsi->w_read.ev.watcher);
373 	ev_io_stop(pt->ev.io_loop, &wsi->w_write.ev.watcher);
374 }
375 
/*
 * Ops vtable plugging the libev integration into lws core.  Positional
 * initializers; the inline comments name each member, and NULL marks
 * operations this backend does not implement.
 */
struct lws_event_loop_ops event_loop_ops_ev = {
	/* name */			"libev",
	/* init_context */		elops_init_context_ev,
	/* destroy_context1 */		NULL,
	/* destroy_context2 */		elops_destroy_context2_ev,
	/* init_vhost_listen_wsi */	elops_init_vhost_listen_wsi_ev,
	/* init_pt */			elops_init_pt_ev,
	/* wsi_logical_close */		NULL,
	/* check_client_connect_ok */	NULL,
	/* close_handle_manually */	NULL,
	/* accept */			elops_accept_ev,
	/* io */			elops_io_ev,
	/* run_pt */			elops_run_pt_ev,
	/* destroy_pt */		elops_destroy_pt_ev,
	/* destroy wsi */		elops_destroy_wsi_ev,

	/* flags */			0,
};
394