/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

#include <glib-unix.h>

#define wsi_to_subclass(_w) ((_w)->w_read.glib.source)
#define wsi_to_gsource(_w) ((GSource *)wsi_to_subclass(_w))
#define pt_to_loop(_pt) ((_pt)->glib.loop)
#define pt_to_g_main_context(_pt) g_main_loop_get_context(pt_to_loop(_pt))

static gboolean
lws_glib_idle_timer_cb(void *p);

static gboolean
lws_glib_hrtimer_cb(void *p);

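/*
 * GSource check(): we have something to dispatch if glib reports any
 * condition pending on the unix fd we registered on this source.
 */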
static gboolean
lws_glib_check(GSource *src)
{
	struct lws_io_watcher_glib_subclass *sub =
			(struct lws_io_watcher_glib_subclass *)src;

	return !!g_source_query_unix_fd(src, sub->tag);
}

/*
 * These helpers attach only to the main_context that belongs to the pt's glib
 * mainloop. The simpler g_timeout_add() and g_idle_add() are forbidden
 * because they implicitly choose the default main context to attach to
 * instead of specifically the loop bound to the pt.
 *
 * https://developer.gnome.org/programming-guidelines/unstable/main-contexts.html.en#what-is-gmaincontext
 */

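/* Arm the idle source on the pt's main context, if it is not armed already */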
static int
lws_glib_set_idle(struct lws_context_per_thread *pt)
{
	GSource *gis;

	if (pt->glib.idle_tag)
		return 0;

	gis = g_idle_source_new();
	if (!gis)
		return 1;

	g_source_set_callback(gis, lws_glib_idle_timer_cb, pt, NULL);
	pt->glib.idle_tag = g_source_attach(gis, pt_to_g_main_context(pt));

	return 0;
}

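/* Arm a one-shot timeout of ms milliseconds on the pt's main context */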
static int
lws_glib_set_timeout(struct lws_context_per_thread *pt, unsigned int ms)
{
	GSource *gts;

	gts = g_timeout_source_new(ms);
	if (!gts)
		return 1;

	g_source_set_callback(gts, lws_glib_hrtimer_cb, pt, NULL);
	pt->glib.hrtimer_tag = g_source_attach(gts, pt_to_g_main_context(pt));

	return 0;
}

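/*
 * GSource dispatch(): translate the pending GIOCondition on the fd into an
 * lws_pollfd and hand it to lws_service_fd_tsi() for servicing, then re-arm
 * the idle source so deferred work can run when glib next goes idle.
 */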
static gboolean
lws_glib_dispatch(GSource *src, GSourceFunc x, gpointer userData)
{
	struct lws_io_watcher_glib_subclass *sub =
			(struct lws_io_watcher_glib_subclass *)src;
	struct lws_context_per_thread *pt;
	struct lws_pollfd eventfd;
	GIOCondition cond;

	cond = g_source_query_unix_fd(src, sub->tag);
	eventfd.revents = cond;

	/* translate from glib event namespace to platform */

	if (cond & G_IO_IN)
		eventfd.revents |= LWS_POLLIN;
	if (cond & G_IO_OUT)
		eventfd.revents |= LWS_POLLOUT;
	if (cond & G_IO_ERR)
		eventfd.revents |= LWS_POLLHUP;
	if (cond & G_IO_HUP)
		eventfd.revents |= LWS_POLLHUP;

	eventfd.events = eventfd.revents;
	eventfd.fd = sub->wsi->desc.sockfd;

	lwsl_debug("%s: wsi %p: fd %d, events %d\n", __func__, sub->wsi,
		   eventfd.fd, eventfd.revents);

	pt = &sub->wsi->context->pt[(int)sub->wsi->tsi];
	if (pt->is_destroyed)
		return G_SOURCE_CONTINUE;

	lws_service_fd_tsi(sub->wsi->context, &eventfd, sub->wsi->tsi);

	if (!pt->glib.idle_tag)
		lws_glib_set_idle(pt);

	if (pt->destroy_self)
		lws_context_destroy(pt->context);

	return G_SOURCE_CONTINUE;
}

static const GSourceFuncs lws_glib_source_ops = {
	.prepare	= NULL,
	.check		= lws_glib_check,
	.dispatch	= lws_glib_dispatch,
	.finalize	= NULL,
};

/*
 * This is the callback for a timer object that is set to the earliest scheduled
 * lws event... it services any lws scheduled events that are ready, and then
 * resets the event loop timer to the earliest remaining event, if any.
 */

static gboolean
lws_glib_hrtimer_cb(void *p)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)p;
	unsigned int ms;
	lws_usec_t us;

	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
	if (us) {
		ms = us / LWS_US_PER_MS;
		if (!ms)
			ms = 1;

		lws_glib_set_timeout(pt, ms);
	}

	lws_pt_unlock(pt);

	lws_glib_set_idle(pt);

	return FALSE; /* stop it repeating */
}

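/*
 * Idle callback: runs when the glib loop has nothing else to do.  It handles
 * pending rxflow, services any ripe scheduled events, and keeps itself alive
 * (returns TRUE) while any connection still needs forced service.  Once there
 * is nothing left to do, it deregisters itself by returning FALSE.
 */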
static gboolean
lws_glib_idle_timer_cb(void *p)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)p;

	if (pt->is_destroyed)
		return FALSE;

	lws_service_do_ripe_rxflow(pt);
	lws_glib_hrtimer_cb(pt);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
			return TRUE;
	}

	if (pt->destroy_self)
		lws_context_destroy(pt->context);

	/*
	 * For glib, this disables the idle callback. Otherwise we keep
	 * coming back here immediately endlessly.
	 *
	 * We reenable the idle callback on the next network or scheduled event
	 */

	pt->glib.idle_tag = 0;

	return FALSE;
}

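/*
 * SIGINT watcher (only registered on loops we created ourselves): defer to a
 * user-supplied signal callback if there is one, otherwise quit the mainloop.
 */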
void
lws_glib_sigint_cb(void *ctx)
{
	struct lws_context_per_thread *pt = ctx;

	pt->inside_service = 1;

	if (pt->context->eventlib_signal_cb) {
		pt->context->eventlib_signal_cb(NULL, 0);

		return;
	}
	if (!pt->event_loop_foreign)
		g_main_loop_quit(pt_to_loop(pt));
}

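/* Per-context init: note the user signal cb and bind each pt to the context */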
static int
elops_init_context_glib(struct lws_context *context,
			const struct lws_context_creation_info *info)
{
	int n;

	context->eventlib_signal_cb = info->signal_cb;

	for (n = 0; n < context->count_threads; n++)
		context->pt[n].w_sigint.context = context;

	return 0;
}

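/*
 * Adopt an accepted (or listen) wsi into the event loop: create our GSource
 * subclass for it, watch its fd (socket or file) for input and attach the
 * source to the pt's main context.
 */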
static int
elops_accept_glib(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	int fd;

	assert(!wsi_to_subclass(wsi));

	wsi_to_subclass(wsi) = (struct lws_io_watcher_glib_subclass *)
			g_source_new((GSourceFuncs *)&lws_glib_source_ops,
				     sizeof(*wsi_to_subclass(wsi)));
	if (!wsi_to_subclass(wsi))
		return 1;

	wsi->w_read.context = wsi->context;
	wsi_to_subclass(wsi)->wsi = wsi;

	if (wsi->role_ops->file_handle)
		fd = wsi->desc.filefd;
	else
		fd = wsi->desc.sockfd;

	wsi_to_subclass(wsi)->tag = g_source_add_unix_fd(wsi_to_gsource(wsi),
						fd, (GIOCondition)LWS_POLLIN);
	wsi->w_read.actual_events = LWS_POLLIN;

	g_source_set_callback(wsi_to_gsource(wsi),
			      G_SOURCE_FUNC(lws_service_fd), wsi->context, NULL);

	g_source_attach(wsi_to_gsource(wsi), pt_to_g_main_context(pt));

	return 0;
}

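/*
 * Per-pt init: use the foreign glib loop we were given, or create our own,
 * then hook up any existing listen sockets, arm the idle source and, if the
 * loop is ours, watch SIGINT.
 */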
static int
elops_init_pt_glib(struct lws_context *context, void *_loop, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_vhost *vh = context->vhost_list;
	GMainLoop *loop = (GMainLoop *)_loop;

	if (!loop)
		loop = g_main_loop_new(NULL, 0);
	else
		context->pt[tsi].event_loop_foreign = 1;

	if (!loop) {
		lwsl_err("%s: creating glib loop failed\n", __func__);

		return -1;
	}

	pt->glib.loop = loop;

	/*
	 * Initialize all events with the listening sockets
	 * and register a callback for read operations
	 */

	while (vh) {
		if (vh->lserv_wsi)
			elops_accept_glib(vh->lserv_wsi);

		vh = vh->vhost_next;
	}

	lws_glib_set_idle(pt);

	/* Register the signal watcher unless it's a foreign loop */

	if (pt->event_loop_foreign)
		return 0;

	pt->glib.sigint_tag = g_unix_signal_add(SIGINT,
					G_SOURCE_FUNC(lws_glib_sigint_cb), pt);

	return 0;
}

/*
 * The lws core is changing which events it wants to wait on for this wsi
 */

static void
elops_io_glib(struct lws *wsi, int flags)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	GIOCondition cond = wsi->w_read.actual_events | G_IO_ERR;

	if (!pt_to_loop(pt) || wsi->context->being_destroyed ||
	    pt->is_destroyed)
		return;

	/*
	 * We are being given individual set / clear operations using
	 * LWS_EV_ common namespace, convert them to glib namespace bitfield
	 */

	if (flags & LWS_EV_READ) {
		if (flags & LWS_EV_STOP)
			cond &= ~(G_IO_IN | G_IO_HUP);
		else
			cond |= G_IO_IN | G_IO_HUP;
	}

	if (flags & LWS_EV_WRITE) {
		if (flags & LWS_EV_STOP)
			cond &= ~G_IO_OUT;
		else
			cond |= G_IO_OUT;
	}

	wsi->w_read.actual_events = cond;

	lwsl_debug("%s: wsi %p, fd %d, 0x%x/0x%x\n", __func__, wsi,
		   wsi->desc.sockfd, flags, (int)cond);

	g_source_modify_unix_fd(wsi_to_gsource(wsi), wsi_to_subclass(wsi)->tag,
				cond);
}

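/* Run the pt's glib mainloop until something asks it to quit */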
static void
elops_run_pt_glib(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];

	if (pt_to_loop(pt))
		g_main_loop_run(pt_to_loop(pt));
}

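/*
 * Remove the wsi's fd from its GSource and destroy the source, detaching it
 * from the main context.
 */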
static void
elops_destroy_wsi_glib(struct lws *wsi)
{
	struct lws_context_per_thread *pt;

	if (!wsi)
		return;

	pt = &wsi->context->pt[(int)wsi->tsi];
	if (pt->is_destroyed)
		return;

	if (!wsi_to_gsource(wsi))
		return;

	if (wsi_to_subclass(wsi)->tag) {
		g_source_remove_unix_fd(wsi_to_gsource(wsi),
					wsi_to_subclass(wsi)->tag);
		wsi_to_subclass(wsi)->tag = NULL;
	}

	g_source_destroy(wsi_to_gsource(wsi));
	wsi_to_subclass(wsi) = NULL;
}

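/*
 * Per-pt teardown: drop the sources for the listen sockets and the timer,
 * and if the loop is ours rather than foreign, also the SIGINT watcher and
 * the mainloop itself.
 */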
static void
elops_destroy_pt_glib(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_vhost *vh = context->vhost_list;

	if (!pt_to_loop(pt))
		return;

	/*
	 * Free all events with the listening sockets
	 */
	while (vh) {
		if (vh->lserv_wsi)
			elops_destroy_wsi_glib(vh->lserv_wsi);

		vh = vh->vhost_next;
	}

	if (pt->glib.hrtimer_tag)
		g_source_remove(pt->glib.hrtimer_tag);

	if (!pt->event_loop_foreign) {
		g_main_loop_quit(pt_to_loop(pt));
		g_source_remove(pt->glib.sigint_tag);
		g_main_loop_unref(pt_to_loop(pt));
	}

	pt_to_loop(pt) = NULL;
}

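/* Late context teardown: ask any mainloops we created ourselves to quit */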
static int
elops_destroy_context2_glib(struct lws_context *context)
{
	struct lws_context_per_thread *pt = &context->pt[0];
	int n;

	for (n = 0; n < (int)context->count_threads; n++) {
		if (!pt->event_loop_foreign)
			g_main_loop_quit(pt_to_loop(pt));
		pt++;
	}

	return 0;
}

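/* Logical close: just drop our GSource for the wsi */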
static int
elops_wsi_logical_close_glib(struct lws *wsi)
{
	elops_destroy_wsi_glib(wsi);

	return 0;
}

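/* The event-lib ops table used when the glib backend drives the context */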
struct lws_event_loop_ops event_loop_ops_glib = {
	/* name */			"glib",
	/* init_context */		elops_init_context_glib,
	/* destroy_context1 */		NULL,
	/* destroy_context2 */		elops_destroy_context2_glib,
	/* init_vhost_listen_wsi */	elops_accept_glib,
	/* init_pt */			elops_init_pt_glib,
	/* wsi_logical_close */		elops_wsi_logical_close_glib,
	/* check_client_connect_ok */	NULL,
	/* close_handle_manually */	NULL,
	/* accept */			elops_accept_glib,
	/* io */			elops_io_glib,
	/* run_pt */			elops_run_pt_glib,
	/* destroy_pt */		elops_destroy_pt_glib,
	/* destroy wsi */		elops_destroy_wsi_glib,

	/* flags */			LELOF_DESTROY_FINAL,
};