/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * This role, which wraps dbus fds in a wsi, is unusual in that the wsi it
 * creates and binds to the role has no control over the related fd's
 * lifecycle.  In fact dbus doesn't inform us directly about the lifecycle of
 * the fds it wants managed by the lws event loop.
 *
 * What it does tell us is when it wants to wait on POLLOUT and / or POLLIN,
 * and since it should stop any watchers before close, we take the approach of
 * creating a lightweight "shadow" wsi for any dbus fd that has a POLLIN or
 * POLLOUT wait active.  When the dbus fd asks to have no wait active, we
 * destroy the wsi, since this is indistinguishable from the dbus close path
 * behaviour.  If the fd actually stays alive and later asks to wait again, we
 * simply create a new shadow wsi until it looks like it is closing again.
 * (An illustrative sketch of this lifecycle follows the includes below.)
 */

#include <private-lib-core.h>

#include <libwebsockets/lws-dbus.h>

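/*
 * Purely illustrative sketch, kept out of the build with #if 0: roughly how
 * the libdbus watch callbacks defined below drive the shadow wsi lifecycle
 * for one fd.  The watch handles w_read / w_write and the ctx pointer are
 * hypothetical names, not part of this file.
 */
#if 0
        /* libdbus asks to wait for POLLIN on the fd: a shadow wsi is created */
        lws_dbus_add_watch(w_read, ctx);

        /* libdbus additionally asks for POLLOUT: the same wsi is found by fd */
        lws_dbus_add_watch(w_write, ctx);

        /* libdbus stops waiting on both: the poll flags are cleared, and once
         * no watch remains the shadow wsi is released again (see
         * __check_destroy_shadow_wsi()) */
        lws_dbus_remove_watch(w_write, ctx);
        lws_dbus_remove_watch(w_read, ctx);
#endif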
/*
 * Retrieves an existing, or creates a new, shadow wsi for an fd owned by dbus.
 *
 * Requires context + vhost lock
 */

static struct lws *
__lws_shadow_wsi(struct lws_dbus_ctx *ctx, DBusWatch *w, int fd, int create_ok)
{
        struct lws *wsi;

        if (fd < 0 || fd >= (int)ctx->vh->context->fd_limit_per_thread) {
                lwsl_err("%s: fd %d vs fds_count %d\n", __func__, fd,
                         (int)ctx->vh->context->fd_limit_per_thread);
                assert(0);

                return NULL;
        }

        wsi = wsi_from_fd(ctx->vh->context, fd);
        if (wsi) {
                assert(wsi->opaque_parent_data == ctx);

                return wsi;
        }

        if (!create_ok)
                return NULL;

        /* wsi is NULL here, so take the context / vhost from ctx */
        lws_context_assert_lock_held(ctx->vh->context);
        lws_vhost_assert_lock_held(ctx->vh);

        /* requires context lock */
        wsi = __lws_wsi_create_with_role(ctx->vh->context, ctx->tsi, NULL,
                                         ctx->vh->lc.log_cx);
        if (wsi == NULL) {
                lwsl_err("Out of mem\n");
                return NULL;
        }

        lwsl_info("%s: creating shadow wsi\n", __func__);

        wsi->desc.sockfd = fd;
        lws_role_transition(wsi, 0, LRS_ESTABLISHED, &role_ops_dbus);
        wsi->a.protocol = ctx->vh->protocols;
        wsi->shadow = 1;
        wsi->opaque_parent_data = ctx;
        ctx->w[0] = w;

        __lws_lc_tag(ctx->vh->context, &ctx->vh->context->lcg[LWSLCG_WSI],
                     &wsi->lc, "dbus|%s", ctx->vh->name);

        lws_vhost_bind_wsi(ctx->vh, wsi);
        if (__insert_wsi_socket_into_fds(ctx->vh->context, wsi)) {
                lwsl_err("inserting wsi socket into fds failed\n");
                __lws_vhost_unbind_wsi(wsi); /* cx + vh lock */
                lws_free(wsi);
                return NULL;
        }

        return wsi;
}

/*
 * Requires cx + vhost lock
 */

static int
__lws_shadow_wsi_destroy(struct lws_dbus_ctx *ctx, struct lws *wsi)
{
        lwsl_info("%s: destroying shadow wsi\n", __func__);

        lws_context_assert_lock_held(wsi->a.context);
        lws_vhost_assert_lock_held(wsi->a.vhost);

        if (__remove_wsi_socket_from_fds(wsi)) {
                lwsl_err("%s: unable to remove %d from fds\n", __func__,
                         wsi->desc.sockfd);

                return 1;
        }

        __lws_vhost_unbind_wsi(wsi);

        lws_free(wsi);

        return 0;
}


static void
handle_dispatch_status(DBusConnection *c, DBusDispatchStatus s, void *data)
{
        lwsl_info("%s: new dbus dispatch status: %d\n", __func__, s);
}

/*
 * These are complicated by the fact that libdbus can have two separate
 * DBusWatch objects for the same fd, to control watching POLLIN and POLLOUT
 * individually.
 *
 * However, we actually watch using poll(), where the unit is the fd and there
 * is a single unified events field with just POLLIN / POLLOUT flags.
 *
 * So we have to be prepared for one or two watchers arriving in any order.
 */
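/*
 * Illustrative sketch (not built): how the flags of up to two enabled
 * DBusWatch objects for the same fd collapse into one poll() events word, as
 * lws_dbus_add_watch() below does by scanning ctx->w[].  w_read / w_write
 * are hypothetical watch handles.
 */
#if 0
        unsigned int flags = 0, lws_flags = 0;

        if (dbus_watch_get_enabled(w_read))
                flags |= dbus_watch_get_flags(w_read);  /* DBUS_WATCH_READABLE */
        if (dbus_watch_get_enabled(w_write))
                flags |= dbus_watch_get_flags(w_write); /* DBUS_WATCH_WRITABLE */

        if (flags & DBUS_WATCH_READABLE)
                lws_flags |= LWS_POLLIN;
        if (flags & DBUS_WATCH_WRITABLE)
                lws_flags |= LWS_POLLOUT;
#endif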

static dbus_bool_t
lws_dbus_add_watch(DBusWatch *w, void *data)
{
        struct lws_dbus_ctx *ctx = (struct lws_dbus_ctx *)data;
        struct lws_context_per_thread *pt = &ctx->vh->context->pt[ctx->tsi];
        unsigned int flags = 0, lws_flags = 0;
        struct lws *wsi;
        int n;

        lws_context_lock(pt->context, __func__);
        lws_pt_lock(pt, __func__);

        wsi = __lws_shadow_wsi(ctx, w, dbus_watch_get_unix_fd(w), 1);
        if (!wsi) {
                lws_pt_unlock(pt);
                lws_context_unlock(pt->context);
                lwsl_err("%s: unable to get wsi\n", __func__);

                return FALSE;
        }

        for (n = 0; n < (int)LWS_ARRAY_SIZE(ctx->w); n++)
                if (w == ctx->w[n])
                        break;

        if (n == (int)LWS_ARRAY_SIZE(ctx->w))
                for (n = 0; n < (int)LWS_ARRAY_SIZE(ctx->w); n++)
                        if (!ctx->w[n]) {
                                ctx->w[n] = w;
                                break;
                        }

        for (n = 0; n < (int)LWS_ARRAY_SIZE(ctx->w); n++)
                if (ctx->w[n] && dbus_watch_get_enabled(ctx->w[n]))
                        flags |= dbus_watch_get_flags(ctx->w[n]);

        if (flags & DBUS_WATCH_READABLE)
                lws_flags |= LWS_POLLIN;
        if (flags & DBUS_WATCH_WRITABLE)
                lws_flags |= LWS_POLLOUT;

        lwsl_info("%s: %s: %p, fd %d, data %p, fl %d\n", __func__,
                  lws_wsi_tag(wsi), w, dbus_watch_get_unix_fd(w),
                  data, lws_flags);

        if (lws_flags)
                __lws_change_pollfd(wsi, 0, (int)lws_flags);

        lws_pt_unlock(pt);
        lws_context_unlock(pt->context);

        return TRUE;
}

/* cx + vh lock */
static int
__check_destroy_shadow_wsi(struct lws_dbus_ctx *ctx, struct lws *wsi)
{
        int n;

        if (!wsi)
                return 0;

        for (n = 0; n < (int)LWS_ARRAY_SIZE(ctx->w); n++)
                if (ctx->w[n])
                        return 0;

        __lws_shadow_wsi_destroy(ctx, wsi);

        if (!ctx->conn || !ctx->hup || ctx->timeouts)
                return 0;

        if (dbus_connection_get_dispatch_status(ctx->conn) ==
                                                DBUS_DISPATCH_DATA_REMAINS)
                return 0;

        if (ctx->cb_closing)
                ctx->cb_closing(ctx);

        return 1;
}

static void
lws_dbus_remove_watch(DBusWatch *w, void *data)
{
        struct lws_dbus_ctx *ctx = (struct lws_dbus_ctx *)data;
        struct lws_context_per_thread *pt = &ctx->vh->context->pt[ctx->tsi];
        unsigned int flags = 0, lws_flags = 0;
        struct lws *wsi;
        int n;

        lws_context_lock(pt->context, __func__);
        lws_pt_lock(pt, __func__);

        wsi = __lws_shadow_wsi(ctx, w, dbus_watch_get_unix_fd(w), 0);
        if (!wsi)
                goto bail;

        for (n = 0; n < (int)LWS_ARRAY_SIZE(ctx->w); n++)
                if (w == ctx->w[n]) {
                        ctx->w[n] = NULL;
                        break;
                }

        for (n = 0; n < (int)LWS_ARRAY_SIZE(ctx->w); n++)
                if (ctx->w[n])
                        flags |= dbus_watch_get_flags(ctx->w[n]);

        if ((~flags) & DBUS_WATCH_READABLE)
                lws_flags |= LWS_POLLIN;
        if ((~flags) & DBUS_WATCH_WRITABLE)
                lws_flags |= LWS_POLLOUT;

        lwsl_info("%s: %p, fd %d, data %p, clearing lws flags %d\n",
                  __func__, w, dbus_watch_get_unix_fd(w),
                  data, lws_flags);

        __lws_change_pollfd(wsi, (int)lws_flags, 0);

bail:
        lws_pt_unlock(pt);
        lws_context_unlock(pt->context);
}

static void
lws_dbus_toggle_watch(DBusWatch *w, void *data)
{
        if (dbus_watch_get_enabled(w))
                lws_dbus_add_watch(w, data);
        else
                lws_dbus_remove_watch(w, data);
}

static void
lws_dbus_sul_cb(lws_sorted_usec_list_t *sul)
{
        struct lws_context_per_thread *pt = lws_container_of(sul,
                        struct lws_context_per_thread, dbus.sul);

        lws_start_foreach_dll_safe(struct lws_dll2 *, rdt, nx,
                        lws_dll2_get_head(&pt->dbus.timer_list_owner)) {
                struct lws_role_dbus_timer *r = lws_container_of(rdt,
                                struct lws_role_dbus_timer, timer_list);

                if (time(NULL) > r->fire) {
                        lwsl_notice("%s: firing timer\n", __func__);
                        dbus_timeout_handle(r->data);
                        lws_dll2_remove(rdt);
                        lws_free(rdt);
                }
        } lws_end_foreach_dll_safe(rdt, nx);

        if (pt->dbus.timer_list_owner.count)
                lws_sul_schedule(pt->context, pt->tid, &pt->dbus.sul,
                                 lws_dbus_sul_cb, 3 * LWS_US_PER_SEC);
}

static dbus_bool_t
lws_dbus_add_timeout(DBusTimeout *t, void *data)
{
        struct lws_dbus_ctx *ctx = (struct lws_dbus_ctx *)data;
        struct lws_context_per_thread *pt = &ctx->vh->context->pt[ctx->tsi];
        int ms = dbus_timeout_get_interval(t);
        struct lws_role_dbus_timer *dbt;
        time_t ti = time(NULL);

        if (!dbus_timeout_get_enabled(t))
                return TRUE;

        if (ms < 1000)
                ms = 1000;

        dbt = lws_malloc(sizeof(*dbt), "dbus timer");
        if (!dbt)
                return FALSE;

        lwsl_info("%s: adding timeout %dms\n", __func__,
                  dbus_timeout_get_interval(t));

        dbt->data = t;
        dbt->fire = ti + (ms / 1000); /* interval in whole seconds from now */
        dbt->timer_list.prev = NULL;
        dbt->timer_list.next = NULL;
        dbt->timer_list.owner = NULL;
        lws_dll2_add_head(&dbt->timer_list, &pt->dbus.timer_list_owner);

        if (!pt->dbus.sul.list.owner)
                lws_sul_schedule(pt->context, pt->tid, &pt->dbus.sul,
                                 lws_dbus_sul_cb, 3 * LWS_US_PER_SEC);

        ctx->timeouts++;

        return TRUE;
}

static void
lws_dbus_remove_timeout(DBusTimeout *t, void *data)
{
        struct lws_dbus_ctx *ctx = (struct lws_dbus_ctx *)data;
        struct lws_context_per_thread *pt = &ctx->vh->context->pt[ctx->tsi];

        lwsl_info("%s: t %p, data %p\n", __func__, t, data);

        lws_start_foreach_dll_safe(struct lws_dll2 *, rdt, nx,
                        lws_dll2_get_head(&pt->dbus.timer_list_owner)) {
                struct lws_role_dbus_timer *r = lws_container_of(rdt,
                                struct lws_role_dbus_timer, timer_list);
                if (t == r->data) {
                        lws_dll2_remove(rdt);
                        lws_free(rdt);
                        ctx->timeouts--;
                        break;
                }
        } lws_end_foreach_dll_safe(rdt, nx);

        if (!pt->dbus.timer_list_owner.count)
                lws_sul_cancel(&pt->dbus.sul);
}

static void
lws_dbus_toggle_timeout(DBusTimeout *t, void *data)
{
        if (dbus_timeout_get_enabled(t))
                lws_dbus_add_timeout(t, data);
        else
                lws_dbus_remove_timeout(t, data);
}

/*
 * This sets up a connection along the same lines as
 * dbus_connection_setup_with_g_main(), but using the lws event loop instead.
 * (A minimal usage sketch follows the function.)
 */

int
lws_dbus_connection_setup(struct lws_dbus_ctx *ctx, DBusConnection *conn,
                          lws_dbus_closing_t cb_closing)
{
        int n;

        ctx->conn = conn;
        ctx->cb_closing = cb_closing;
        ctx->hup = 0;
        ctx->timeouts = 0;
        for (n = 0; n < (int)LWS_ARRAY_SIZE(ctx->w); n++)
                ctx->w[n] = NULL;

        if (!dbus_connection_set_watch_functions(conn, lws_dbus_add_watch,
                                                 lws_dbus_remove_watch,
                                                 lws_dbus_toggle_watch,
                                                 ctx, NULL)) {
                lwsl_err("%s: dbus_connection_set_watch_functions fail\n",
                         __func__);
                return 1;
        }

        if (!dbus_connection_set_timeout_functions(conn,
                                                   lws_dbus_add_timeout,
                                                   lws_dbus_remove_timeout,
                                                   lws_dbus_toggle_timeout,
                                                   ctx, NULL)) {
                lwsl_err("%s: dbus_connection_set_timeout_functions fail\n",
                         __func__);
                return 1;
        }

        dbus_connection_set_dispatch_status_function(conn,
                                                     handle_dispatch_status,
                                                     ctx, NULL);

        return 0;
}
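/*
 * Minimal usage sketch (not built), assuming the caller already holds a
 * struct lws_vhost *vhost whose event loop should service the dbus fds; the
 * names my_dbus_closing and dctx are hypothetical.  The closing callback is
 * written the way cb_closing is invoked above; check lws-dbus.h for the exact
 * lws_dbus_closing_t typedef.
 */
#if 0
        static void
        my_dbus_closing(struct lws_dbus_ctx *ctx)
        {
                /* the dbus connection is going away: drop anything that still
                 * references it */
        }

        /* ... and, inside your init code: */

        struct lws_dbus_ctx dctx;
        DBusConnection *conn;
        DBusError err;

        memset(&dctx, 0, sizeof(dctx));
        dctx.vh = vhost;        /* vhost whose event loop services the fds */
        dctx.tsi = 0;           /* service thread index */

        dbus_error_init(&err);
        conn = dbus_bus_get(DBUS_BUS_SESSION, &err);
        if (!conn || dbus_error_is_set(&err))
                return;         /* handle / log the error */

        if (lws_dbus_connection_setup(&dctx, conn, my_dbus_closing))
                return;         /* watch / timeout registration failed */
#endif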

/*
 * This wraps dbus_server_listen(), additionally taking care of the
 * event-loop-related setup.  (A minimal usage sketch follows the function.)
 */

DBusServer *
lws_dbus_server_listen(struct lws_dbus_ctx *ctx, const char *ads, DBusError *e,
                       DBusNewConnectionFunction new_conn)
{
        ctx->cb_closing = NULL;
        ctx->hup = 0;
        ctx->timeouts = 0;

        ctx->dbs = dbus_server_listen(ads, e);
        if (!ctx->dbs)
                return NULL;

        dbus_server_set_new_connection_function(ctx->dbs, new_conn, ctx, NULL);

        if (!dbus_server_set_watch_functions(ctx->dbs, lws_dbus_add_watch,
                                             lws_dbus_remove_watch,
                                             lws_dbus_toggle_watch,
                                             ctx, NULL)) {
                lwsl_err("%s: dbus_server_set_watch_functions fail\n",
                         __func__);
                goto bail;
        }

        if (!dbus_server_set_timeout_functions(ctx->dbs, lws_dbus_add_timeout,
                                               lws_dbus_remove_timeout,
                                               lws_dbus_toggle_timeout,
                                               ctx, NULL)) {
                lwsl_err("%s: dbus_server_set_timeout_functions fail\n",
                         __func__);
                goto bail;
        }

        return ctx->dbs;

bail:
        dbus_server_disconnect(ctx->dbs);
        dbus_server_unref(ctx->dbs);

        return NULL;
}
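/*
 * Minimal usage sketch (not built), following the same assumptions as the
 * connection-setup sketch above; my_new_conn and the abstract socket address
 * are hypothetical.  A new connection is typically given its own
 * lws_dbus_ctx and passed to lws_dbus_connection_setup().
 */
#if 0
        static void
        my_new_conn(DBusServer *server, DBusConnection *conn, void *d)
        {
                /* ref conn, allocate a per-connection lws_dbus_ctx and call
                 * lws_dbus_connection_setup() on it */
        }

        /* ... and, inside your init code: */

        struct lws_dbus_ctx dctx;
        DBusError err;

        memset(&dctx, 0, sizeof(dctx));
        dctx.vh = vhost;
        dctx.tsi = 0;

        dbus_error_init(&err);
        if (!lws_dbus_server_listen(&dctx, "unix:abstract=lws-dbus-test", &err,
                                    my_new_conn))
                return;         /* err may hold the reason */
#endif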


/*
 * There shouldn't be a race here between watcher removal and the poll wait,
 * because everything, including the dbus activity, is serialized in one event
 * loop.
 *
 * If dbus removes the watcher and we destroy the wsi and its fd entry before
 * this runs, we can no longer map the fd to the now-invalid wsi pointer, so
 * this cannot be called on it.
 */

static int
rops_handle_POLLIN_dbus(struct lws_context_per_thread *pt, struct lws *wsi,
                        struct lws_pollfd *pollfd)
{
        struct lws_dbus_ctx *ctx =
                        (struct lws_dbus_ctx *)wsi->opaque_parent_data;
        unsigned int flags = 0;
        int n;

        if (pollfd->revents & LWS_POLLIN)
                flags |= DBUS_WATCH_READABLE;
        if (pollfd->revents & LWS_POLLOUT)
                flags |= DBUS_WATCH_WRITABLE;

        if (pollfd->revents & (LWS_POLLHUP))
                ctx->hup = 1;

        /*
         * POLLIN and / or POLLOUT gets us called here on the corresponding
         * shadow wsi.  wsi->opaque_parent_data is the lws_dbus_ctx bound to
         * the wsi, which holds the related watcher handles.
         */

        for (n = 0; n < (int)LWS_ARRAY_SIZE(ctx->w); n++)
                if (ctx->w[n] && !dbus_watch_handle(ctx->w[n], flags))
                        lwsl_err("%s: dbus_watch_handle failed\n", __func__);

        if (ctx->conn) {
                lwsl_info("%s: conn: flags %d\n", __func__, flags);

                while (dbus_connection_get_dispatch_status(ctx->conn) ==
                                                DBUS_DISPATCH_DATA_REMAINS)
                        dbus_connection_dispatch(ctx->conn);

                handle_dispatch_status(NULL, DBUS_DISPATCH_DATA_REMAINS, NULL);

                __check_destroy_shadow_wsi(ctx, wsi);
        } else
                if (ctx->dbs)
                        /* ??? */
                        lwsl_debug("%s: dbs: %d\n", __func__, flags);

        return LWS_HPI_RET_HANDLED;
}

static int
rops_pt_init_destroy_dbus(struct lws_context *context,
                          const struct lws_context_creation_info *info,
                          struct lws_context_per_thread *pt, int destroy)
{
        if (destroy)
                lws_sul_cancel(&pt->dbus.sul);

        return 0;
}

static const lws_rops_t rops_table_dbus[] = {
        /* 1 */ { .pt_init_destroy = rops_pt_init_destroy_dbus },
        /* 2 */ { .handle_POLLIN = rops_handle_POLLIN_dbus },
};

const struct lws_role_ops role_ops_dbus = {
        /* role name */                 "dbus",
        /* alpn id */                   NULL,

        /* rops_table */                rops_table_dbus,
        /* rops_idx */                  {
          /* LWS_ROPS_check_upgrades */
          /* LWS_ROPS_pt_init_destroy */                0x01,
          /* LWS_ROPS_init_vhost */
          /* LWS_ROPS_destroy_vhost */                  0x00,
          /* LWS_ROPS_service_flag_pending */
          /* LWS_ROPS_handle_POLLIN */                  0x02,
          /* LWS_ROPS_handle_POLLOUT */
          /* LWS_ROPS_perform_user_POLLOUT */           0x00,
          /* LWS_ROPS_callback_on_writable */
          /* LWS_ROPS_tx_credit */                      0x00,
          /* LWS_ROPS_write_role_protocol */
          /* LWS_ROPS_encapsulation_parent */           0x00,
          /* LWS_ROPS_alpn_negotiated */
          /* LWS_ROPS_close_via_role_protocol */        0x00,
          /* LWS_ROPS_close_role */
          /* LWS_ROPS_close_kill_connection */          0x00,
          /* LWS_ROPS_destroy_role */
          /* LWS_ROPS_adoption_bind */                  0x00,
          /* LWS_ROPS_client_bind */
          /* LWS_ROPS_issue_keepalive */                0x00,
                                        },

        /* adoption_cb clnt, srv */     { 0, 0 },
        /* rx_cb clnt, srv */           { 0, 0 },
        /* writeable cb clnt, srv */    { 0, 0 },
        /* close cb clnt, srv */        { 0, 0 },
        /* protocol_bind_cb c,s */      { 0, 0 },
        /* protocol_unbind_cb c,s */    { 0, 0 },
        /* file_handle */               0,
};