#include <systemd/sd-event.h>

#include <private-lib-core.h>
#include "private-lib-event-libs-sdevent.h"

#define pt_to_priv_sd(_pt) ((struct lws_pt_eventlibs_sdevent *)(_pt)->evlib_pt)
#define wsi_to_priv_sd(_w) ((struct lws_wsi_watcher_sdevent *)(_w)->evlib_wsi)

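/*
 * Private per-pt and per-wsi state for the sd-event integration: lws
 * allocates these alongside each pt and wsi according to evlib_size_pt /
 * evlib_size_wsi in the ops struct at the bottom of this file, and the
 * macros above recover them.
 */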
struct lws_pt_eventlibs_sdevent {
	struct lws_context_per_thread *pt;
	struct sd_event *io_loop;
	struct sd_event_source *sultimer;
	struct sd_event_source *idletimer;
};

struct lws_wsi_watcher_sdevent {
	struct sd_event_source *source;
	uint32_t events;
};

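/*
 * Timer callback for the scheduled-event (sul) list: service whatever is
 * ripe now, then re-arm the oneshot timer for the next pending deadline,
 * if any.
 */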
static int
sultimer_handler(sd_event_source *s, uint64_t usec, void *userdata)
{
	struct lws_context_per_thread *pt =
			(struct lws_context_per_thread *)userdata;

	lws_usec_t us;

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us) {
		uint64_t at;

		sd_event_now(sd_event_source_get_event(s), CLOCK_MONOTONIC, &at);
		at += (uint64_t)us;
		sd_event_source_set_time(pt_to_priv_sd(pt)->sultimer, at);
		sd_event_source_set_enabled(pt_to_priv_sd(pt)->sultimer,
					    SD_EVENT_ONESHOT);
	}

	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);

	return 0;
}

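/*
 * Idle-priority callback, run once the loop has nothing else to do: flush
 * pending rxflow, do any forced service, re-arm the sul timer, then put
 * ourselves back to sleep until the io handler re-enables us.
 */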
static int
idle_handler(sd_event_source *s, uint64_t usec, void *userdata)
{
	struct lws_context_per_thread *pt =
			(struct lws_context_per_thread *)userdata;

	lws_usec_t us;

	lws_service_do_ripe_rxflow(pt);

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);

	/* account for sultimer */

	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());

	if (us) {
		uint64_t at;

		sd_event_now(sd_event_source_get_event(s), CLOCK_MONOTONIC, &at);
		at += (uint64_t)us;
		sd_event_source_set_time(pt_to_priv_sd(pt)->sultimer, at);
		sd_event_source_set_enabled(pt_to_priv_sd(pt)->sultimer,
					    SD_EVENT_ONESHOT);
	}

	sd_event_source_set_enabled(pt_to_priv_sd(pt)->idletimer, SD_EVENT_OFF);

	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);

	return 0;
}

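/*
 * IO callback for every wsi fd: translate the epoll revents sd-event hands
 * us into an lws_pollfd, let lws service the fd, then kick the idle timer
 * and re-arm the (oneshot) watcher so further events can arrive.
 */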
static int
sock_accept_handler(sd_event_source *s, int fd, uint32_t revents, void *userdata)
{
	struct lws *wsi = (struct lws *)userdata;
	struct lws_context *context = wsi->a.context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct sd_event_source *idletimer, *watcher;
	struct lws_pollfd eventfd;

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	if (pt->is_destroyed)
		goto bail;

	eventfd.fd = fd;
	eventfd.events = 0;
	eventfd.revents = 0;

	if (revents & EPOLLIN) {
		eventfd.events |= LWS_POLLIN;
		eventfd.revents |= LWS_POLLIN;
	}

	if (revents & EPOLLOUT) {
		eventfd.events |= LWS_POLLOUT;
		eventfd.revents |= LWS_POLLOUT;
	}

	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);

	lws_service_fd_tsi(context, &eventfd, wsi->tsi);

	if (pt->destroy_self) {
		lws_context_destroy(pt->context);
		return -1;
	}

	/* fire idle handler */
	idletimer = pt_to_priv_sd(pt)->idletimer;
	if (idletimer) {
		sd_event_source_set_time(idletimer, (uint64_t)0);
		sd_event_source_set_enabled(idletimer, SD_EVENT_ON);
	}

	/*
	 * allow further events
	 *
	 * Note: do not move this assignment up, lws_service_fd_tsi may
	 * invalidate it!
	 */
	watcher = wsi_to_priv_sd(wsi)->source;
	if (watcher)
		sd_event_source_set_enabled(watcher, SD_EVENT_ONESHOT);

	return 0;

bail:
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);

	return -1;
}

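/*
 * Change which events the wsi's event source listens for.  flags must
 * combine one of LWS_EV_START / LWS_EV_STOP with LWS_EV_READ and/or
 * LWS_EV_WRITE; we keep a shadow copy of the epoll event mask in the
 * per-wsi private area.
 */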
static void
io_sd(struct lws *wsi, unsigned int flags)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	/*
	 * Only manipulate if there is an event source, and if
	 * the pt is still alive
	 */
	if (!pt_to_priv_sd(pt)->io_loop || !wsi_to_priv_sd(wsi)->source ||
	    pt->is_destroyed)
		return;

	// assert that the requested flags do not contain anything unexpected
	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_wsi_err(wsi, "assert: flags %d", flags);
		assert(0);
	}

	// we are overdoing it a bit here, so it resembles the structure in libuv.c
	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			wsi_to_priv_sd(wsi)->events |= EPOLLOUT;

		if (flags & LWS_EV_READ)
			wsi_to_priv_sd(wsi)->events |= EPOLLIN;

		sd_event_source_set_io_events(wsi_to_priv_sd(wsi)->source,
					      wsi_to_priv_sd(wsi)->events);
		sd_event_source_set_enabled(wsi_to_priv_sd(wsi)->source,
					    SD_EVENT_ONESHOT);
	} else {
		if (flags & LWS_EV_WRITE)
			wsi_to_priv_sd(wsi)->events &= (uint32_t)~EPOLLOUT;

		if (flags & LWS_EV_READ)
			wsi_to_priv_sd(wsi)->events &= (uint32_t)~EPOLLIN;

		sd_event_source_set_io_events(wsi_to_priv_sd(wsi)->source,
					      wsi_to_priv_sd(wsi)->events);

		/* keep the source armed only while some event is still wanted */
		if (wsi_to_priv_sd(wsi)->events & (EPOLLIN | EPOLLOUT))
			sd_event_source_set_enabled(wsi_to_priv_sd(wsi)->source,
						    SD_EVENT_ONESHOT);
		else
			sd_event_source_set_enabled(wsi_to_priv_sd(wsi)->source,
						    SD_EVENT_OFF);
	}
}

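/*
 * Attach a vhost's listen wsi to the loop: create its io event source and
 * start watching for POLLIN.
 */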
static int
init_vhost_listen_wsi_sd(struct lws *wsi)
{
	struct lws_context_per_thread *pt;

	if (!wsi)
		return 0;

	pt = &wsi->a.context->pt[(int)wsi->tsi];

	sd_event_add_io(pt_to_priv_sd(pt)->io_loop,
			&wsi_to_priv_sd(wsi)->source,
			wsi->desc.sockfd,
			wsi_to_priv_sd(wsi)->events,
			sock_accept_handler,
			wsi);

	io_sd(wsi, LWS_EV_START | LWS_EV_READ);

	return 0;
}

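/* lws_dll2 iterator wrapper, applied to each listen wsi on the vhosts */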
static int
elops_listen_init_sdevent(struct lws_dll2 *d, void *user)
{
	struct lws *wsi = lws_container_of(d, struct lws, listen_list);

	if (init_vhost_listen_wsi_sd(wsi) == -1)
		return -1;

	return 0;
}

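/*
 * Per-pt init: adopt a foreign sd_event loop if the application passed one
 * in, otherwise take a ref on the thread's default loop, then create the
 * sul and idle timer sources on first use.
 */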
static int
init_pt_sd(struct lws_context *context, void *_loop, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_sdevent *ptpriv = pt_to_priv_sd(pt);
	struct sd_event *loop = (struct sd_event *)_loop;
	int first = 1; /* first to create and initialize the loop */

	ptpriv->pt = pt;

	/* make sure we have an event loop */
	if (!ptpriv->io_loop) {
		if (!loop) {
			if (sd_event_default(&loop) < 0) {
				lwsl_cx_err(context, "sd_event_default failed");

				return -1;
			}
			pt->event_loop_foreign = 0;
		} else {
			sd_event_ref(loop);
			pt->event_loop_foreign = 1;
		}

		ptpriv->io_loop = loop;
	} else
		/*
		 * If the loop was initialized before, we do not need to
		 * do full initialization
		 */
		first = 0;

	lws_vhost_foreach_listen_wsi(context, NULL, elops_listen_init_sdevent);

	if (first) {

		if (0 > sd_event_add_time(loop,
					  &ptpriv->sultimer,
					  CLOCK_MONOTONIC,
					  UINT64_MAX,
					  0,
					  sultimer_handler,
					  (void *)pt))
			return -1;

		if (0 > sd_event_add_time(loop,
					  &ptpriv->idletimer,
					  CLOCK_MONOTONIC,
					  0,
					  0,
					  idle_handler,
					  (void *)pt))
			return -1;

		sd_event_source_set_enabled(ptpriv->idletimer, SD_EVENT_ON);

		if (0 > sd_event_source_set_priority(ptpriv->idletimer,
						     SD_EVENT_PRIORITY_IDLE))
			return -1;
	}

	return 0;
}

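/* detach the wsi from the loop and drop its event source */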
static void
wsi_destroy_sd(struct lws *wsi)
{
	if (!wsi)
		return;

	io_sd(wsi, LWS_EV_STOP | (LWS_EV_READ | LWS_EV_WRITE));

	if (wsi_to_priv_sd(wsi)->source) {
		sd_event_source_set_enabled(wsi_to_priv_sd(wsi)->source,
					    SD_EVENT_OFF);
		sd_event_source_unref(wsi_to_priv_sd(wsi)->source);
		wsi_to_priv_sd(wsi)->source = NULL;
	}
}

static int
wsi_logical_close_sd(struct lws *wsi)
{
	wsi_destroy_sd(wsi);

	return 0;
}

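/*
 * A new wsi is joining the pt: create its io event source on the right fd,
 * depending on whether the role wraps a file or a socket.
 */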
static int
sock_accept_sd(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	if (wsi->role_ops->file_handle)
		sd_event_add_io(pt_to_priv_sd(pt)->io_loop,
				&wsi_to_priv_sd(wsi)->source,
				wsi->desc.filefd,
				wsi_to_priv_sd(wsi)->events,
				sock_accept_handler,
				wsi);
	else
		sd_event_add_io(pt_to_priv_sd(pt)->io_loop,
				&wsi_to_priv_sd(wsi)->source,
				wsi->desc.sockfd,
				wsi_to_priv_sd(wsi)->events,
				sock_accept_handler,
				wsi);

	return 0;
}

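/* run one iteration of the loop, blocking indefinitely if nothing is ripe */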
static void
run_pt_sd(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_sdevent *ptpriv = pt_to_priv_sd(pt);

	if (ptpriv->io_loop)
		sd_event_run(ptpriv->io_loop, (uint64_t)-1);
}

static int
elops_listen_destroy_sdevent(struct lws_dll2 *d, void *user)
{
	struct lws *wsi = lws_container_of(d, struct lws, listen_list);

	wsi_logical_close_sd(wsi);

	return 0;
}

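/*
 * Per-pt teardown: close the listen wsis, disable and unref both timer
 * sources, then drop our ref on the loop itself.
 */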
static void
destroy_pt_sd(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_sdevent *ptpriv = pt_to_priv_sd(pt);

	lws_vhost_foreach_listen_wsi(context, NULL, elops_listen_destroy_sdevent);

	if (ptpriv->sultimer) {
		sd_event_source_set_enabled(ptpriv->sultimer, SD_EVENT_OFF);
		sd_event_source_unref(ptpriv->sultimer);
		ptpriv->sultimer = NULL;
	}

	if (ptpriv->idletimer) {
		sd_event_source_set_enabled(ptpriv->idletimer, SD_EVENT_OFF);
		sd_event_source_unref(ptpriv->idletimer);
		ptpriv->idletimer = NULL;
	}

	if (ptpriv->io_loop) {
		sd_event_unref(ptpriv->io_loop);
		ptpriv->io_loop = NULL;
	}
}

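/*
 * The vtable lws uses to drive this event library; unused hooks stay NULL,
 * and the evlib_size_* members tell lws how much private storage to
 * allocate per object.
 */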
const struct lws_event_loop_ops event_loop_ops_sdevent = {
	.name = "sdevent",
	.init_context = NULL,
	.destroy_context1 = NULL,
	.destroy_context2 = NULL,
	.init_vhost_listen_wsi = init_vhost_listen_wsi_sd,
	.init_pt = init_pt_sd,
	.wsi_logical_close = wsi_logical_close_sd,
	.check_client_connect_ok = NULL,
	.close_handle_manually = NULL,
	.sock_accept = sock_accept_sd,
	.io = io_sd,
	.run_pt = run_pt_sd,
	.destroy_pt = destroy_pt_sd,
	.destroy_wsi = wsi_destroy_sd,

	.flags = 0,

	.evlib_size_ctx = 0,
	.evlib_size_pt = sizeof(struct lws_pt_eventlibs_sdevent),
	.evlib_size_vh = 0,
	.evlib_size_wsi = sizeof(struct lws_wsi_watcher_sdevent),
};

#if defined(LWS_WITH_EVLIB_PLUGINS)
LWS_VISIBLE
#endif
const lws_plugin_evlib_t evlib_sd = {
	.hdr = {
		"systemd event loop",
		"lws_evlib_plugin",
		LWS_BUILD_HASH,
		LWS_PLUGIN_API_MAGIC
	},

	.ops = &event_loop_ops_sdevent
};
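
/*
 * Usage sketch (illustrative, not part of this translation unit): an
 * application can hand lws an existing sd_event loop as a "foreign" loop
 * at context creation time, assuming the LWS_SERVER_OPTION_SDEVENT context
 * option and the foreign_loops member of lws_context_creation_info:
 *
 *	struct lws_context_creation_info info;
 *	struct lws_context *cx;
 *	sd_event *loop = NULL;
 *	void *foreign[1];
 *
 *	memset(&info, 0, sizeof(info));
 *	sd_event_default(&loop);
 *	foreign[0] = loop;
 *	info.options = LWS_SERVER_OPTION_SDEVENT;
 *	info.foreign_loops = foreign;	// init_pt_sd() takes a ref on this
 *	cx = lws_create_context(&info);
 *
 * Because the loop is foreign, destroy_pt_sd() only unrefs it; the
 * application keeps ownership of the loop's lifetime.
 */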