/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"
#include "private-lib-event-libs-uloop.h"

#define pt_to_priv_uloop(_pt) ((struct lws_pt_eventlibs_uloop *)(_pt)->evlib_pt)
#define wsi_to_priv_uloop(_w) ((struct lws_wsi_eventlibs_uloop *)(_w)->evlib_wsi)

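/*
 * Scheduler (lws_sul) timer: service any ripe scheduled events and, if more
 * remain, re-arm the uloop timeout.  uloop timeouts have ms resolution, so
 * sub-ms delays are rounded up to 1ms.
 */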
static void
lws_uloop_hrtimer_cb(struct uloop_timeout *ti)
{
	struct lws_pt_eventlibs_uloop *upt = lws_container_of(ti,
				struct lws_pt_eventlibs_uloop, hrtimer);
	struct lws_context_per_thread *pt = upt->pt;
	lws_usec_t us;

	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us)
		uloop_timeout_set(ti, us < 1000 ? 1 : (int)(us / 1000));

	lws_pt_unlock(pt);
}

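/*
 * Idle timer: runs shortly after fd activity to deal with deferred work.
 * It flushes pending rxflow, performs "forced" service while any connection
 * still needs it (re-arming itself 1ms out if so), re-arms the hrtimer from
 * the scheduler's next event, and completes a deferred context destroy if
 * one was requested.
 */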
static void
lws_uloop_idle_timer_cb(struct uloop_timeout *ti)
{
	struct lws_pt_eventlibs_uloop *upt = lws_container_of(ti,
					struct lws_pt_eventlibs_uloop,
					idle_timer);
	struct lws_context_per_thread *pt = upt->pt;
	lws_usec_t us;

	if (pt->is_destroyed)
		return;

	lws_service_do_ripe_rxflow(pt);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
			/* yes... come back again later */

			uloop_timeout_set(ti, 1 /* 1ms */);

			return;
		}
	}

	/* account for hrtimer */

	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us) {
		uloop_timeout_cancel(&upt->hrtimer);
		uloop_timeout_set(&upt->hrtimer,
				  us < 1000 ? 1 : (int)(us / 1000));
	}

	lws_pt_unlock(pt);

	if (pt->destroy_self)
		lws_context_destroy(pt->context);
}

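/*
 * Per-fd callback from uloop: translate ULOOP_READ / ULOOP_WRITE into an
 * lws_pollfd and hand it to lws_service_fd_tsi(), then schedule the idle
 * timer 1ms out so deferred work gets a chance to run.
 */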
static void
lws_uloop_cb(struct uloop_fd *ufd, unsigned int revents)
{
	struct lws_wsi_eventlibs_uloop *wu = lws_container_of(ufd,
					struct lws_wsi_eventlibs_uloop, fd);
	struct lws_context *context = wu->wsi->a.context;
	struct lws_context_per_thread *pt;
	struct lws_pollfd eventfd;

	eventfd.fd = wu->wsi->desc.sockfd;
	eventfd.events = 0;
	eventfd.revents = 0;

	if (revents & ULOOP_READ) {
		eventfd.events = LWS_POLLIN;
		eventfd.revents = LWS_POLLIN;
	}
	if (revents & ULOOP_WRITE) {
		eventfd.events |= LWS_POLLOUT;
		eventfd.revents |= LWS_POLLOUT;
	}

	pt = &context->pt[(int)wu->wsi->tsi];
	if (pt->is_destroyed)
		return;

	lws_service_fd_tsi(context, &eventfd, wu->wsi->tsi);

	if (pt->destroy_self) {
		lwsl_cx_notice(context, "pt destroy self coming true");
		lws_context_destroy(pt->context);
		return;
	}

	/* set the idle timer for 1ms ahead */

	uloop_timeout_cancel(&pt_to_priv_uloop(pt)->idle_timer);
	uloop_timeout_set(&pt_to_priv_uloop(pt)->idle_timer, 1);
}

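/* register an existing listen wsi's socket with uloop for read events */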
static int
elops_listen_init_uloop(struct lws_dll2 *d, void *user)
{
	struct lws *wsi = lws_container_of(d, struct lws, listen_list);
	struct lws_wsi_eventlibs_uloop *wu = wsi_to_priv_uloop(wsi);

	wu->wsi = wsi;
	wu->fd.fd = wsi->desc.sockfd;
	wu->fd.cb = lws_uloop_cb;
	uloop_fd_add(&wu->fd, ULOOP_READ);
	wu->actual_events = ULOOP_READ;

	return 0;
}

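/*
 * Per-pt init: hook any existing listen sockets into uloop and add the two
 * static timers (sul hrtimer + idle timer), kicking the hrtimer so already-
 * scheduled events are picked up.
 */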
static int
elops_init_pt_uloop(struct lws_context *context, void *v, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_uloop *ptpr = pt_to_priv_uloop(pt);

	ptpr->pt = pt;

	lws_vhost_foreach_listen_wsi(context, NULL, elops_listen_init_uloop);

	/* static event loop objects */

	ptpr->hrtimer.cb = lws_uloop_hrtimer_cb;
	ptpr->idle_timer.cb = lws_uloop_idle_timer_cb;

	uloop_timeout_add(&ptpr->hrtimer);
	uloop_timeout_add(&ptpr->idle_timer);

	uloop_timeout_set(&ptpr->hrtimer, 1);

	return 0;
}

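/* a freshly accepted connection's socket joins the uloop fd set */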
static int
elops_accept_uloop(struct lws *wsi)
{
	struct lws_wsi_eventlibs_uloop *wu = wsi_to_priv_uloop(wsi);

	wu->wsi = wsi;
	wu->fd.fd = wsi->desc.sockfd;
	wu->fd.cb = lws_uloop_cb;
	uloop_fd_add(&wu->fd, ULOOP_READ);
	wu->actual_events = ULOOP_READ;

	return 0;
}

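/*
 * Apply LWS_EV_START / LWS_EV_STOP read/write changes to the event mask we
 * are waiting on for this wsi; uloop_fd_add() on an already-registered fd
 * updates its flags in place.
 */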
static void
elops_io_uloop(struct lws *wsi, unsigned int flags)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_wsi_eventlibs_uloop *wu = wsi_to_priv_uloop(wsi);
	unsigned int ulf = (unsigned int)(((flags & LWS_EV_WRITE) ? ULOOP_WRITE : 0) |
			   ((flags & LWS_EV_READ) ? ULOOP_READ : 0)), u;

	if (wsi->a.context->being_destroyed || pt->is_destroyed)
		return;

	assert((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	       (flags & (LWS_EV_READ | LWS_EV_WRITE)));

	u = wu->actual_events;
	if (flags & LWS_EV_START)
		u |= ulf;
	if (flags & LWS_EV_STOP)
		u &= ~ulf;

	uloop_fd_add(&wu->fd, u);
	wu->actual_events = u;
}

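/* lws does not own the loop: run_pt just hands control to uloop's dispatcher */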
static void
elops_run_pt_uloop(struct lws_context *context, int tsi)
{
	uloop_run();
}

static int
elops_listen_destroy_uloop(struct lws_dll2 *d, void *user)
{
	struct lws *wsi = lws_container_of(d, struct lws, listen_list);
	struct lws_wsi_eventlibs_uloop *wu = wsi_to_priv_uloop(wsi);

	uloop_fd_delete(&wu->fd);

	return 0;
}

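/* per-pt teardown: deregister listen sockets and cancel our two timers */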
static void
elops_destroy_pt_uloop(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_uloop *ptpr = pt_to_priv_uloop(pt);

	lws_vhost_foreach_listen_wsi(context, NULL, elops_listen_destroy_uloop);

	uloop_timeout_cancel(&ptpr->hrtimer);
	uloop_timeout_cancel(&ptpr->idle_timer);
}

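/* stop watching a wsi's fd, unless the whole pt is already being destroyed */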
static void
elops_destroy_wsi_uloop(struct lws *wsi)
{
	struct lws_context_per_thread *pt;

	if (!wsi)
		return;

	pt = &wsi->a.context->pt[(int)wsi->tsi];
	if (pt->is_destroyed)
		return;

	uloop_fd_delete(&wsi_to_priv_uloop(wsi)->fd);
}

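/* logical close: nothing more needed than removing the fd from the loop */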
static int
elops_wsi_logical_close_uloop(struct lws *wsi)
{
	elops_destroy_wsi_uloop(wsi);

	return 0;
}

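/* register a vhost's listen wsi with uloop for read events */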
static int
elops_init_vhost_listen_wsi_uloop(struct lws *wsi)
{
	struct lws_wsi_eventlibs_uloop *wu;

	if (!wsi) {
		assert(0);
		return 0;
	}

	wu = wsi_to_priv_uloop(wsi);
	wu->wsi = wsi;
	wu->fd.fd = wsi->desc.sockfd;
	wu->fd.cb = lws_uloop_cb;
	uloop_fd_add(&wu->fd, ULOOP_READ);

	wu->actual_events = ULOOP_READ;

	return 0;
}

static const struct lws_event_loop_ops event_loop_ops_uloop = {
	/* name */			"uloop",
	/* init_context */		NULL,
	/* destroy_context1 */		NULL,
	/* destroy_context2 */		NULL,
	/* init_vhost_listen_wsi */	elops_init_vhost_listen_wsi_uloop,
	/* init_pt */			elops_init_pt_uloop,
	/* wsi_logical_close */		elops_wsi_logical_close_uloop,
	/* check_client_connect_ok */	NULL,
	/* close_handle_manually */	NULL,
	/* accept */			elops_accept_uloop,
	/* io */			elops_io_uloop,
	/* run_pt */			elops_run_pt_uloop,
	/* destroy_pt */		elops_destroy_pt_uloop,
	/* destroy wsi */		elops_destroy_wsi_uloop,
	/* foreign_thread */		NULL,

	/* flags */			0,

	/* evlib_size_ctx */		0,
	/* evlib_size_pt */		sizeof(struct lws_pt_eventlibs_uloop),
	/* evlib_size_vh */		0,
	/* evlib_size_wsi */		sizeof(struct lws_wsi_eventlibs_uloop),
};

#if defined(LWS_WITH_EVLIB_PLUGINS)
LWS_VISIBLE
#endif
const lws_plugin_evlib_t evlib_uloop = {
	.hdr = {
		"uloop event loop",
		"lws_evlib_plugin",
		LWS_BUILD_HASH,
		LWS_PLUGIN_API_MAGIC
	},

	.ops = &event_loop_ops_uloop
};