/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"
static int
sul_compare(const lws_dll2_t *d, const lws_dll2_t *i)
{
	lws_usec_t a = ((lws_sorted_usec_list_t *)d)->us;
	lws_usec_t b = ((lws_sorted_usec_list_t *)i)->us;

	/*
	 * Simply returning (a - b) in an int
	 * may lead to an integer overflow bug
	 */

	if (a > b)
		return 1;
	if (a < b)
		return -1;

	return 0;
}
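
/*
 * To illustrate the overflow hazard sul_compare() avoids (a sketch, not
 * library code): lws_usec_t is a 64-bit microsecond count, so a naive
 * "return (int)(a - b)" truncates the difference.  A difference of exactly
 * 0x100000000 us (about 71 minutes) truncates to 0, wrongly reporting the
 * two suls as equal:
 *
 *	lws_usec_t a = 0x100000000ll, b = 0;
 *
 *	(int)(a - b);		// 0: high bits truncated away, wrong
 *	a > b ? 1 : -1;		// 1: what sul_compare() correctly returns
 */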

/*
 * notice owner was chosen already, and sul->us was already computed
 */

int
__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul)
{
	lws_dll2_remove(&sul->list);

	assert(sul->cb);

	/*
	 * we sort the pt's list of sequencers with pending timeouts, so it's
	 * cheap to check it every poll wait
	 */

	lws_dll2_add_sorted(&sul->list, own, sul_compare);

	return 0;
}

void
lws_sul_cancel(lws_sorted_usec_list_t *sul)
{
	lws_dll2_remove(&sul->list);

	/* we are clearing the timeout and leaving ourselves detached */
	sul->us = 0;
}

void
lws_sul2_schedule(struct lws_context *context, int tsi, int flags,
		  lws_sorted_usec_list_t *sul)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];

	lws_pt_assert_lock_held(pt);

	assert(sul->cb);

	__lws_sul_insert(
		&pt->pt_sul_owner[!!(flags & LWSSULLI_WAKE_IF_SUSPENDED)], sul);
}

/*
 * own points to the first in an array of length own_len
 *
 * While any sul list owner has a "ripe" sul, ie, one whose time has come,
 * we handle them strictly in order of sul time across all the owning
 * lists.  When nobody has a ripe sul, we return 0 if nobody has any sul
 * at all, or else the interval between usnow and the earliest scheduled
 * event on any list.
 */

lws_usec_t
__lws_sul_service_ripe(lws_dll2_owner_t *own, int own_len, lws_usec_t usnow)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)
			lws_container_of(own, struct lws_context_per_thread,
					 pt_sul_owner);

	if (pt->attach_owner.count)
		lws_system_do_attach(pt);

	lws_pt_assert_lock_held(pt);

	/* must be at least 1 */
	assert(own_len > 0);

	/*
	 * Of the own_len sul owning lists, the earliest next sul could be on
	 * any of them.  We have to find it and handle each in turn until no
	 * ripe sul is left on any owning list, and then we can exit.
	 *
	 * This ensures the ripe suls are handled strictly in the right order
	 * no matter which owning list they are on.
	 */

	do {
		lws_sorted_usec_list_t *hit = NULL;
		lws_usec_t lowest = 0;
		int n = 0;

		for (n = 0; n < own_len; n++) {
			lws_sorted_usec_list_t *sul;
			if (!own[n].count)
				continue;
			sul = (lws_sorted_usec_list_t *)
						lws_dll2_get_head(&own[n]);

			if (!hit || sul->us <= lowest) {
				hit = sul;
				lowest = sul->us;
			}
		}

		if (!hit)
			return 0;

		if (lowest > usnow)
			return lowest - usnow;

		/* his moment has come... remove him from his owning list */

		if (!hit->cb) {
			lwsl_err("%s: sul with NULL callback "
				 "(did not cancel on destroy?)\n", __func__);

			return 0;
		}

		lws_dll2_remove(&hit->list);
		hit->us = 0;

		// lwsl_notice("%s: sul: %p\n", __func__, hit->cb);

		pt->inside_lws_service = 1;
		hit->cb(hit);
		pt->inside_lws_service = 0;

	} while (1);

	/* unreachable */

	return 0;
}
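
/*
 * Illustrative sketch (not lws code) of how an event loop backend might
 * consume the return of __lws_sul_service_ripe() as its poll timeout;
 * my_backend_wait() is hypothetical:
 *
 *	lws_usec_t us;
 *
 *	lws_pt_lock(pt, __func__);
 *	us = __lws_sul_service_ripe(pt->pt_sul_owner,
 *				    LWS_COUNT_PT_SUL_OWNERS, lws_now_usecs());
 *	lws_pt_unlock(pt);
 *
 *	if (!us)
 *		us = LWS_USEC_PER_SEC;	// nothing scheduled, default wait
 *
 *	my_backend_wait(us);		// sleep until timeout or fd activity
 */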

/*
 * Normally we use the OS monotonic time, which does not step when the
 * gettimeofday() time is adjusted by, eg, ntpclient.  But on some OSes,
 * high resolution monotonic time doesn't exist; sul time is computed from
 * and compared against gettimeofday() time, and breaks when that steps.
 *
 * For those cases, this allows us to retrospectively adjust existing suls
 * on all owning lists by the step amount, at the same time we adjust the
 * nonmonotonic clock.  Then nothing breaks, so long as we do this whenever
 * the gettimeofday() clock is stepped.
 *
 * Linux and so on offer POSIX MONOTONIC, which lws uses.  FreeRTOS doesn't
 * have a high-resolution monotonic clock and has to use gettimeofday(),
 * which requires this adjustment when it is stepped.
 */

lws_usec_t
lws_sul_nonmonotonic_adjust(struct lws_context *ctx, int64_t step_us)
{
	struct lws_context_per_thread *pt = &ctx->pt[0];
	int n, m;

	/*
	 * for each pt
	 */

	for (m = 0; m < ctx->count_threads; m++) {

		/*
		 * For each owning list...
		 */

		lws_pt_lock(pt, __func__);

		for (n = 0; n < LWS_COUNT_PT_SUL_OWNERS; n++) {

			if (!pt->pt_sul_owner[n].count)
				continue;

			/* ... and for every existing sul on a list... */

			lws_start_foreach_dll(struct lws_dll2 *, p,
					      lws_dll2_get_head(
							&pt->pt_sul_owner[n])) {
				lws_sorted_usec_list_t *sul = lws_container_of(
					       p, lws_sorted_usec_list_t, list);

				/*
				 * ... retrospectively step its ripe time by
				 * the step we will adjust the gettimeofday()
				 * clock with
				 */

				sul->us += step_us;

			} lws_end_foreach_dll(p);
		}

		lws_pt_unlock(pt);

		pt++;
	}

	return 0;
}
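
/*
 * Illustrative sketch (not lws code): a platform that must step its
 * gettimeofday() clock, eg, after SNTP sync, would adjust the suls in the
 * same operation.  set_system_time_us() is hypothetical:
 *
 *	int64_t step_us = new_utc_us - old_utc_us;
 *
 *	lws_sul_nonmonotonic_adjust(context, step_us);
 *	set_system_time_us(new_utc_us);
 *
 * Since the sul ripe times were computed against the old clock, stepping
 * them by the same amount keeps their remaining intervals unchanged.
 */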

/*
 * Earliest wakeable event on any pt
 */

int
lws_sul_earliest_wakeable_event(struct lws_context *ctx, lws_usec_t *pearliest)
{
	struct lws_context_per_thread *pt;
	int n = 0, hit = -1;
	lws_usec_t lowest = 0;

	for (n = 0; n < ctx->count_threads; n++) {
		pt = &ctx->pt[n];

		lws_pt_lock(pt, __func__);

		if (pt->pt_sul_owner[LWSSULLI_WAKE_IF_SUSPENDED].count) {
			lws_sorted_usec_list_t *sul = (lws_sorted_usec_list_t *)
					lws_dll2_get_head(&pt->pt_sul_owner[
						LWSSULLI_WAKE_IF_SUSPENDED]);

			if (hit == -1 || sul->us < lowest) {
				hit = n;
				lowest = sul->us;
			}
		}

		lws_pt_unlock(pt);
	}

	if (hit == -1)
		/* there is no pending event */
		return 1;

	*pearliest = lowest;

	return 0;
}
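
/*
 * Illustrative sketch (not lws code): a platform suspend flow might use
 * this to program a wake alarm, so that suls scheduled with the
 * WAKE_IF_SUSPENDED flavour are still serviced on time.  The alarm and
 * suspend calls are hypothetical:
 *
 *	lws_usec_t earliest;
 *
 *	if (!lws_sul_earliest_wakeable_event(context, &earliest))
 *		// something pending: earliest holds its absolute ripe time
 *		program_rtc_alarm_us(earliest);
 *
 *	enter_platform_suspend();
 */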

void
lws_sul_schedule(struct lws_context *ctx, int tsi, lws_sorted_usec_list_t *sul,
		 sul_cb_t _cb, lws_usec_t _us)
{
	struct lws_context_per_thread *_pt = &ctx->pt[tsi];

	assert(_cb);

	lws_pt_lock(_pt, __func__);

	if (_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL)
		lws_sul_cancel(sul);
	else {
		sul->cb = _cb;
		sul->us = lws_now_usecs() + _us;
		lws_sul2_schedule(ctx, tsi, LWSSULLI_MISS_IF_SUSPENDED, sul);
	}

	lws_pt_unlock(_pt);
}
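
/*
 * Illustrative sketch (not lws code) of the usual calling pattern: embed
 * the sul in your own object, recover the object in the callback with
 * lws_container_of(), and cancel the sul before the object is destroyed.
 * The my_* names are hypothetical:
 *
 *	struct my_object {
 *		lws_sorted_usec_list_t	sul;
 *	};
 *
 *	static void
 *	my_cb(lws_sorted_usec_list_t *sul)
 *	{
 *		struct my_object *o = lws_container_of(sul,
 *						struct my_object, sul);
 *
 *		// ... o's timer has fired ...
 *	}
 *
 *	// fire my_cb 250ms from now, on pt 0
 *	lws_sul_schedule(context, 0, &o->sul, my_cb, 250 * LWS_US_PER_MS);
 *
 *	// ... and before freeing o
 *	lws_sul_cancel(&o->sul);
 */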

void
lws_sul_schedule_wakesuspend(struct lws_context *ctx, int tsi,
			     lws_sorted_usec_list_t *sul, sul_cb_t _cb,
			     lws_usec_t _us)
{
	struct lws_context_per_thread *_pt = &ctx->pt[tsi];

	assert(_cb);

	lws_pt_lock(_pt, __func__);

	if (_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL)
		lws_sul_cancel(sul);
	else {
		sul->cb = _cb;
		sul->us = lws_now_usecs() + _us;
		lws_sul2_schedule(ctx, tsi, LWSSULLI_WAKE_IF_SUSPENDED, sul);
	}

	lws_pt_unlock(_pt);
}

#if defined(LWS_WITH_SUL_DEBUGGING)

/*
 * Sanity checker for any sul left scheduled when its containing object is
 * freed... code scheduling suls must take care to cancel them when
 * destroying their object.  This optional debugging helper checks that when
 * an object is being destroyed, there is no live sul scheduled from inside
 * the object.
 */

void
lws_sul_debug_zombies(struct lws_context *ctx, void *po, size_t len,
		      const char *destroy_description)
{
	struct lws_context_per_thread *pt;
	int n, m;

	for (n = 0; n < ctx->count_threads; n++) {
		pt = &ctx->pt[n];

		lws_pt_lock(pt, __func__);

		for (m = 0; m < LWS_COUNT_PT_SUL_OWNERS; m++) {

			lws_start_foreach_dll(struct lws_dll2 *, p,
				      lws_dll2_get_head(&pt->pt_sul_owner[m])) {
				lws_sorted_usec_list_t *sul =
					lws_container_of(p,
						lws_sorted_usec_list_t, list);

				if (!po) {
					lwsl_cx_err(ctx, "%s",
						    destroy_description);
					/* just sanity check the list */
					assert(sul->cb);
				}

				/*
				 * Is the sul resident inside the object that
				 * is indicated as being deleted?
				 */

				if (po &&
				    (void *)sul >= po &&
				    (size_t)lws_ptr_diff(sul, po) < len) {
					lwsl_cx_err(ctx, "ERROR: Zombie Sul "
						"(on list %d) %s, cb %p\n", m,
						destroy_description, sul->cb);
					/*
					 * This assert fires if you have left
					 * a sul scheduled to fire later, but
					 * are about to destroy the object the
					 * sul lives in.  You must take care
					 * to do lws_sul_cancel(&sul) on any
					 * suls that may be scheduled before
					 * destroying the object the sul lives
					 * inside.
					 *
					 * You can look up the cb pointer in
					 * your mapfile to find out which
					 * callback function the sul was
					 * using, which usually tells you
					 * which sul it is.
					 */
					assert(0);
				}

			} lws_end_foreach_dll(p);
		}

		lws_pt_unlock(pt);
	}
}
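
/*
 * Illustrative sketch (not lws code): a destructor for an object holding a
 * sul can use this helper to confirm nothing inside the object is still
 * scheduled before the memory is freed.  my_object_destroy() is
 * hypothetical:
 *
 *	static void
 *	my_object_destroy(struct lws_context *cx, struct my_object *o)
 *	{
 *		lws_sul_cancel(&o->sul);
 *	#if defined(LWS_WITH_SUL_DEBUGGING)
 *		lws_sul_debug_zombies(cx, o, sizeof(*o), __func__);
 *	#endif
 *		free(o);
 *	}
 */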

#endif