/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

static int
sul_compare(const lws_dll2_t *d, const lws_dll2_t *i)
{
	lws_usec_t a = ((lws_sorted_usec_list_t *)d)->us;
	lws_usec_t b = ((lws_sorted_usec_list_t *)i)->us;

	/*
	 * Simply returning (a - b) in an int may lead to an integer
	 * overflow bug: the 64-bit usec difference can exceed what an int
	 * holds, truncating and possibly flipping the sign, which would
	 * corrupt the sort order.
	 */

	if (a > b)
		return 1;
	if (a < b)
		return -1;

	return 0;
}

/*
 * notice owner was chosen already, and sul->us was already computed
 */

int
__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul)
{
	lws_dll2_remove(&sul->list);

	assert(sul->cb);

	/*
	 * we sort the pt's list of sequencers with pending timeouts, so it's
	 * cheap to check it every poll wait
	 */

	lws_dll2_add_sorted(&sul->list, own, sul_compare);

	return 0;
}

void
lws_sul_cancel(lws_sorted_usec_list_t *sul)
{
	lws_dll2_remove(&sul->list);

	/* we are clearing the timeout and leaving ourselves detached */
	sul->us = 0;
}

void
lws_sul2_schedule(struct lws_context *context, int tsi, int flags,
	          lws_sorted_usec_list_t *sul)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];

	lws_pt_assert_lock_held(pt);

	assert(sul->cb);

	__lws_sul_insert(
		&pt->pt_sul_owner[!!(flags & LWSSULLI_WAKE_IF_SUSPENDED)], sul);
}

/*
 * own points to the first in an array of length own_len
 *
 * While any sul list owner has a "ripe" sul, ie, one whose time has arrived,
 * we handle them strictly in order of sul time.  When nobody has a ripe sul
 * any more, we return 0 if actually nobody has any sul, otherwise the
 * interval between usnow and the next earliest scheduled event on any list.
 */

lws_usec_t
__lws_sul_service_ripe(lws_dll2_owner_t *own, int own_len, lws_usec_t usnow)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)
			lws_container_of(own, struct lws_context_per_thread,
					 pt_sul_owner);

	if (pt->attach_owner.count)
		lws_system_do_attach(pt);

	lws_pt_assert_lock_held(pt);

	/* must be at least 1 */
	assert(own_len > 0);

	/*
	 * Of the own_len sul owning lists, the earliest next sul could be on
	 * any of them.  We have to find it and handle each in turn until no
	 * ripe sul left on any owning list, and we can exit.
	 *
	 * This ensures the ripe sul are handled strictly in the right order no
	 * matter which owning list they are on.
	 */

	do {
		lws_sorted_usec_list_t *hit = NULL;
		lws_usec_t lowest = 0;
		int n = 0;

		for (n = 0; n < own_len; n++) {
			lws_sorted_usec_list_t *sul;

			if (!own[n].count)
				continue;
			sul = (lws_sorted_usec_list_t *)
						     lws_dll2_get_head(&own[n]);

			if (!hit || sul->us <= lowest) {
				hit = sul;
				lowest = sul->us;
			}
		}

		if (!hit)
			return 0;

		if (lowest > usnow)
			return lowest - usnow;

		/* his moment has come... remove him from his owning list */

		lws_dll2_remove(&hit->list);
		hit->us = 0;

		// lwsl_notice("%s: sul: %p\n", __func__, hit->cb);

		pt->inside_lws_service = 1;
		hit->cb(hit);
		pt->inside_lws_service = 0;

	} while (1);

	/* unreachable */

	return 0;
}
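
/*
 * Illustrative sketch only (not part of the library): an event loop
 * integration might use the return of __lws_sul_service_ripe() to derive
 * its poll timeout roughly like this.  The helper name my_loop_timeout_ms()
 * and the exact locking shown are assumptions; the real event-lib glue in
 * lws differs in detail.
 *
 *	static int
 *	my_loop_timeout_ms(struct lws_context_per_thread *pt)
 *	{
 *		lws_usec_t us;
 *
 *		lws_pt_lock(pt, __func__);
 *		// handle anything already ripe, get the gap to the next event
 *		us = __lws_sul_service_ripe(pt->pt_sul_owner,
 *					    LWS_COUNT_PT_SUL_OWNERS,
 *					    lws_now_usecs());
 *		lws_pt_unlock(pt);
 *
 *		if (!us)
 *			return -1;	// nothing scheduled: block indefinitely
 *
 *		return (int)(us / LWS_US_PER_MS);
 *	}
 */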

/*
 * Normally we use the OS monotonic time, which does not step when the
 * gettimeofday() time is adjusted by, eg, ntpclient.  But on some OSes,
 * high resolution monotonic time doesn't exist; sul time is computed from and
 * compared against gettimeofday() time and breaks when that steps.
 *
 * For those cases, this allows us to retrospectively adjust existing suls on
 * all owning lists by the step amount, at the same time we adjust the
 * nonmonotonic clock.  Then nothing breaks so long as we do this when the
 * gettimeofday() clock is stepped.
 *
 * Linux and so on offer POSIX MONOTONIC, which lws uses.  FreeRTOS doesn't
 * have a high-resolution monotonic clock and has to use gettimeofday(), which
 * requires this adjustment when it is stepped.
 */

lws_usec_t
lws_sul_nonmonotonic_adjust(struct lws_context *ctx, int64_t step_us)
{
	struct lws_context_per_thread *pt = &ctx->pt[0];
	int n, m;

	/*
	 * for each pt
	 */

	for (m = 0; m < ctx->count_threads; m++) {

		/*
		 * For each owning list...
		 */

		lws_pt_lock(pt, __func__);

		for (n = 0; n < LWS_COUNT_PT_SUL_OWNERS; n++) {

			if (!pt->pt_sul_owner[n].count)
				continue;

			/* ... and for every existing sul on a list... */

			lws_start_foreach_dll(struct lws_dll2 *, p,
					      lws_dll2_get_head(
							&pt->pt_sul_owner[n])) {
				lws_sorted_usec_list_t *sul = lws_container_of(
					       p, lws_sorted_usec_list_t, list);

				/*
				 * ... retrospectively step its ripe time by the
				 * step we will adjust the gettimeofday() clock
				 * with
				 */

				sul->us += step_us;

			} lws_end_foreach_dll(p);
		}

		lws_pt_unlock(pt);

		pt++;
	}

	return 0;
}
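
/*
 * Illustrative sketch only: on a platform with no usable monotonic clock,
 * code that is about to step the wall clock (eg, after an SNTP result
 * arrives) might first adjust the pending suls by the same step, along
 * these lines.  my_set_wallclock_us() is a hypothetical helper, not an
 * lws API.
 *
 *	int64_t step_us = new_wallclock_us - old_wallclock_us;
 *
 *	lws_sul_nonmonotonic_adjust(context, step_us);
 *	my_set_wallclock_us(new_wallclock_us);
 */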

/*
 * Earliest wakeable event on any pt
 */

int
lws_sul_earliest_wakeable_event(struct lws_context *ctx, lws_usec_t *pearliest)
{
	struct lws_context_per_thread *pt;
	int n = 0, hit = -1;
	lws_usec_t lowest = 0;

	for (n = 0; n < ctx->count_threads; n++) {
		pt = &ctx->pt[n];

		lws_pt_lock(pt, __func__);

		if (pt->pt_sul_owner[LWSSULLI_WAKE_IF_SUSPENDED].count) {
			lws_sorted_usec_list_t *sul = (lws_sorted_usec_list_t *)
					lws_dll2_get_head(&pt->pt_sul_owner[
					           LWSSULLI_WAKE_IF_SUSPENDED]);

			if (hit == -1 || sul->us < lowest) {
				hit = n;
				lowest = sul->us;
			}
		}

		lws_pt_unlock(pt);
	}

	if (hit == -1)
		/* there is no pending event */
		return 1;

	*pearliest = lowest;

	return 0;
}
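
/*
 * Illustrative sketch only: a platform suspend path might consult this
 * before sleeping, so it can program a wake source (eg, an RTC alarm) for
 * the next event that is allowed to wake the device.  The time reported in
 * *pearliest is absolute, on the same timebase as lws_now_usecs();
 * my_program_wake_alarm_us() is a hypothetical helper.
 *
 *	lws_usec_t next;
 *
 *	if (!lws_sul_earliest_wakeable_event(context, &next))
 *		my_program_wake_alarm_us(next);
 *	// else: nothing needs to wake us, sleep without an alarm
 */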

void
lws_sul_schedule(struct lws_context *ctx, int tsi, lws_sorted_usec_list_t *sul,
		 sul_cb_t _cb, lws_usec_t _us)
{
	struct lws_context_per_thread *_pt = &ctx->pt[tsi];

	assert(_cb);

	lws_pt_lock(_pt, __func__);

	if (_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL)
		lws_sul_cancel(sul);
	else {
		sul->cb = _cb;
		sul->us = lws_now_usecs() + _us;
		lws_sul2_schedule(ctx, tsi, LWSSULLI_MISS_IF_SUSPENDED, sul);
	}

	lws_pt_unlock(_pt);
}
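
/*
 * Illustrative sketch only: typical usage embeds the sul inside the
 * caller's own object and recovers that object in the callback with
 * lws_container_of().  The names my_thing and my_thing_cb are
 * hypothetical.
 *
 *	struct my_thing {
 *		lws_sorted_usec_list_t	sul;
 *		int			state;
 *	};
 *
 *	static void
 *	my_thing_cb(lws_sorted_usec_list_t *sul)
 *	{
 *		struct my_thing *t = lws_container_of(sul, struct my_thing, sul);
 *
 *		// ... act on t, optionally lws_sul_schedule() again ...
 *	}
 *
 *	// ask for my_thing_cb to be called in 2s, on service thread 0
 *	lws_sul_schedule(context, 0, &t->sul, my_thing_cb, 2 * LWS_US_PER_SEC);
 *
 *	// before freeing t, any pending sul must be cancelled
 *	lws_sul_cancel(&t->sul);
 */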

void
lws_sul_schedule_wakesuspend(struct lws_context *ctx, int tsi,
			     lws_sorted_usec_list_t *sul, sul_cb_t _cb,
			     lws_usec_t _us)
{
	struct lws_context_per_thread *_pt = &ctx->pt[tsi];

	assert(_cb);

	lws_pt_lock(_pt, __func__);

	if (_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL)
		lws_sul_cancel(sul);
	else {
		sul->cb = _cb;
		sul->us = lws_now_usecs() + _us;
		lws_sul2_schedule(ctx, tsi, LWSSULLI_WAKE_IF_SUSPENDED, sul);
	}

	lws_pt_unlock(_pt);
}
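
/*
 * Illustrative note: this differs from lws_sul_schedule() only in that the
 * sul goes on the LWSSULLI_WAKE_IF_SUSPENDED owner, so on platforms with a
 * suspend mode it is allowed to wake the system rather than being missed
 * while suspended.  Reusing the hypothetical names from the sketch above:
 *
 *	// eg, a periodic check that must still fire from system suspend
 *	lws_sul_schedule_wakesuspend(context, 0, &t->sul, my_thing_cb,
 *				     30 * LWS_US_PER_SEC);
 */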

#if defined(LWS_WITH_SUL_DEBUGGING)

/*
 * Sanity checker for any sul left scheduled when its containing object is
 * freed... code scheduling suls must take care to cancel them when destroying
 * their object.  This optional debugging helper checks that when an object is
 * being destroyed, there is no live sul scheduled from inside the object.
 */

void
lws_sul_debug_zombies(struct lws_context *ctx, void *po, size_t len,
		      const char *destroy_description)
{
	struct lws_context_per_thread *pt;
	int n, m;

	for (n = 0; n < ctx->count_threads; n++) {
		pt = &ctx->pt[n];

		lws_pt_lock(pt, __func__);

		for (m = 0; m < LWS_COUNT_PT_SUL_OWNERS; m++) {

			lws_start_foreach_dll(struct lws_dll2 *, p,
				      lws_dll2_get_head(&pt->pt_sul_owner[m])) {
				lws_sorted_usec_list_t *sul =
					lws_container_of(p,
						lws_sorted_usec_list_t, list);

				if (!po) {
					lwsl_cx_err(ctx, "%s",
							 destroy_description);
					/* just sanity check the list */
					assert(sul->cb);
				}

				/*
				 * Is the sul resident inside the object that is
				 * indicated as being deleted?
				 */

				if (po &&
				    (void *)sul >= po &&
				    (size_t)lws_ptr_diff(sul, po) < len) {
					lwsl_cx_err(ctx, "ERROR: Zombie Sul "
						 "(on list %d) %s, cb %p\n", m,
						 destroy_description, sul->cb);
					/*
					 * This assert fires if you have left
					 * a sul scheduled to fire later, but
					 * are about to destroy the object the
					 * sul lives in.  You must take care to
					 * do lws_sul_cancel(&sul) on any suls
					 * that may be scheduled before
					 * destroying the object the sul lives
					 * inside.
					 *
					 * You can look up the cb pointer in
					 * your mapfile to find out which
					 * callback function the sul was using
					 * which usually tells you which sul
					 * it is.
					 */
					assert(0);
				}

			} lws_end_foreach_dll(p);
		}

		lws_pt_unlock(pt);
	}
}
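
/*
 * Illustrative sketch only: the kind of bug this helper is designed to
 * catch, and its fix.  The names are hypothetical.
 *
 *	struct my_thing *t = malloc(sizeof(*t));
 *
 *	lws_sul_schedule(context, 0, &t->sul, my_thing_cb, 5 * LWS_US_PER_SEC);
 *	...
 *	free(t);	// BUG: t->sul is still scheduled on the pt's list
 *
 * The scheduled sul now points into freed memory and will still be walked
 * and fired later.  The fix is always to cancel before destroying:
 *
 *	lws_sul_cancel(&t->sul);
 *	free(t);
 */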

#endif