/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

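/*
 * Detach this wsi from every per-pt sorted usec list (sul) entry it may be
 * scheduled on: the wsi timeout, the hrtimer and the validity timer.
 * lws_dll2_remove() tolerates entries that are not currently listed, so this
 * is safe to call unconditionally (typically while the wsi is being torn
 * down).
 */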
void
__lws_wsi_remove_from_sul(struct lws *wsi)
{
	//struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	//lwsl_notice("%s: wsi %p, to %p, hr %p\n", __func__, wsi,
	//		&wsi->sul_timeout.list, &wsi->sul_hrtimer.list);

	// lws_dll2_describe(&pt->pt_sul_owner, "pre-remove");
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_dll2_remove(&wsi->sul_hrtimer.list);
	lws_dll2_remove(&wsi->sul_validity.list);
	// lws_dll2_describe(&pt->pt_sul_owner, "post-remove");
}

/*
 * hrtimer
 */

static void
lws_sul_hrtimer_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_hrtimer);

	if (wsi->protocol &&
	    wsi->protocol->callback(wsi, LWS_CALLBACK_TIMER,
				    wsi->user_space, NULL, 0))
		__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				     "hrtimer cb errored");
}

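/*
 * Arm (or re-arm) the wsi hrtimer: lws_sul_hrtimer_cb above is registered on
 * wsi->sul_hrtimer and scheduled us microseconds from now on this service
 * thread's sorted usec list.  When it fires, the wsi's protocol receives an
 * LWS_CALLBACK_TIMER callback.
 */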
void
__lws_set_timer_usecs(struct lws *wsi, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	wsi->sul_hrtimer.cb = lws_sul_hrtimer_cb;
	__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_hrtimer, us);
}

void
lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
{
	__lws_set_timer_usecs(wsi, usecs);
}

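/*
 * Usage sketch (illustrative, not part of the original file): from a protocol
 * callback, request an LWS_CALLBACK_TIMER roughly 250ms from now with
 *
 *	lws_set_timer_usecs(wsi, 250 * LWS_US_PER_MS);
 *
 * Passing LWS_SET_TIMER_USEC_CANCEL is the usual way to cancel a pending
 * hrtimer (the cancel sentinel is assumed to be handled inside
 * __lws_sul_insert()).
 */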
/*
 * wsi timeout
 */

static void
lws_sul_wsitimeout_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_timeout);
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lws_stats_bump(pt, LWSSTATS_C_TIMEOUTS, 1);

	/* no need to log normal idle keepalive timeout */
//		if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_info("wsi %p: TIMEDOUT WAITING on %d "
			  "(did hdr %d, ah %p, wl %d)\n",
			  (void *)wsi, wsi->pending_timeout,
			  wsi->hdr_parsing_completed, wsi->http.ah,
			  pt->http.ah_wait_list_length);
#if defined(LWS_WITH_CGI)
	if (wsi->http.cgi)
		lwsl_notice("CGI timeout: %s\n", wsi->http.cgi->summary);
#endif
#else
	if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
		lwsl_info("wsi %p: TIMEDOUT WAITING on %d ", (void *)wsi,
				wsi->pending_timeout);
#endif
	/* cgi timeout */
	if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
		/*
		 * Since he failed a timeout, he already had a chance to
		 * do something and was unable to... that includes
		 * situations like half closed connections.  So process
		 * this "failed timeout" close as a violent death and
		 * don't try to do protocol cleanup like flush partials.
		 */
		wsi->socket_is_permanently_unusable = 1;
#if defined(LWS_WITH_CLIENT)
	if (lwsi_state(wsi) == LRS_WAITING_SSL)
		lws_inform_client_conn_fail(wsi,
			(void *)"Timed out waiting SSL", 21);
#endif

	__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "timeout");
}

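/*
 * Internal helper: register lws_sul_wsitimeout_cb on wsi->sul_timeout and
 * schedule it secs seconds from now.  Callers are expected to hold the pt
 * lock (see lws_set_timeout() below, which wraps this with lws_pt_lock()).
 */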
void
__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	wsi->sul_timeout.cb = lws_sul_wsitimeout_cb;
	__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_timeout,
			 ((lws_usec_t)secs) * LWS_US_PER_SEC);

	lwsl_debug("%s: %p: %d secs, reason %d\n", __func__, wsi, secs, reason);

	wsi->pending_timeout = reason;
}

void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!secs)
		return;

	if (secs == LWS_TO_KILL_SYNC) {
		lwsl_debug("synchronously killing %p\n", wsi);
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "to sync kill");
		return;
	}

	if (secs == LWS_TO_KILL_ASYNC)
		secs = 0;

	// assert(!secs || !wsi->mux_stream_immortal);
	if (secs && wsi->mux_stream_immortal)
		lwsl_err("%s: on immortal stream %d %d\n", __func__, reason, secs);

	lws_pt_lock(pt, __func__);
	__lws_set_timeout(wsi, reason, secs);
	lws_pt_unlock(pt);
}

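/*
 * Usage sketch (illustrative, not part of the original file): protocol code
 * commonly arms a wsi timeout like
 *
 *	lws_set_timeout(wsi, PENDING_TIMEOUT_USER_OK, 20);
 *
 * cancels it by passing 0 for secs, or asks for the connection to be closed
 * by passing LWS_TO_KILL_SYNC / LWS_TO_KILL_ASYNC as secs, as handled above.
 *
 * lws_set_timeout_us() below is the microsecond-resolution variant of the
 * same wsi timeout.
 */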
void
lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	lws_dll2_remove(&wsi->sul_timeout.list);
	lws_pt_unlock(pt);

	if (!us)
		return;

	lws_pt_lock(pt, __func__);
	__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_timeout, us);

	lwsl_notice("%s: %p: %llu us, reason %d\n", __func__, wsi,
		   (unsigned long long)us, reason);

	wsi->pending_timeout = reason;
	lws_pt_unlock(pt);
}

/* requires context + vh lock */

int
__lws_timed_callback_remove(struct lws_vhost *vh, struct lws_timed_vh_protocol *p)
{
	lws_start_foreach_llp_safe(struct lws_timed_vh_protocol **, pt,
			      vh->timed_vh_protocol_list, next) {
		if (*pt == p) {
			*pt = p->next;
			lws_dll2_remove(&p->sul.list);
			lws_free(p);

			return 0;
		}
	} lws_end_foreach_llp_safe(pt);

	return 1;
}

void
lws_sul_timed_callback_vh_protocol_cb(lws_sorted_usec_list_t *sul)
{
	struct lws_timed_vh_protocol *tvp = lws_container_of(sul,
					struct lws_timed_vh_protocol, sul);
	struct lws_context_per_thread *pt =
				&tvp->vhost->context->pt[tvp->tsi_req];

	pt->fake_wsi->context = tvp->vhost->context;

	pt->fake_wsi->vhost = tvp->vhost; /* not a real bound wsi */
	pt->fake_wsi->protocol = tvp->protocol;

	lwsl_debug("%s: timed cb: vh %s, protocol %s, reason %d\n", __func__,
		   tvp->vhost->name, tvp->protocol->name, tvp->reason);

	tvp->protocol->callback(pt->fake_wsi, tvp->reason, NULL, NULL, 0);

	__lws_timed_callback_remove(tvp->vhost, tvp);
}

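/*
 * Schedule a one-shot, per-vhost protocol callback us microseconds from now.
 * The allocation is tracked on vh->timed_vh_protocol_list and delivered by
 * lws_sul_timed_callback_vh_protocol_cb above, on a "fake" wsi bound to the
 * vhost and protocol.  Returns 0 on success, 1 on allocation failure.
 */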
int
lws_timed_callback_vh_protocol_us(struct lws_vhost *vh,
				  const struct lws_protocols *prot, int reason,
				  lws_usec_t us)
{
	struct lws_timed_vh_protocol *p = (struct lws_timed_vh_protocol *)
			lws_malloc(sizeof(*p), "timed_vh");

	if (!p)
		return 1;

	memset(p, 0, sizeof(*p));

	p->tsi_req = lws_pthread_self_to_tsi(vh->context);
	if (p->tsi_req < 0) /* not called from a service thread --> tsi 0 */
		p->tsi_req = 0;

	lws_context_lock(vh->context, __func__); /* context ----------------- */

	p->protocol = prot;
	p->reason = reason;
	p->vhost = vh;

	p->sul.cb = lws_sul_timed_callback_vh_protocol_cb;
	/* list is always at the very top of the sul */
	__lws_sul_insert(&vh->context->pt[p->tsi_req].pt_sul_owner,
			 (lws_sorted_usec_list_t *)&p->sul.list, us);

	// lwsl_notice("%s: %s.%s %d\n", __func__, vh->name, prot->name, secs);

	lws_vhost_lock(vh); /* vhost ---------------------------------------- */
	p->next = vh->timed_vh_protocol_list;
	vh->timed_vh_protocol_list = p;
	lws_vhost_unlock(vh); /* -------------------------------------- vhost */

	lws_context_unlock(vh->context); /* ------------------------- context */

	return 0;
}

int
lws_timed_callback_vh_protocol(struct lws_vhost *vh,
			       const struct lws_protocols *prot, int reason,
			       int secs)
{
	return lws_timed_callback_vh_protocol_us(vh, prot, reason,
					((lws_usec_t)secs) * LWS_US_PER_SEC);
}

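/*
 * Usage sketch (illustrative, not part of the original file): a protocol can
 * ask for a callback against its vhost, eg, from LWS_CALLBACK_PROTOCOL_INIT:
 *
 *	lws_timed_callback_vh_protocol(lws_get_vhost(wsi),
 *				       lws_get_protocol(wsi),
 *				       LWS_CALLBACK_USER, 5);
 *
 * About 5s later the protocol callback is invoked with reason
 * LWS_CALLBACK_USER on the fake wsi described above (user and in are NULL).
 */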
static void
lws_validity_cb(lws_sorted_usec_list_t *sul)
{
	struct lws *wsi = lws_container_of(sul, struct lws, sul_validity);
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	const lws_retry_bo_t *rbo = wsi->retry_policy;

	/* one of either the ping or hangup validity threshold was crossed */

	if (wsi->validity_hup) {
		lwsl_info("%s: wsi %p: validity too old\n", __func__, wsi);
		__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				     "validity timeout");
		return;
	}

	/* schedule a protocol-dependent ping */

	lwsl_info("%s: wsi %p: scheduling validity check\n", __func__, wsi);

	if (wsi->role_ops && wsi->role_ops->issue_keepalive)
		wsi->role_ops->issue_keepalive(wsi, 0);

	/*
	 * We arrange to come back here after the additional ping to hangup time
	 * and do the hangup, unless we get validated (by, eg, a PONG) and
	 * reset the timer
	 */

	assert(rbo->secs_since_valid_hangup > rbo->secs_since_valid_ping);

	wsi->validity_hup = 1;
	__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_validity,
			 ((uint64_t)rbo->secs_since_valid_hangup -
				 rbo->secs_since_valid_ping) * LWS_US_PER_SEC);
}

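/*
 * The two thresholds used above come from the wsi's retry / validity policy.
 * An illustrative policy (assumption: typical lws_retry_bo_t usage, not
 * defined in this file) might be
 *
 *	static const lws_retry_bo_t retry = {
 *		.secs_since_valid_ping		= 30,
 *		.secs_since_valid_hangup	= 35,
 *	};
 *
 * ie, issue a role-specific keepalive 30s after the last proof of validity,
 * and hang up 35s after it if no proof (eg, a PONG) has arrived meanwhile.
 */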
/*
 * The role calls this back to actually confirm validity on a particular wsi
 * (which may not be the original wsi)
 */

void
_lws_validity_confirmed_role(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	const lws_retry_bo_t *rbo = wsi->retry_policy;

	if (!rbo || !rbo->secs_since_valid_hangup)
		return;

	wsi->validity_hup = 0;
	wsi->sul_validity.cb = lws_validity_cb;

	wsi->validity_hup = rbo->secs_since_valid_ping >=
			    rbo->secs_since_valid_hangup;

	lwsl_info("%s: wsi %p: setting validity timer %ds (hup %d)\n",
			__func__, wsi,
			wsi->validity_hup ? rbo->secs_since_valid_hangup :
					    rbo->secs_since_valid_ping,
			wsi->validity_hup);

	__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_validity,
			 ((uint64_t)(wsi->validity_hup ?
				rbo->secs_since_valid_hangup :
				rbo->secs_since_valid_ping)) * LWS_US_PER_SEC);
}

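/*
 * Public entry point: call this when traffic has proven the connection is
 * still valid (eg, a PONG or any role-level acknowledgement).  It defers to
 * the role's issue_keepalive(wsi, 1), which is expected to locate the right
 * network wsi and call _lws_validity_confirmed_role() above to reset the
 * validity timer.
 */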
void
lws_validity_confirmed(struct lws *wsi)
{
	/*
	 * This may be a stream inside a muxed network connection... leave it
	 * to the role to figure out who actually needs to understand their
	 * validity was confirmed.
	 */
	if (!wsi->h2_stream_carries_ws && /* only if not encapsulated */
	    wsi->role_ops && wsi->role_ops->issue_keepalive)
		wsi->role_ops->issue_keepalive(wsi, 1);
}