/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

int
lws_callback_as_writeable(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	int n, m;

	lws_stats_bump(pt, LWSSTATS_C_WRITEABLE_CB, 1);
#if defined(LWS_WITH_STATS)
	if (wsi->active_writable_req_us) {
		uint64_t ul = lws_now_usecs() -
			      wsi->active_writable_req_us;

		lws_stats_bump(pt, LWSSTATS_US_WRITABLE_DELAY_AVG, ul);
		lws_stats_max(pt, LWSSTATS_US_WORST_WRITABLE_DELAY, ul);
		wsi->active_writable_req_us = 0;
	}
#endif
#if defined(LWS_WITH_DETAILED_LATENCY)
	if (wsi->context->detailed_latency_cb && lwsi_state_est(wsi)) {
		lws_usec_t us = lws_now_usecs();

		wsi->detlat.earliest_write_req_pre_write =
					wsi->detlat.earliest_write_req;
		wsi->detlat.earliest_write_req = 0;
		wsi->detlat.latencies[LAT_DUR_PROXY_RX_TO_ONWARD_TX] =
		      ((uint32_t)us - wsi->detlat.earliest_write_req_pre_write);
	}
#endif
	n = wsi->role_ops->writeable_cb[lwsi_role_server(wsi)];
	m = user_callback_handle_rxflow(wsi->protocol->callback,
					wsi, (enum lws_callback_reasons) n,
					wsi->user_space, NULL, 0);

	return m;
}

int
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
{
	volatile struct lws *vwsi = (volatile struct lws *)wsi;
	int n;

	// lwsl_notice("%s: %p\n", __func__, wsi);

	vwsi->leave_pollout_active = 0;
	vwsi->handling_pollout = 1;
	/*
	 * if another thread wants POLLOUT on us, from here on while
	 * handling_pollout is set, he will only set leave_pollout_active.
	 * If we are going to disable POLLOUT, we will check that first.
	 */
	wsi->could_have_pending = 0; /* clear back-to-back write detection */

	/*
	 * The user callback is actually the lowest priority to get these
	 * notifications, since the other pending things cannot be reordered.
	 *
	 * Priority 1: pending truncated sends are incomplete ws fragments.
	 *	       If anything else were sent first, the protocol would
	 *	       be corrupted.
	 *
	 *	       These are post- any compression transform.
	 */

	if (lws_has_buffered_out(wsi)) {
		//lwsl_notice("%s: completing partial\n", __func__);
		if (lws_issue_raw(wsi, NULL, 0) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			goto bail_die;
		}
		/* leave POLLOUT active either way */
		goto bail_ok;
	} else
		if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
			wsi->socket_is_permanently_unusable = 1;
			goto bail_die; /* retry closing now */
		}

	/* Priority 2: pre- compression transform */

#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
	if (wsi->http.comp_ctx.buflist_comp ||
	    wsi->http.comp_ctx.may_have_more) {
		enum lws_write_protocol wp = LWS_WRITE_HTTP;

		lwsl_info("%s: completing comp partial (buflist_comp %p, may %d)\n",
				__func__, wsi->http.comp_ctx.buflist_comp,
				wsi->http.comp_ctx.may_have_more
				);

		if (wsi->role_ops->write_role_protocol(wsi, NULL, 0, &wp) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
			goto bail_die;
		}
		lws_callback_on_writable(wsi);

		goto bail_ok;
	}
#endif

#ifdef LWS_WITH_CGI
	/*
	 * A cgi master's wire protocol remains h1 or h2.  He is just getting
	 * his data from his child cgis.
	 */
	if (wsi->http.cgi) {
		/* also one shot */
		if (pollfd)
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");
				return 1;
			}
		goto user_service_go_again;
	}
#endif

	/* if we got here, we should have wire protocol ops set on the wsi */
	assert(wsi->role_ops);

	if (!wsi->role_ops->handle_POLLOUT)
		goto bail_ok;

	n = wsi->role_ops->handle_POLLOUT(wsi);
	switch (n) {
	case LWS_HP_RET_BAIL_OK:
		goto bail_ok;
	case LWS_HP_RET_BAIL_DIE:
		goto bail_die;
	case LWS_HP_RET_DROP_POLLOUT:
	case LWS_HP_RET_USER_SERVICE:
		break;
	default:
		assert(0);
	}

	/* one shot */

	if (pollfd) {
		int eff = vwsi->leave_pollout_active;

		if (!eff) {
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");
				goto bail_die;
			}
		}

		vwsi->handling_pollout = 0;

		/* cannot get leave_pollout_active set after the above */
		if (!eff && wsi->leave_pollout_active) {
			/*
			 * got set in between sampling eff and clearing
			 * handling_pollout, force POLLOUT on
			 */
			lwsl_debug("leave_pollout_active\n");
			if (lws_change_pollfd(wsi, 0, LWS_POLLOUT)) {
				lwsl_info("failed at set pollfd\n");
				goto bail_die;
			}
		}

		vwsi->leave_pollout_active = 0;
	}

	if (lwsi_role_client(wsi) && !wsi->hdr_parsing_completed &&
	     lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS &&
	     lwsi_state(wsi) != LRS_ISSUE_HTTP_BODY)
		goto bail_ok;

	if (n == LWS_HP_RET_DROP_POLLOUT)
		goto bail_ok;

#ifdef LWS_WITH_CGI
user_service_go_again:
#endif

	if (wsi->role_ops->perform_user_POLLOUT) {
		if (wsi->role_ops->perform_user_POLLOUT(wsi) == -1)
			goto bail_die;
		else
			goto bail_ok;
	}

	lwsl_debug("%s: %p: non mux: wsistate 0x%lx, ops %s\n", __func__, wsi,
		   (unsigned long)wsi->wsistate, wsi->role_ops->name);

	vwsi = (volatile struct lws *)wsi;
	vwsi->leave_pollout_active = 0;

	n = lws_callback_as_writeable(wsi);
	vwsi->handling_pollout = 0;

	if (vwsi->leave_pollout_active)
		if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
			goto bail_die;

	return n;

	/*
	 * since these don't disable the POLLOUT, they are always doing the
	 * right thing for leave_pollout_active whether it was set or not.
	 */

bail_ok:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return 0;

bail_die:
	vwsi->handling_pollout = 0;
	vwsi->leave_pollout_active = 0;

	return -1;
}

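/*
 * Stash rx that the caller cannot consume right now (his rx is flow-
 * controlled) on the wsi buflist.  Judging by the code below: returns
 * LWSRXFC_TRIMMED if we only trimmed the existing head segment to match the
 * unused remainder, LWSRXFC_ADDITIONAL if we appended behind already-buffered
 * data, LWSRXFC_CACHED if this started the buffering, or LWSRXFC_ERROR on OOM.
 */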
int
lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	uint8_t *buffered;
	size_t blen;
	int ret = LWSRXFC_CACHED, m;

	/* his RX is flowcontrolled, don't send remaining now */
	blen = lws_buflist_next_segment_len(&wsi->buflist, &buffered);
	if (blen) {
		if (buf >= buffered && buf + len <= buffered + blen &&
		    blen != (size_t)len) {
			/*
			 * rxflow while we were spilling prev rxflow
			 *
			 * len indicates how much was unused, then... so trim
			 * the head buflist to match that situation
			 */

			lws_buflist_use_segment(&wsi->buflist, blen - len);
			lwsl_debug("%s: trim existing rxflow %d -> %d\n",
					__func__, (int)blen, (int)len);

			return LWSRXFC_TRIMMED;
		}
		ret = LWSRXFC_ADDITIONAL;
	}

	/* a new rxflow, buffer it and warn caller */

	lwsl_debug("%s: rxflow append %d\n", __func__, len - n);
	m = lws_buflist_append_segment(&wsi->buflist, buf + n, len - n);

	if (m < 0)
		return LWSRXFC_ERROR;
	if (m) {
		lwsl_debug("%s: added %p to rxflow list\n", __func__, wsi);
		if (lws_dll2_is_detached(&wsi->dll_buflist))
			lws_dll2_add_head(&wsi->dll_buflist, &pt->dll_buflist_owner);
	}

	return ret;
}

/* this is used by the platform service code to stop us waiting for network
 * activity in poll() when we have something that already needs service
 */

int
lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;

	if (!context)
		return 1;

	pt = &context->pt[tsi];

	/*
	 * Figure out if we really want to wait in poll()... we only need to
	 * wait if there is really nothing already to do and we have to wait
	 * for something from the network
	 */
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
	/* 1) if we know we are draining rx ext, do not wait in poll */
	if (pt->ws.rx_draining_ext_list)
		return 0;
#endif

#if defined(LWS_WITH_TLS)
	/* 2) if we know we have non-network pending data,
	 *    do not wait in poll */

	if (pt->context->tls_ops &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered(pt))
		return 0;
#endif

	/*
	 * 3) If there is any wsi with rxflow buffered and in a state to process
	 *    it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (!lws_is_flowcontrolled(wsi) &&
		     lwsi_state(wsi) != LRS_DEFERRING_ACTION)
			return 0;

	/*
	 * 4) If any guys with http compression to spill, we shouldn't wait in
	 *    poll but hurry along and service them
	 */

	} lws_end_foreach_dll(d);

	return timeout_ms;
}
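
/*
 * An application driving lws from its own external poll loop would typically
 * combine this with lws_service_fd().  A minimal sketch (illustrative only;
 * it assumes the application keeps its own pollfd array in sync via the
 * LWS_CALLBACK_ADD_POLL_FD / DEL_POLL_FD / CHANGE_MODE_POLL_FD callbacks):
 *
 *	int ms = lws_service_adjust_timeout(context, 1000, 0);
 *
 *	n = poll(pollfds, count_pollfds, ms);
 *	for (i = 0; n > 0 && i < count_pollfds; i++)
 *		if (pollfds[i].revents &&
 *		    lws_service_fd(context, &pollfds[i]))
 *			n--;
 */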

/*
 * POLLIN said there is something... we must read it, and either use it
 * directly, or, if other material is already on the buflist, append it and
 * return the buflist head material instead.
 */
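/*
 * Returns 1 if ebuf was filled from the buflist, 0 if ebuf holds the result
 * of a fresh read (ebuf->len may be <= 0 if nothing was read), or -1 on a
 * fatal read error with nothing buffered.
 */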
int
lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
		       struct lws_tokens *ebuf, char fr, const char *hint)
{
	int n, e, bns;
	uint8_t *ep, *b;

	// lwsl_debug("%s: wsi %p: %s: prior %d\n", __func__, wsi, hint, prior);
	// lws_buflist_describe(&wsi->buflist, wsi, __func__);

	(void)hint;
	if (!ebuf->token)
		ebuf->token = pt->serv_buf + LWS_PRE;
	if (!ebuf->len ||
	    (unsigned int)ebuf->len > wsi->context->pt_serv_buf_size - LWS_PRE)
		ebuf->len = wsi->context->pt_serv_buf_size - LWS_PRE;

	e = ebuf->len;
	ep = ebuf->token;

	/* h2 or muxed stream... must force the read due to HOL blocking */

	if (wsi->mux_substream)
		fr = 1;

	/* there's something on the buflist? */

	bns = (int)lws_buflist_next_segment_len(&wsi->buflist, &ebuf->token);
	b = ebuf->token;

	if (!fr && bns)
		goto buflist_material;

	/* we're going to read something */

	ebuf->token = ep;
	ebuf->len = n = lws_ssl_capable_read(wsi, ep, e);

	lwsl_info("%s: wsi %p: %s: ssl_capable_read %d\n", __func__,
			wsi, hint, ebuf->len);

	if (!bns && /* only acknowledge the error when nothing is buffered */
	    n == LWS_SSL_CAPABLE_ERROR) {
		lwsl_debug("%s: SSL_CAPABLE_ERROR\n", __func__);
		return -1;
	}

	if (n <= 0 && bns)
		/*
		 * There wasn't anything to read yet, but there's something
		 * on the buflist to give him
		 */
		goto buflist_material;

	/* we read something */

	if (fr && bns) {
		/*
		 * Stash what we read, since there's earlier buflist material
		 */

		n = lws_buflist_append_segment(&wsi->buflist, ebuf->token, ebuf->len);
		if (n < 0)
			return -1;
		if (n && lws_dll2_is_detached(&wsi->dll_buflist))
			lws_dll2_add_head(&wsi->dll_buflist,
					  &pt->dll_buflist_owner);

		goto buflist_material;
	}

	/*
	 * directly return what we read
	 */

	return 0;

buflist_material:

	ebuf->token = b;
	if (e < bns)
		/* restrict to e, if more than e available */
		ebuf->len = e;
	else
		ebuf->len = bns;

	return 1; /* from buflist */
}

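/*
 * Report back how much of the ebuf returned by lws_buflist_aware_read() was
 * actually consumed, so any remainder can be kept on the buflist and the wsi
 * added to / removed from the pt list of wsi with buffered rx as needed.
 */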
int
lws_buflist_aware_finished_consuming(struct lws *wsi, struct lws_tokens *ebuf,
				     int used, int buffered, const char *hint)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	int m;

	//lwsl_debug("%s %s consuming buffered %d used %zu / %zu\n", __func__, hint,
	//		buffered, (size_t)used, (size_t)ebuf->len);
	// lws_buflist_describe(&wsi->buflist, wsi, __func__);

	/* it's in the buflist; we didn't use any */

	if (!used && buffered)
		return 0;

	if (used && buffered) {
		if (wsi->buflist) {
			m = (int)lws_buflist_use_segment(&wsi->buflist, (size_t)used);
			// lwsl_notice("%s: used %d, next %d\n", __func__, used, m);
			// lws_buflist_describe(&wsi->buflist, wsi, __func__);
			if (m)
				return 0;
		}

		lwsl_info("%s: removed %p from dll_buflist\n", __func__, wsi);
		lws_dll2_remove(&wsi->dll_buflist);

		return 0;
	}

	/* any remainder goes on the buflist */

	if (used != ebuf->len) {
		// lwsl_notice("%s %s bac appending %d\n", __func__, hint,
		//		ebuf->len - used);
		m = lws_buflist_append_segment(&wsi->buflist,
					       ebuf->token + used,
					       ebuf->len - used);
		if (m < 0)
			return 1; /* OOM */
		if (m) {
			lwsl_debug("%s: added %p to rxflow list\n",
				   __func__, wsi);
			if (lws_dll2_is_detached(&wsi->dll_buflist))
				lws_dll2_add_head(&wsi->dll_buflist,
					 &pt->dll_buflist_owner);
		}
		// lws_buflist_describe(&wsi->buflist, wsi, __func__);
	}

	return 0;
}

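/*
 * Synthesize a POLLIN service for any wsi on this pt that has buffered rx
 * pending and has reached a state where it can accept it.
 */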
void
lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt)
{
	struct lws_pollfd pfd;

	if (!pt->dll_buflist_owner.head)
		return;

	/*
	 * service all guys with pending rxflow that reached a state they can
	 * accept the pending data
	 */

	lws_pt_lock(pt, __func__);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		pfd.events = LWS_POLLIN;
		pfd.revents = LWS_POLLIN;
		pfd.fd = -1;

		lwsl_debug("%s: rxflow processing: %p fc=%d, 0x%lx\n", __func__,
			   wsi, lws_is_flowcontrolled(wsi),
			   (unsigned long)wsi->wsistate);

		if (!lws_is_flowcontrolled(wsi) &&
		    lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
			pt->inside_lws_service = 1;

			if ((wsi->role_ops->handle_POLLIN)(pt, wsi, &pfd) ==
						   LWS_HPI_RET_PLEASE_CLOSE_ME)
				lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
						"close_and_handled");
			pt->inside_lws_service = 0;
		}

	} lws_end_foreach_dll_safe(d, d1);

	lws_pt_unlock(pt);
}

/*
 * guys that need POLLIN service again without waiting for network action
 * can force POLLIN here if not flowcontrolled, so they will get service.
 *
 * Return nonzero if anybody got their POLLIN faked
 */
int
lws_service_flag_pending(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt;
	int forced = 0;

	if (!context)
		return 1;

	pt = &context->pt[tsi];

	lws_pt_lock(pt, __func__);

	/*
	 * 1) If there is any wsi with a buflist and in a state to process
	 *    it, we should not wait in poll
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
		struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);

		if (!lws_is_flowcontrolled(wsi) &&
		     lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
			forced = 1;
			break;
		}
	} lws_end_foreach_dll(d);

#if defined(LWS_ROLE_WS)
	forced |= role_ops_ws.service_flag_pending(context, tsi);
#endif

#if defined(LWS_WITH_TLS)
	/*
	 * 2) For all guys with buffered SSL read data already saved up, if they
	 * are not flowcontrolled, fake their POLLIN status so they'll get
	 * service to use up the buffered incoming data, even though their
	 * network socket may have nothing
	 */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
			lws_dll2_get_head(&pt->tls.dll_pending_tls_owner)) {
		struct lws *wsi = lws_container_of(p, struct lws,
						   tls.dll_pending_tls);

		if (wsi->position_in_fds_table >= 0) {

			pt->fds[wsi->position_in_fds_table].revents |=
				pt->fds[wsi->position_in_fds_table].events &
								     LWS_POLLIN;
			if (pt->fds[wsi->position_in_fds_table].revents &
								    LWS_POLLIN) {
				forced = 1;
				/*
				 * he's going to get serviced now, take him off
				 * the list of guys with buffered SSL.  If he
				 * still has some at the end of the service,
				 * he'll get put back on the list then.
				 */
				__lws_ssl_remove_wsi_from_buffered_list(wsi);
			}
		}

	} lws_end_foreach_dll_safe(p, p1);
#endif

	lws_pt_unlock(pt);

	return forced;
}

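/*
 * Service whatever the given pollfd reported, on service thread tsi.
 * Judging by the code below: returns 1 if the wsi was closed during handling,
 * 0 if it was serviced (pollfd->revents is then zeroed) or the fd does not
 * belong to lws (revents left alone), and -1 on invalid arguments or a
 * context that is being destroyed.
 */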
int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd,
		   int tsi)
{
	struct lws_context_per_thread *pt;
	struct lws *wsi;

	if (!context || context->being_destroyed1)
		return -1;

	pt = &context->pt[tsi];

	if (!pollfd) {
		/*
		 * calling with NULL pollfd for periodic background processing
		 * is no longer needed and is now illegal.
		 */
		assert(pollfd);
		return -1;
	}
	assert(lws_socket_is_valid(pollfd->fd));

	/* we are here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
	if (!wsi)
		/* not lws connection ... leave revents alone and return */
		return 0;

#if LWS_MAX_SMP > 1
	if (wsi->undergoing_init_from_other_pt)
		/*
		 * Temporary situation that other service thread is initializing
		 * this wsi right now for use on our service thread.
		 */
		return 0;
#endif

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

	/* handle session socket closed */

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		wsi->socket_is_permanently_unusable = 1;
		lwsl_debug("Session Socket %p (fd=%d) dead\n",
			   (void *)wsi, pollfd->fd);

		goto close_and_handled;
	}

#ifdef _WIN32
	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;
#endif

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		lwsl_debug("pollhup\n");
		wsi->socket_is_permanently_unusable = 1;
		goto close_and_handled;
	}

#if defined(LWS_WITH_TLS)
	if (lwsi_state(wsi) == LRS_SHUTDOWN &&
	    lws_is_ssl(wsi) && wsi->tls.ssl) {
		switch (__lws_tls_shutdown(wsi)) {
		case LWS_SSL_CAPABLE_DONE:
		case LWS_SSL_CAPABLE_ERROR:
			goto close_and_handled;

		case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
		case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			goto handled;
		}
	}
#endif
	wsi->could_have_pending = 0; /* clear back-to-back write detection */
	pt->inside_lws_service = 1;

	/* okay, what we came here to do... */

	/* if we got here, we should have wire protocol ops set on the wsi */
	assert(wsi->role_ops);

	// lwsl_notice("%s: %s: wsistate 0x%x\n", __func__, wsi->role_ops->name,
	//	    wsi->wsistate);

	switch ((wsi->role_ops->handle_POLLIN)(pt, wsi, pollfd)) {
	case LWS_HPI_RET_WSI_ALREADY_DIED:
		pt->inside_lws_service = 0;
		return 1;
	case LWS_HPI_RET_HANDLED:
		break;
	case LWS_HPI_RET_PLEASE_CLOSE_ME:
close_and_handled:
		lwsl_debug("%p: Close and handled\n", wsi);
		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
				   "close_and_handled");
#if defined(_DEBUG) && defined(LWS_WITH_LIBUV)
		/*
		 * confirm close has no problem being called again while
		 * it waits for libuv service to complete the first async
		 * close
		 */
		if (context->event_loop_ops == &event_loop_ops_uv)
			lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
					   "close_and_handled uv repeat test");
#endif
		/*
		 * pollfd may point to something else after the close
		 * due to pollfd swapping scheme on delete on some platforms
		 * we can't clear revents now because it'd be the wrong guy's
		 * revents
		 */
		pt->inside_lws_service = 0;
		return 1;
	default:
		assert(0);
	}
#if defined(LWS_WITH_TLS)
handled:
#endif
	pollfd->revents = 0;
	pt->inside_lws_service = 0;

	return 0;
}

int
lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
{
	return lws_service_fd_tsi(context, pollfd, 0);
}

int
lws_service(struct lws_context *context, int timeout_ms)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!context)
		return 1;

	pt = &context->pt[0];
	pt->inside_service = 1;

	if (context->event_loop_ops->run_pt) {
		/* we are configured for an event loop */
		context->event_loop_ops->run_pt(context, 0);

		pt->inside_service = 0;

		return 1;
	}
	n = lws_plat_service(context, timeout_ms);

	pt->inside_service = 0;

	return n;
}
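
/*
 * In the default (non-eventlib) case an application typically just spins on
 * this.  A minimal sketch (illustrative only; context creation is omitted and
 * "interrupted" is a hypothetical application flag set from its signal
 * handler):
 *
 *	int n = 0;
 *
 *	while (n >= 0 && !interrupted)
 *		n = lws_service(context, 0);
 *
 *	lws_context_destroy(context);
 */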

int
lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!context)
		return 1;

	pt = &context->pt[tsi];
	pt->inside_service = 1;
#if LWS_MAX_SMP > 1
	pt->self = pthread_self();
#endif

	if (context->event_loop_ops->run_pt) {
		/* we are configured for an event loop */
		context->event_loop_ops->run_pt(context, tsi);

		pt->inside_service = 0;

		return 1;
	}

	n = _lws_plat_service_tsi(context, timeout_ms, tsi);

	pt->inside_service = 0;

	return n;
}