• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * libwebsockets - small server side websockets and web server implementation
3  *
4  * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #if !defined(_GNU_SOURCE)
26 #define _GNU_SOURCE
27 #endif
28 #include "private-lib-core.h"
29 
/*
 * Sample a single listen fd for pending events without blocking.
 *
 * Returns the raw poll() result: > 0 if an event is pending on the fd,
 * 0 if nothing is pending, < 0 on poll() error.
 */
int
lws_poll_listen_fd(struct lws_pollfd *fd)
{
	int ret;

	/* zero timeout: only collect revents, never sleep */
	ret = poll(fd, 1, 0);

	return ret;
}
35 
36 int
_lws_plat_service_forced_tsi(struct lws_context * context,int tsi)37 _lws_plat_service_forced_tsi(struct lws_context *context, int tsi)
38 {
39 	struct lws_context_per_thread *pt = &context->pt[tsi];
40 	int m, n, r;
41 
42 	r = lws_service_flag_pending(context, tsi);
43 
44 	/* any socket with events to service? */
45 	for (n = 0; n < (int)pt->fds_count; n++) {
46 		if (!pt->fds[n].revents)
47 			continue;
48 
49 		m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
50 		if (m < 0) {
51 			lwsl_err("%s: lws_service_fd_tsi returned %d\n",
52 				 __func__, m);
53 			return -1;
54 		}
55 		/* if something closed, retry this slot */
56 		if (m)
57 			n--;
58 	}
59 
60 	lws_service_do_ripe_rxflow(pt);
61 
62 	return r;
63 }
64 
65 #define LWS_POLL_WAIT_LIMIT 2000000000
66 
/*
 * One iteration of the poll()-based event loop for service thread `tsi`:
 * service ripe scheduled (sul) events, wait in poll() for fd activity or
 * the next scheduled event, apply pollfd changes queued by foreign
 * threads, then service any fds with pending events.
 *
 * Returns 0 normally, 1 if the context is unusable/dead, -1 on fatal
 * service error or after the pt requested its own destruction.
 */
int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	volatile struct lws_foreign_thread_pollfd *ftp, *next;
	volatile struct lws_context_per_thread *vpt;
	struct lws_context_per_thread *pt;
	lws_usec_t timeout_us, us;
	int n = -1;
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
	int m;
#endif

	/* stay dead once we are dead */

	if (!context || !context->vhost_list)
		return 1;

	pt = &context->pt[tsi];
	/* volatile alias of pt for the fields foreign threads also touch */
	vpt = (volatile struct lws_context_per_thread *)pt;

	lws_stats_bump(pt, LWSSTATS_C_SERVICE_ENTRY, 1);

	/*
	 * NOTE(review): any nonnegative caller timeout is deliberately
	 * replaced with LWS_POLL_WAIT_LIMIT; the actual wait is clamped
	 * below to the next scheduled sul event, so timeout_ms only
	 * distinguishes "don't wait" (< 0) from "wait normally".
	 */
	if (timeout_ms < 0)
		timeout_ms = 0;
	else
		/* force a default timeout of 23 days */
		timeout_ms = LWS_POLL_WAIT_LIMIT;
	timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;

	if (context->event_loop_ops->run_pt)
		context->event_loop_ops->run_pt(context, tsi);

	/*
	 * one-time: ask protocols[0] of the first vhost for this service
	 * thread's id, via a zeroed stand-in wsi carrying only the context
	 */
	if (!pt->service_tid_detected) {
		struct lws _lws;

		memset(&_lws, 0, sizeof(_lws));
		_lws.context = context;

		pt->service_tid = context->vhost_list->protocols[0].callback(
					&_lws, LWS_CALLBACK_GET_THREAD_ID,
					NULL, NULL, 0);
		pt->service_tid_detected = 1;
	}

	us = lws_now_usecs();
	lws_pt_lock(pt, __func__);
	/*
	 * service ripe scheduled events, and limit wait to next expected one
	 */
	us = __lws_sul_service_ripe(&pt->pt_sul_owner, us);
	if (us && us < timeout_us)
		timeout_us = us;

	lws_pt_unlock(pt);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(context, 1, tsi))
		/* -> don't block in poll(); forced service happens below */
		timeout_us = 0;

	/* ensure we don't wrap at 2^31 with poll()'s signed int ms */

	timeout_us /= LWS_US_PER_MS; /* ms now */

	/*
	 * publish that we are inside poll() so foreign threads queue their
	 * pollfd changes instead of applying them; barriers order the flag
	 * writes against the poll() call itself
	 */
	vpt->inside_poll = 1;
	lws_memory_barrier();
	n = poll(pt->fds, pt->fds_count, timeout_us /* ms now */ );
	vpt->inside_poll = 0;
	lws_memory_barrier();

	#if defined(LWS_WITH_DETAILED_LATENCY)
	/*
	 * so we can track how long it took before we actually read a
	 * POLLIN that was signalled when we last exited poll()
	 */
	if (context->detailed_latency_cb)
		pt->ust_left_poll = lws_now_usecs();
#endif

	/* Collision will be rare and brief.  Spin until it completes */
	while (vpt->foreign_spinlock)
		;

	/*
	 * At this point we are not inside a foreign thread pollfd
	 * change, and we have marked ourselves as outside the poll()
	 * wait.  So we are the only guys that can modify the
	 * lws_foreign_thread_pollfd list on the pt.  Drain the list
	 * and apply the changes to the affected pollfds in the correct
	 * order.
	 */

	lws_pt_lock(pt, __func__);

	ftp = vpt->foreign_pfd_list;
	//lwsl_notice("cleared list %p\n", ftp);
	while (ftp) {
		struct lws *wsi;
		struct lws_pollfd *pfd;

		next = ftp->next;
		pfd = &vpt->fds[ftp->fd_index];
		if (lws_socket_is_valid(pfd->fd)) {
			wsi = wsi_from_fd(context, pfd->fd);
			if (wsi)
				/* apply the queued events mask change */
				__lws_change_pollfd(wsi, ftp->_and,
						    ftp->_or);
		}
		/* list nodes were allocated by the foreign thread; free here */
		lws_free((void *)ftp);
		ftp = next;
	}
	vpt->foreign_pfd_list = NULL;
	lws_memory_barrier();

	lws_pt_unlock(pt);

	/*
	 * m records buffered input that poll() cannot see: ws extension
	 * rx-drain and/or tls-layer buffered data needing fake POLLIN
	 */
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
	m = 0;
#endif
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
	m |= !!pt->ws.rx_draining_ext_list;
#endif

#if defined(LWS_WITH_TLS)
	if (pt->context->tls_ops &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered)
		m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
#endif

	if (
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
		!m &&
#endif
		!n) { /* nothing to do */
		lws_service_do_ripe_rxflow(pt);

		return 0;
	}

	/* service everything with pending events or buffered input */
	if (_lws_plat_service_forced_tsi(context, tsi) < 0)
		return -1;

	if (pt->destroy_self) {
		lws_context_destroy(pt->context);
		return -1;
	}

	return 0;
}
217 
/*
 * Public single-threaded service entry: run one service iteration on
 * the default service thread.  Returns _lws_plat_service_tsi()'s result.
 */
int
lws_plat_service(struct lws_context *context, int timeout_ms)
{
	const int default_tsi = 0;

	return _lws_plat_service_tsi(context, timeout_ms, default_tsi);
}
223