/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#if !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif
#include "private-lib-core.h"

int
lws_poll_listen_fd(struct lws_pollfd *fd)
{
	return poll(fd, 1, 0);
}

int
_lws_plat_service_forced_tsi(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	int m, n, r;

	r = lws_service_flag_pending(context, tsi);

	/* any socket with events to service? */
	for (n = 0; n < (int)pt->fds_count; n++) {
		lws_sockfd_type fd = pt->fds[n].fd;

		if (!pt->fds[n].revents)
			continue;

		m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
		if (m < 0) {
			lwsl_err("%s: lws_service_fd_tsi returned %d\n",
				 __func__, m);
			return -1;
		}

		/* if something closed, retry this slot, since it may have
		 * been swapped with the end fd */
		if (m && pt->fds[n].fd != fd)
			n--;
	}

	lws_service_do_ripe_rxflow(pt);

	return r;
}

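/*
 * Illustrative sketch (not part of the build): the "retry this slot"
 * logic above exists because closing a connection during service
 * removes its entry from pt->fds[] by swapping the last entry into its
 * slot and shrinking the count.  With hypothetical names, that removal
 * pattern looks like:
 *
 *	// remove slot i from a compact pollfd array of n entries
 *	static int
 *	pfds_remove(struct pollfd *pfds, int n, int i)
 *	{
 *		pfds[i] = pfds[n - 1];	// end entry overwrites slot i
 *		return n - 1;		// new count
 *	}
 *
 * Afterwards slot i holds a different, possibly not-yet-serviced fd,
 * which is why the loop above decrements its index and examines the
 * same slot again when the fd there has changed.
 */
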
#define LWS_POLL_WAIT_LIMIT 2000000000
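/* ~23 days in ms; deliberately below INT_MAX (2147483647) so the later
 * conversion to poll()'s signed int ms argument cannot wrap */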

int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	volatile struct lws_foreign_thread_pollfd *ftp, *next;
	volatile struct lws_context_per_thread *vpt;
	struct lws_context_per_thread *pt;
	lws_usec_t timeout_us, us;
#if defined(LWS_WITH_SYS_METRICS)
	lws_usec_t a, b;
#endif
	int n;
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
	int m;
#endif

	/* stay dead once we are dead */

	if (!context)
		return 1;

#if defined(LWS_WITH_SYS_METRICS)
	b =
#endif
			us = lws_now_usecs();

	pt = &context->pt[tsi];
	vpt = (volatile struct lws_context_per_thread *)pt;

	if (timeout_ms < 0)
		timeout_ms = 0;
	else
		/* force a default timeout of 23 days */
		timeout_ms = LWS_POLL_WAIT_LIMIT;
	timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;

	if (context->event_loop_ops->run_pt)
		context->event_loop_ops->run_pt(context, tsi);

	if (!pt->service_tid_detected && context->vhost_list) {
		lws_fakewsi_def_plwsa(pt);

		lws_fakewsi_prep_plwsa_ctx(context);

		pt->service_tid = context->vhost_list->protocols[0].callback(
					(struct lws *)plwsa,
					LWS_CALLBACK_GET_THREAD_ID,
					NULL, NULL, 0);
		pt->service_tid_detected = 1;
	}
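	/*
	 * The block above obtains a thread id for the service thread by
	 * asking the first user protocol via LWS_CALLBACK_GET_THREAD_ID
	 * on a fake wsi.  An illustrative handler in a protocol callback
	 * (assuming pthreads; returning 0 is also acceptable) might be:
	 *
	 *	case LWS_CALLBACK_GET_THREAD_ID:
	 *		return (int)(lws_intptr_t)pthread_self();
	 *
	 * lws uses the detected tid to tell whether later api calls are
	 * coming from the service thread or a foreign one.
	 */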

	lws_pt_lock(pt, __func__);
	/*
	 * service ripe scheduled events, and limit wait to next expected one
	 */
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS, us);
	if (us && us < timeout_us)
		/*
		 * If something wants zero wait, that's OK, but if the next sul
		 * coming ripe is an interval less than our wait resolution,
		 * bump it to be the wait resolution.
		 */
		timeout_us = us < context->us_wait_resolution ?
					context->us_wait_resolution : us;
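	/*
	 * e.g., with a hypothetical 10ms us_wait_resolution: a sul due
	 * in 2ms still yields a 10ms wait, one due in 50ms yields a
	 * 50ms wait, and no pending sul leaves the 23-day limit in place.
	 */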

	lws_pt_unlock(pt);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(context, 1, tsi))
		timeout_us = 0;

	/* ensure we don't wrap at 2^31 with poll()'s signed int ms */

	timeout_us /= LWS_US_PER_MS; /* ms now */

#if defined(LWS_WITH_SYS_METRICS)
	a = lws_now_usecs() - b;
#endif
	vpt->inside_poll = 1;
	lws_memory_barrier();
	n = poll(pt->fds, pt->fds_count, (int)timeout_us /* ms now */ );
	vpt->inside_poll = 0;
	lws_memory_barrier();

#if defined(LWS_WITH_SYS_METRICS)
	b = lws_now_usecs();
#endif
	/* Collision will be rare and brief.  Spin until it completes */
	while (vpt->foreign_spinlock)
		;

	/*
	 * At this point we are not inside a foreign thread pollfd
	 * change, and we have marked ourselves as outside the poll()
	 * wait.  So we are the only guys that can modify the
	 * lws_foreign_thread_pollfd list on the pt.  Drain the list
	 * and apply the changes to the affected pollfds in the correct
	 * order.
	 */

	lws_pt_lock(pt, __func__);

	ftp = vpt->foreign_pfd_list;
	//lwsl_notice("cleared list %p\n", ftp);
	while (ftp) {
		struct lws *wsi;
		struct lws_pollfd *pfd;

		next = ftp->next;
		pfd = &vpt->fds[ftp->fd_index];
		if (lws_socket_is_valid(pfd->fd)) {
			wsi = wsi_from_fd(context, pfd->fd);
			if (wsi)
				__lws_change_pollfd(wsi, ftp->_and,
						    ftp->_or);
		}
		lws_free((void *)ftp);
		ftp = next;
	}
	vpt->foreign_pfd_list = NULL;
	lws_memory_barrier();

	lws_pt_unlock(pt);

#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
	m = 0;
#endif
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
	m |= !!pt->ws.rx_draining_ext_list;
#endif

#if defined(LWS_WITH_TLS)
	if (pt->context->tls_ops &&
	    pt->context->tls_ops->fake_POLLIN_for_buffered)
		m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
#endif

	if (
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
		!m &&
#endif
		!n) /* nothing to do */
		lws_service_do_ripe_rxflow(pt);
	else
		if (_lws_plat_service_forced_tsi(context, tsi) < 0)
			return -1;

#if defined(LWS_WITH_SYS_METRICS)
	lws_metric_event(context->mt_service, METRES_GO,
			 (u_mt_t) (a + (lws_now_usecs() - b)));
#endif

	if (pt->destroy_self) {
		lws_context_destroy(pt->context);
		return -1;
	}

	return 0;
}
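
/*
 * Illustrative sketch (not part of the build): the drain loop above is
 * the consumer half of a cross-thread handoff.  A thread other than the
 * service thread must not touch pt->fds[] while poll() may be watching
 * it, so it instead queues the POLLIN/POLLOUT change it wants and lets
 * the service thread apply it here, after poll() returns.  The producer
 * side has roughly this shape (hypothetical locals, but the ftp fields
 * match those consumed above):
 *
 *	ftp = lws_malloc(sizeof(*ftp), __func__);
 *	ftp->fd_index = fd_index;	// slot in pt->fds[] to adjust
 *	ftp->_and = _and;		// event bits to clear
 *	ftp->_or = _or;			// event bits to set
 *	ftp->next = NULL;
 *
 *	vpt->foreign_spinlock = 1;	// briefly exclude the drain loop
 *	lws_memory_barrier();
 *	// append to vpt->foreign_pfd_list, preserving order
 *	lws_memory_barrier();
 *	vpt->foreign_spinlock = 0;
 *
 * The inside_poll flag and the spinlock together are what make the
 * unlocked read of vpt->foreign_pfd_list above safe.
 */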

int
lws_plat_service(struct lws_context *context, int timeout_ms)
{
	return _lws_plat_service_tsi(context, timeout_ms, 0);
}
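
/*
 * Illustrative sketch (not part of the build): applications do not call
 * lws_plat_service() directly; on this poll() backend it is reached via
 * the public lws_service() api.  A minimal single-threaded service loop
 * looks like this (error handling and signal-driven shutdown elided):
 *
 *	#include <libwebsockets.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		struct lws_context_creation_info info;
 *		struct lws_context *cx;
 *		int n = 0;
 *
 *		memset(&info, 0, sizeof info);
 *		info.port = CONTEXT_PORT_NO_LISTEN;
 *
 *		cx = lws_create_context(&info);
 *		if (!cx)
 *			return 1;
 *
 *		// the timeout arg is effectively superseded: as seen
 *		// above, non-negative values become LWS_POLL_WAIT_LIMIT
 *		// and the real wait comes from the sul scheduler
 *		while (n >= 0)
 *			n = lws_service(cx, 0);
 *
 *		lws_context_destroy(cx);
 *
 *		return 0;
 *	}
 */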