1 /*
2 * libwebsockets - small server side websockets and web server implementation
3 *
4 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "private-lib-core.h"
26
int
lws_plat_service(struct lws_context *context, int timeout_ms)
{
	/*
	 * Service thread-service-index 0 (the only tsi on this platform),
	 * then kick the esp32 task watchdog so a long service interval
	 * does not trip it.
	 */
	int ret = _lws_plat_service_tsi(context, timeout_ms, 0);

#if !defined(LWS_AMAZON_RTOS)
	/* esp-idf task watchdog: tell it we are still alive */
	esp_task_wdt_reset();
#endif

	return ret;
}
38
39
int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt;
	lws_usec_t timeout_us;
	int n = -1, m, c, a = 0;

	/* stay dead once we are dead */

	if (!context || !context->vhost_list)
		return 1;

	pt = &context->pt[tsi];
	lws_stats_bump(pt, LWSSTATS_C_SERVICE_ENTRY, 1);

	/*
	 * At most once per second, log the free-heap delta since the last
	 * report (esp32 heap exhaustion is a common failure mode, so this
	 * gives a cheap running trace of heap movement).
	 */
	{
		/* NB: this 'm' (seconds) deliberately shadows the outer int m */
		unsigned long m = lws_now_secs();

		if (m > context->time_last_state_dump) {
			context->time_last_state_dump = m;
#if defined(LWS_AMAZON_RTOS)
			n = xPortGetFreeHeapSize();
#else
			n = esp_get_free_heap_size();
#endif
			if ((unsigned int)n != context->last_free_heap) {
				if ((unsigned int)n > context->last_free_heap)
					lwsl_notice(" heap :%ld (+%ld)\n",
						    (unsigned long)n,
						    (unsigned long)(n -
						      context->last_free_heap));
				else
					lwsl_notice(" heap :%ld (-%ld)\n",
						    (unsigned long)n,
						    (unsigned long)(
						      context->last_free_heap -
						      n));
				context->last_free_heap = n;
			}
		}
	}

	/*
	 * Normalize the requested wait: negative means "don't wait", any
	 * non-negative request is replaced by a very long cap (~23 days);
	 * the scheduled-event clamp below shortens it to the next hr timeout.
	 * NOTE(review): this discards a caller-supplied positive timeout_ms —
	 * looks intentional (sul timeouts drive the real wakeup) but confirm
	 * against the generic plat service implementations.
	 */
	if (timeout_ms < 0)
		timeout_ms = 0;
	else
		/* force a default timeout of 23 days */
		timeout_ms = 2000000000;
	timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;

	/*
	 * One-time: discover this service thread's id by asking the first
	 * protocol via LWS_CALLBACK_GET_THREAD_ID on the fake wsi.
	 */
	if (!pt->service_tid_detected) {
		struct lws *_lws = pt->fake_wsi;

		if (!_lws)
			return 1;
		_lws->context = context;

		pt->service_tid = context->vhost_list->protocols[0].callback(
			_lws, LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
		pt->service_tid_detected = 1;
	}

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (lws_service_adjust_timeout(context, 1, tsi)) {

again:
		a = 0;
		if (timeout_us) {
			lws_usec_t us;

			lws_pt_lock(pt, __func__);
			/* don't stay in poll wait longer than next hr timeout */
			us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
			if (us && us < timeout_us)
				timeout_us = us;

			lws_pt_unlock(pt);
		}

		/*
		 * Emulate poll() with select(): mirror pt->fds events into
		 * fd_sets, wait, then translate readiness back into revents.
		 */
//		n = poll(pt->fds, pt->fds_count, timeout_ms);
		{
			fd_set readfds, writefds, errfds;
			struct timeval tv = { timeout_us / LWS_US_PER_SEC,
					      timeout_us % LWS_US_PER_SEC }, *ptv = &tv;
			int max_fd = 0;
			FD_ZERO(&readfds);
			FD_ZERO(&writefds);
			FD_ZERO(&errfds);

			for (n = 0; n < (int)pt->fds_count; n++) {
				pt->fds[n].revents = 0;
				if (pt->fds[n].fd >= max_fd)
					max_fd = pt->fds[n].fd;
				if (pt->fds[n].events & LWS_POLLIN)
					FD_SET(pt->fds[n].fd, &readfds);
				if (pt->fds[n].events & LWS_POLLOUT)
					FD_SET(pt->fds[n].fd, &writefds);
				/* always watch every fd for error conditions */
				FD_SET(pt->fds[n].fd, &errfds);
			}

			/*
			 * select()'s return is deliberately discarded; n is
			 * recounted below from FD_ISSET so it matches the
			 * revents we actually set.
			 * NOTE(review): a select() error (-1) is therefore
			 * treated the same as a timeout — confirm that is the
			 * intended behavior on this platform.
			 */
			n = select(max_fd + 1, &readfds, &writefds, &errfds, ptv);
			n = 0;

#if defined(LWS_WITH_DETAILED_LATENCY)
		/*
		 * so we can track how long it took before we actually read a POLLIN
		 * that was signalled when we last exited poll()
		 */
		if (context->detailed_latency_cb)
			pt->ust_left_poll = lws_now_usecs();
#endif

			/* translate fd_set readiness back into poll-style revents */
			for (m = 0; m < (int)pt->fds_count; m++) {
				c = 0;
				if (FD_ISSET(pt->fds[m].fd, &readfds)) {
					pt->fds[m].revents |= LWS_POLLIN;
					c = 1;
				}
				if (FD_ISSET(pt->fds[m].fd, &writefds)) {
					pt->fds[m].revents |= LWS_POLLOUT;
					c = 1;
				}
				if (FD_ISSET(pt->fds[m].fd, &errfds)) {
					// lwsl_notice("errfds %d\n", pt->fds[m].fd);
					pt->fds[m].revents |= LWS_POLLHUP;
					c = 1;
				}

				if (c)
					n++;	/* count of fds with any event */
			}
		}

		m = 0;

#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
		/* ws extensions draining rx need service even without fd events */
		m |= !!pt->ws.rx_draining_ext_list;
#endif

		/* tls may have buffered rx that must be drained as fake POLLIN */
		if (pt->context->tls_ops &&
		    pt->context->tls_ops->fake_POLLIN_for_buffered)
			m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);

		/* nothing buffered and no fd events: done this pass */
		if (!m && !n)
			return 0;
	} else
		/* service was forced: remember to loop again after this pass */
		a = 1;

	/*
	 * Establish c = how many fds may need service this pass: unknown
	 * (-1) if flagging found pending work, else the counted fd events.
	 */
	m = lws_service_flag_pending(context, tsi);
	if (m)
		c = -1; /* unknown limit */
	else
		if (n < 0) {
			if (LWS_ERRNO != LWS_EINTR)
				return -1;
			return 0;
		} else
			c = n;

	/* any socket with events to service? */
	for (n = 0; n < (int)pt->fds_count && c; n++) {
		if (!pt->fds[n].revents)
			continue;

		c--;

		m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
		if (m < 0)
			return -1;
		/* if something closed, retry this slot */
		if (m)
			n--;
	}

	/* forced-service path: take another full pass through the waiter */
	if (a)
		goto again;

	return 0;
}
220