1 /*
2 * libwebsockets - small server side websockets and web server implementation
3 *
4 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "private-lib-core.h"
26
27 #if defined (_DEBUG)
/* debug-build helper: replace the role bits in the wsi state word and log it */
void lwsi_set_role(struct lws *wsi, lws_wsi_state_t role)
{
	lws_wsi_state_t ns = (wsi->wsistate & ~LWSI_ROLE_MASK) | role;

	wsi->wsistate = ns;
	lwsl_debug("lwsi_set_role(%p, 0x%lx)\n", wsi, (unsigned long)ns);
}
35
/* debug-build helper: replace the lws state bits in the wsi state word and log it */
void lwsi_set_state(struct lws *wsi, lws_wsi_state_t lrs)
{
	lws_wsi_state_t ns = (wsi->wsistate & ~LRS_MASK) | lrs;

	wsi->wsistate = ns;
	lwsl_debug("lwsi_set_state(%p, 0x%lx)\n", wsi, (unsigned long)ns);
}
43 #endif
44
45
/*
 * Bind wsi to vhost vh, accounting for the binding in vh->count_bound_wsi.
 * The count is mutated under the context lock; vhost destruction waits for
 * it to reach zero (see lws_vhost_unbind_wsi()).  No-op if already bound.
 */
void
lws_vhost_bind_wsi(struct lws_vhost *vh, struct lws *wsi)
{
	if (wsi->vhost == vh)
		return;
	lws_context_lock(vh->context, __func__); /* ---------- context { */
	wsi->vhost = vh;
	vh->count_bound_wsi++;
	lws_context_unlock(vh->context); /* } context ---------- */
	/* logging / assert outside the lock: count may be stale but nonzero */
	lwsl_debug("%s: vh %s: wsi %s/%s, count_bound_wsi %d\n", __func__,
		   vh->name, wsi->role_ops ? wsi->role_ops->name : "none",
		   wsi->protocol ? wsi->protocol->name : "none",
		   vh->count_bound_wsi);
	assert(wsi->vhost->count_bound_wsi > 0);
}
61
/*
 * Undo lws_vhost_bind_wsi(): drop wsi's reference on its vhost.  If this was
 * the last bound wsi and the vhost is already marked as being destroyed, the
 * deferred half of vhost destruction is completed here, under the context
 * lock.  Safe to call on an unbound wsi (no-op).
 */
void
lws_vhost_unbind_wsi(struct lws *wsi)
{
	if (!wsi->vhost)
		return;

	lws_context_lock(wsi->context, __func__); /* ---------- context { */

	assert(wsi->vhost->count_bound_wsi > 0);
	wsi->vhost->count_bound_wsi--;
	lwsl_debug("%s: vh %s: count_bound_wsi %d\n", __func__,
		   wsi->vhost->name, wsi->vhost->count_bound_wsi);

	if (!wsi->vhost->count_bound_wsi &&
	    wsi->vhost->being_destroyed) {
		/*
		 * We have closed all wsi that were bound to this vhost
		 * by any pt: nothing can be servicing any wsi belonging
		 * to it any more.
		 *
		 * Finalize the vh destruction
		 */
		__lws_vhost_destroy2(wsi->vhost);
	}
	/* wsi no longer points at (possibly-freed) vhost */
	wsi->vhost = NULL;

	lws_context_unlock(wsi->context); /* } context ---------- */
}
90
91 struct lws *
lws_get_network_wsi(struct lws * wsi)92 lws_get_network_wsi(struct lws *wsi)
93 {
94 if (!wsi)
95 return NULL;
96
97 #if defined(LWS_WITH_HTTP2) || defined(LWS_ROLE_MQTT)
98 if (!wsi->mux_substream
99 #if defined(LWS_WITH_CLIENT)
100 && !wsi->client_mux_substream
101 #endif
102 )
103 return wsi;
104
105 while (wsi->mux.parent_wsi)
106 wsi = wsi->mux.parent_wsi;
107 #endif
108
109 return wsi;
110 }
111
112
113 const struct lws_protocols *
lws_vhost_name_to_protocol(struct lws_vhost * vh,const char * name)114 lws_vhost_name_to_protocol(struct lws_vhost *vh, const char *name)
115 {
116 int n;
117
118 for (n = 0; n < vh->count_protocols; n++)
119 if (vh->protocols[n].name && !strcmp(name, vh->protocols[n].name))
120 return &vh->protocols[n];
121
122 return NULL;
123 }
124
125 int
lws_callback_all_protocol(struct lws_context * context,const struct lws_protocols * protocol,int reason)126 lws_callback_all_protocol(struct lws_context *context,
127 const struct lws_protocols *protocol, int reason)
128 {
129 struct lws_context_per_thread *pt = &context->pt[0];
130 unsigned int n, m = context->count_threads;
131 struct lws *wsi;
132
133 while (m--) {
134 for (n = 0; n < pt->fds_count; n++) {
135 wsi = wsi_from_fd(context, pt->fds[n].fd);
136 if (!wsi)
137 continue;
138 if (wsi->protocol == protocol)
139 protocol->callback(wsi, reason, wsi->user_space,
140 NULL, 0);
141 }
142 pt++;
143 }
144
145 return 0;
146 }
147
148 int
lws_callback_all_protocol_vhost_args(struct lws_vhost * vh,const struct lws_protocols * protocol,int reason,void * argp,size_t len)149 lws_callback_all_protocol_vhost_args(struct lws_vhost *vh,
150 const struct lws_protocols *protocol, int reason,
151 void *argp, size_t len)
152 {
153 struct lws_context *context = vh->context;
154 struct lws_context_per_thread *pt = &context->pt[0];
155 unsigned int n, m = context->count_threads;
156 struct lws *wsi;
157
158 while (m--) {
159 for (n = 0; n < pt->fds_count; n++) {
160 wsi = wsi_from_fd(context, pt->fds[n].fd);
161 if (!wsi)
162 continue;
163 if (wsi->vhost == vh && (wsi->protocol == protocol ||
164 !protocol))
165 wsi->protocol->callback(wsi, reason,
166 wsi->user_space, argp, len);
167 }
168 pt++;
169 }
170
171 return 0;
172 }
173
174 int
lws_callback_all_protocol_vhost(struct lws_vhost * vh,const struct lws_protocols * protocol,int reason)175 lws_callback_all_protocol_vhost(struct lws_vhost *vh,
176 const struct lws_protocols *protocol, int reason)
177 {
178 return lws_callback_all_protocol_vhost_args(vh, protocol, reason, NULL, 0);
179 }
180
181 int
lws_callback_vhost_protocols(struct lws * wsi,int reason,void * in,int len)182 lws_callback_vhost_protocols(struct lws *wsi, int reason, void *in, int len)
183 {
184 int n;
185
186 for (n = 0; n < wsi->vhost->count_protocols; n++)
187 if (wsi->vhost->protocols[n].callback(wsi, reason, NULL, in, len))
188 return 1;
189
190 return 0;
191 }
192
193 int
lws_callback_vhost_protocols_vhost(struct lws_vhost * vh,int reason,void * in,size_t len)194 lws_callback_vhost_protocols_vhost(struct lws_vhost *vh, int reason, void *in,
195 size_t len)
196 {
197 int n;
198 struct lws *wsi = lws_zalloc(sizeof(*wsi), "fake wsi");
199
200 if (!wsi)
201 return 1;
202
203 wsi->context = vh->context;
204 lws_vhost_bind_wsi(vh, wsi);
205
206 for (n = 0; n < wsi->vhost->count_protocols; n++) {
207 wsi->protocol = &vh->protocols[n];
208 if (wsi->protocol->callback(wsi, reason, NULL, in, len)) {
209 lws_free(wsi);
210 return 1;
211 }
212 }
213
214 lws_free(wsi);
215
216 return 0;
217 }
218
219
/*
 * Enable or disable RX flow control on wsi.
 *
 * _enable is either a simple boolean (0 = stop rx, 1 = allow rx), or, when
 * LWS_RXFLOW_REASON_APPLIES is set, a bitmap carrying a per-reason bit in the
 * low byte plus the APPLIES_ENABLE_BIT direction flag.  Each "reason" bit set
 * in wsi->rxflow_bitmap independently vetoes rx; rx is only allowed when the
 * bitmap is completely clear.
 *
 * Returns 0, or the result of applying the change immediately when
 * LWS_RXFLOW_REASON_FLAG_PROCESS_NOW is given or no callback is in flight.
 */
int
lws_rx_flow_control(struct lws *wsi, int _enable)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	int en = _enable;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	lwsl_info("%s: %p 0x%x\n", __func__, wsi, _enable);

	if (!(_enable & LWS_RXFLOW_REASON_APPLIES)) {
		/*
		 * convert user bool style to bitmap style... in user simple
		 * bool style _enable = 0 = flow control it, = 1 = allow rx
		 */
		en = LWS_RXFLOW_REASON_APPLIES | LWS_RXFLOW_REASON_USER_BOOL;
		if (_enable & 1)
			en |= LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT;
	}

	/* bitmap and change_to are shared with the service thread */
	lws_pt_lock(pt, __func__);

	/* any bit set in rxflow_bitmap DISABLEs rxflow control */
	if (en & LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT)
		wsi->rxflow_bitmap &= ~(en & 0xff);
	else
		wsi->rxflow_bitmap |= en & 0xff;

	/* no effective change in the allow/deny state? nothing to do */
	if ((LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap)) ==
	    wsi->rxflow_change_to)
		goto skip;

	wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE |
				(!wsi->rxflow_bitmap);

	lwsl_info("%s: %p: bitmap 0x%x: en 0x%x, ch 0x%x\n", __func__, wsi,
		  wsi->rxflow_bitmap, en, wsi->rxflow_change_to);

	/*
	 * apply immediately unless we are inside a user callback, in which
	 * case the deferred change is picked up afterwards (see
	 * user_callback_handle_rxflow())
	 */
	if (_enable & LWS_RXFLOW_REASON_FLAG_PROCESS_NOW ||
	    !wsi->rxflow_will_be_applied) {
		en = __lws_rx_flow_control(wsi);
		lws_pt_unlock(pt);

		return en;
	}

skip:
	lws_pt_unlock(pt);

	return 0;
}
274
275 void
lws_rx_flow_allow_all_protocol(const struct lws_context * context,const struct lws_protocols * protocol)276 lws_rx_flow_allow_all_protocol(const struct lws_context *context,
277 const struct lws_protocols *protocol)
278 {
279 const struct lws_context_per_thread *pt = &context->pt[0];
280 struct lws *wsi;
281 unsigned int n, m = context->count_threads;
282
283 while (m--) {
284 for (n = 0; n < pt->fds_count; n++) {
285 wsi = wsi_from_fd(context, pt->fds[n].fd);
286 if (!wsi)
287 continue;
288 if (wsi->protocol == protocol)
289 lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
290 }
291 pt++;
292 }
293 }
294
/*
 * Run a user protocol callback with rxflow deferral active: any
 * lws_rx_flow_control() calls made inside the callback are queued, then
 * applied here afterwards if the callback succeeded.  Returns the callback's
 * nonzero result, or the result of applying the deferred rxflow change.
 */
int user_callback_handle_rxflow(lws_callback_function callback_function,
				struct lws *wsi,
				enum lws_callback_reasons reason, void *user,
				void *in, size_t len)
{
	int ret;

	wsi->rxflow_will_be_applied = 1;
	ret = callback_function(wsi, reason, user, in, len);
	wsi->rxflow_will_be_applied = 0;

	return ret ? ret : __lws_rx_flow_control(wsi);
}
310
/*
 * Apply any pending rxflow change on wsi (and, recursively, on its children)
 * by adjusting POLLIN in the pollfd.  Caller holds the pt lock (leading
 * double-underscore convention in this codebase).  Returns 0, or -1 on
 * pollfd adjustment failure.
 */
int
__lws_rx_flow_control(struct lws *wsi)
{
	struct lws *wsic = wsi->child_list;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	/* if he has children, do those if they were changed */
	while (wsic) {
		if (wsic->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)
			__lws_rx_flow_control(wsic);

		wsic = wsic->sibling_list;
	}

	/* there is no pending change */
	if (!(wsi->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE))
		return 0;

	/* stuff is still buffered, not ready to really accept new input */
	if (lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
		/* get ourselves called back to deal with stashed buffer */
		lws_callback_on_writable(wsi);
		/* NOTE(review): deliberately falls through — the pending
		 * change is still consumed below despite buffered data */
		// return 0;
	}

	/* now the pending is cleared, we can change rxflow state */

	wsi->rxflow_change_to &= ~LWS_RXFLOW_PENDING_CHANGE;

	lwsl_info("rxflow: wsi %p change_to %d\n", wsi,
		  wsi->rxflow_change_to & LWS_RXFLOW_ALLOW);

	/* adjust the pollfd for this wsi */

	if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
		lwsl_info("%s: reenable POLLIN\n", __func__);
		// lws_buflist_describe(&wsi->buflist, NULL, __func__);
		if (__lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
			lwsl_info("%s: fail\n", __func__);
			return -1;
		}
	} else
		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
			return -1;

	return 0;
}
362
363
364 const struct lws_protocols *
lws_get_protocol(struct lws * wsi)365 lws_get_protocol(struct lws *wsi)
366 {
367 return wsi->protocol;
368 }
369
370
371 int
lws_ensure_user_space(struct lws * wsi)372 lws_ensure_user_space(struct lws *wsi)
373 {
374 if (!wsi->protocol)
375 return 0;
376
377 /* allocate the per-connection user memory (if any) */
378
379 if (wsi->protocol->per_session_data_size && !wsi->user_space) {
380 wsi->user_space = lws_zalloc(
381 wsi->protocol->per_session_data_size, "user space");
382 if (wsi->user_space == NULL) {
383 lwsl_err("%s: OOM\n", __func__);
384 return 1;
385 }
386 } else
387 lwsl_debug("%s: %p protocol pss %lu, user_space=%p\n", __func__,
388 wsi, (long)wsi->protocol->per_session_data_size,
389 wsi->user_space);
390 return 0;
391 }
392
393 void *
lws_adjust_protocol_psds(struct lws * wsi,size_t new_size)394 lws_adjust_protocol_psds(struct lws *wsi, size_t new_size)
395 {
396 ((struct lws_protocols *)lws_get_protocol(wsi))->per_session_data_size =
397 new_size;
398
399 if (lws_ensure_user_space(wsi))
400 return NULL;
401
402 return wsi->user_space;
403 }
404
405 int
lws_get_tsi(struct lws * wsi)406 lws_get_tsi(struct lws *wsi)
407 {
408 return (int)wsi->tsi;
409 }
410
/* nonzero if this connection is using TLS (always 0 in non-TLS builds) */
int
lws_is_ssl(struct lws *wsi)
{
#if defined(LWS_WITH_TLS)
	return wsi->tls.use_ssl & LCCSCF_USE_SSL;
#else
	(void)wsi;

	return 0;
#endif
}
421
422 #if defined(LWS_WITH_TLS) && !defined(LWS_WITH_MBEDTLS)
423 lws_tls_conn*
lws_get_ssl(struct lws * wsi)424 lws_get_ssl(struct lws *wsi)
425 {
426 return wsi->tls.ssl;
427 }
428 #endif
429
/* nonzero if wsi still has a partially-sent buffer pending output */
int
lws_partial_buffered(struct lws *wsi)
{
	return lws_has_buffered_out(wsi);
}
435
436 lws_fileofs_t
lws_get_peer_write_allowance(struct lws * wsi)437 lws_get_peer_write_allowance(struct lws *wsi)
438 {
439 if (!wsi->role_ops->tx_credit)
440 return -1;
441 return wsi->role_ops->tx_credit(wsi, LWSTXCR_US_TO_PEER, 0);
442 }
443
/*
 * Atomically set both the role and state bits of wsi->wsistate, optionally
 * switching the role ops vtable at the same time (ops == NULL keeps the
 * current one).  The logging block only exists when debug logs are compiled
 * in.
 */
void
lws_role_transition(struct lws *wsi, enum lwsi_role role, enum lwsi_state state,
		    const struct lws_role_ops *ops)
{
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	const char *name = "(unset)";
#endif
	wsi->wsistate = role | state;
	if (ops)
		wsi->role_ops = ops;
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	if (wsi->role_ops)
		name = wsi->role_ops->name;
	lwsl_debug("%s: %p: wsistate 0x%lx, ops %s\n", __func__, wsi,
		   (unsigned long)wsi->wsistate, name);
#endif
}
461
/*
 * Destructively split URI string p into scheme, address, port and path by
 * writing NULs into p; the returned pointers alias p (except *path, which
 * defaults to the static "/").  A missing "://" yields an empty scheme;
 * "http"/"ws" default the port to 80 and "https"/"wss" to 443; a '+' after
 * the scheme marks a unix-domain socket address (which may contain '/');
 * bracketed IPv6 literals are unwrapped.  The returned path omits its
 * leading '/'.  Always returns 0.
 */
int
lws_parse_uri(char *p, const char **prot, const char **ads, int *port,
	      const char **path)
{
	char unix_skt = 0;

	/* split off the scheme, if there is one */
	*prot = p;
	while (*p && (*p != ':' || p[1] != '/' || p[2] != '/'))
		p++;
	if (*p) {
		*p = '\0';
		p += 3;
	} else {
		/* no "://": scheme becomes the empty string at the end */
		const char *end = p;

		p = (char *)*prot;
		*prot = end;
	}

	if (*p == '+') /* unix skt */
		unix_skt = 1;

	*ads = p;

	/* well-known schemes imply a default port */
	if (!strcmp(*prot, "http") || !strcmp(*prot, "ws"))
		*port = 80;
	else if (!strcmp(*prot, "https") || !strcmp(*prot, "wss"))
		*port = 443;

	if (*p == '[') { /* bracketed ipv6 literal */
		(*ads)++;
		while (*p && *p != ']')
			p++;
		if (*p)
			*p++ = '\0';
	} else
		while (*p && *p != ':' && (unix_skt || *p != '/'))
			p++;

	/* explicit port overrides any default */
	if (*p == ':') {
		*p++ = '\0';
		*port = atoi(p);
		while (*p && *p != '/')
			p++;
	}

	/* path, reported without its leading '/' */
	*path = "/";
	if (*p) {
		*p++ = '\0';
		if (*p)
			*path = p;
	}

	return 0;
}
515
516 /* ... */
517
518 const char *
lws_get_urlarg_by_name(struct lws * wsi,const char * name,char * buf,int len)519 lws_get_urlarg_by_name(struct lws *wsi, const char *name, char *buf, int len)
520 {
521 int n = 0, sl = (int)strlen(name);
522
523 while (lws_hdr_copy_fragment(wsi, buf, len,
524 WSI_TOKEN_HTTP_URI_ARGS, n) >= 0) {
525
526 if (!strncmp(buf, name, sl))
527 return buf + sl;
528
529 n++;
530 }
531
532 return NULL;
533 }
534
535
536 #if defined(LWS_WITHOUT_EXTENSIONS)
537
538 /* we need to provide dummy callbacks for internal exts
539 * so user code runs when faced with a lib compiled with
540 * extensions disabled.
541 */
542
543 int
lws_extension_callback_pm_deflate(struct lws_context * context,const struct lws_extension * ext,struct lws * wsi,enum lws_extension_callback_reasons reason,void * user,void * in,size_t len)544 lws_extension_callback_pm_deflate(struct lws_context *context,
545 const struct lws_extension *ext,
546 struct lws *wsi,
547 enum lws_extension_callback_reasons reason,
548 void *user, void *in, size_t len)
549 {
550 (void)context;
551 (void)ext;
552 (void)wsi;
553 (void)reason;
554 (void)user;
555 (void)in;
556 (void)len;
557
558 return 0;
559 }
560
/*
 * Stub for builds without extension support: always fails with -1.
 *
 * Fix: silence unused-parameter warnings with (void) casts, consistent with
 * the lws_extension_callback_pm_deflate() stub above.
 */
int
lws_set_extension_option(struct lws *wsi, const char *ext_name,
			 const char *opt_name, const char *opt_val)
{
	(void)wsi;
	(void)ext_name;
	(void)opt_name;
	(void)opt_val;

	return -1;
}
567 #endif
568
/* nonzero if wsi currently has a CGI attached (always 0 without CGI support) */
int
lws_is_cgi(struct lws *wsi) {
#ifdef LWS_WITH_CGI
	return !!wsi->http.cgi;
#else
	(void)wsi;
	return 0;
#endif
}
577
578 const struct lws_protocol_vhost_options *
lws_pvo_search(const struct lws_protocol_vhost_options * pvo,const char * name)579 lws_pvo_search(const struct lws_protocol_vhost_options *pvo, const char *name)
580 {
581 while (pvo) {
582 if (!strcmp(pvo->name, name))
583 break;
584
585 pvo = pvo->next;
586 }
587
588 return pvo;
589 }
590
591 int
lws_pvo_get_str(void * in,const char * name,const char ** result)592 lws_pvo_get_str(void *in, const char *name, const char **result)
593 {
594 const struct lws_protocol_vhost_options *pv =
595 lws_pvo_search((const struct lws_protocol_vhost_options *)in,
596 name);
597
598 if (!pv)
599 return 1;
600
601 *result = (const char *)pv->value;
602
603 return 0;
604 }
605
606 int
lws_broadcast(struct lws_context_per_thread * pt,int reason,void * in,size_t len)607 lws_broadcast(struct lws_context_per_thread *pt, int reason, void *in, size_t len)
608 {
609 struct lws_vhost *v = pt->context->vhost_list;
610 int n, ret = 0;
611
612 pt->fake_wsi->context = pt->context;
613
614 while (v) {
615 const struct lws_protocols *p = v->protocols;
616 pt->fake_wsi->vhost = v; /* not a real bound wsi */
617
618 for (n = 0; n < v->count_protocols; n++) {
619 pt->fake_wsi->protocol = p;
620 if (p->callback &&
621 p->callback(pt->fake_wsi, reason, NULL, in, len))
622 ret |= 1;
623 p++;
624 }
625 v = v->vhost_next;
626 }
627
628 return ret;
629 }
630
631 void *
lws_wsi_user(struct lws * wsi)632 lws_wsi_user(struct lws *wsi)
633 {
634 return wsi->user_space;
635 }
636
637 void
lws_set_wsi_user(struct lws * wsi,void * data)638 lws_set_wsi_user(struct lws *wsi, void *data)
639 {
640 if (!wsi->user_space_externally_allocated && wsi->user_space)
641 lws_free(wsi->user_space);
642
643 wsi->user_space_externally_allocated = 1;
644 wsi->user_space = data;
645 }
646
647 struct lws *
lws_get_parent(const struct lws * wsi)648 lws_get_parent(const struct lws *wsi)
649 {
650 return wsi->parent;
651 }
652
653 struct lws *
lws_get_child(const struct lws * wsi)654 lws_get_child(const struct lws *wsi)
655 {
656 return wsi->child_list;
657 }
658
659 void *
lws_get_opaque_parent_data(const struct lws * wsi)660 lws_get_opaque_parent_data(const struct lws *wsi)
661 {
662 return wsi->opaque_parent_data;
663 }
664
665 void
lws_set_opaque_parent_data(struct lws * wsi,void * data)666 lws_set_opaque_parent_data(struct lws *wsi, void *data)
667 {
668 wsi->opaque_parent_data = data;
669 }
670
671 void *
lws_get_opaque_user_data(const struct lws * wsi)672 lws_get_opaque_user_data(const struct lws *wsi)
673 {
674 return wsi->opaque_user_data;
675 }
676
677 void
lws_set_opaque_user_data(struct lws * wsi,void * data)678 lws_set_opaque_user_data(struct lws *wsi, void *data)
679 {
680 wsi->opaque_user_data = data;
681 }
682
683 int
lws_get_child_pending_on_writable(const struct lws * wsi)684 lws_get_child_pending_on_writable(const struct lws *wsi)
685 {
686 return wsi->parent_pending_cb_on_writable;
687 }
688
689 void
lws_clear_child_pending_on_writable(struct lws * wsi)690 lws_clear_child_pending_on_writable(struct lws *wsi)
691 {
692 wsi->parent_pending_cb_on_writable = 0;
693 }
694
695
696
697 const char *
lws_get_vhost_name(struct lws_vhost * vhost)698 lws_get_vhost_name(struct lws_vhost *vhost)
699 {
700 return vhost->name;
701 }
702
703 int
lws_get_vhost_port(struct lws_vhost * vhost)704 lws_get_vhost_port(struct lws_vhost *vhost)
705 {
706 return vhost->listen_port;
707 }
708
709 void *
lws_get_vhost_user(struct lws_vhost * vhost)710 lws_get_vhost_user(struct lws_vhost *vhost)
711 {
712 return vhost->user;
713 }
714
715 const char *
lws_get_vhost_iface(struct lws_vhost * vhost)716 lws_get_vhost_iface(struct lws_vhost *vhost)
717 {
718 return vhost->iface;
719 }
720
721 lws_sockfd_type
lws_get_socket_fd(struct lws * wsi)722 lws_get_socket_fd(struct lws *wsi)
723 {
724 if (!wsi)
725 return -1;
726 return wsi->desc.sockfd;
727 }
728
729
730 struct lws_vhost *
lws_vhost_get(struct lws * wsi)731 lws_vhost_get(struct lws *wsi)
732 {
733 return wsi->vhost;
734 }
735
736 struct lws_vhost *
lws_get_vhost(struct lws * wsi)737 lws_get_vhost(struct lws *wsi)
738 {
739 return wsi->vhost;
740 }
741
742 const struct lws_protocols *
lws_protocol_get(struct lws * wsi)743 lws_protocol_get(struct lws *wsi)
744 {
745 return wsi->protocol;
746 }
747
748 #if defined(LWS_WITH_UDP)
749 const struct lws_udp *
lws_get_udp(const struct lws * wsi)750 lws_get_udp(const struct lws *wsi)
751 {
752 return wsi->udp;
753 }
754 #endif
755
756 struct lws_context *
lws_get_context(const struct lws * wsi)757 lws_get_context(const struct lws *wsi)
758 {
759 return wsi->context;
760 }
761
762 #if defined(LWS_WITH_CLIENT)
/*
 * Called when a client transaction finishes on *_wsi.  If the wsi is on the
 * vhost's active-client-connections list and has queued (pipelined) child
 * transactions, hand the network assets (fd, tls, hostname copy, queue, list
 * membership) over to the first queued wsi, schedule the old wsi for async
 * close, and report the swap by updating *_wsi.
 *
 * Returns 1 when a queued wsi took over (caller must use the new *_wsi),
 * 0 when there is no new transaction (not queueable, or idling waiting for
 * more), -1 on fd-table manipulation failure.
 */
int
_lws_generic_transaction_completed_active_conn(struct lws **_wsi)
{
	struct lws *wnew, *wsi = *_wsi;

	/*
	 * Are we constitutionally capable of having a queue, ie, we are on
	 * the "active client connections" list?
	 *
	 * If not, that's it for us.
	 */

	if (lws_dll2_is_detached(&wsi->dll_cli_active_conns))
		return 0; /* no new transaction */

	/*
	 * With h1 queuing, the original "active client" moves his attributes
	 * like fd, ssl, queue and active client list entry to the next guy in
	 * the queue before closing... it's because the user code knows the
	 * individual wsi and the action must take place in the correct wsi
	 * context.  Note this means we don't truly pipeline headers.
	 *
	 * Trying to keep the original "active client" in place to do the work
	 * of the wsi breaks down when dealing with queued POSTs otherwise; it's
	 * also competing with the real mux child arrangements and complicating
	 * the code.
	 *
	 * For that reason, see if we have any queued child now...
	 */

	if (!wsi->dll2_cli_txn_queue_owner.head) {
		/*
		 * Nothing pipelined... we should hang around a bit
		 * in case something turns up... otherwise we'll close
		 */
		lwsl_info("%s: nothing pipelined waiting\n", __func__);
		lwsi_set_state(wsi, LRS_IDLING);

		lws_set_timeout(wsi, PENDING_TIMEOUT_CLIENT_CONN_IDLE, 5);

		return 0; /* no new transaction right now */
	}

	/*
	 * We have a queued child wsi we should bequeath our assets to, before
	 * closing ourself
	 */

	lws_vhost_lock(wsi->vhost);

	/* the first queued guy becomes the new active connection */
	wnew = lws_container_of(wsi->dll2_cli_txn_queue_owner.head, struct lws,
				dll2_cli_txn_queue);

	assert(wsi != wnew);

	lws_dll2_remove(&wnew->dll2_cli_txn_queue);

	assert(lws_socket_is_valid(wsi->desc.sockfd));

	/* copy the fd */
	wnew->desc = wsi->desc;

	assert(lws_socket_is_valid(wnew->desc.sockfd));

	/* disconnect the fd from association with old wsi */

	/* NOTE(review): these two error returns exit while still holding
	 * lws_vhost_lock() — looks like a lock leak on the failure paths;
	 * confirm against upstream */
	if (__remove_wsi_socket_from_fds(wsi))
		return -1;
	wsi->desc.sockfd = LWS_SOCK_INVALID;

	/* point the fd table entry to new guy */

	assert(lws_socket_is_valid(wnew->desc.sockfd));

	if (__insert_wsi_socket_into_fds(wsi->context, wnew))
		return -1;

#if defined(LWS_WITH_TLS)
	/* pass on the tls */

	wnew->tls = wsi->tls;
	wsi->tls.client_bio = NULL;
	wsi->tls.ssl = NULL;
	wsi->tls.use_ssl = 0;
#endif

	/* take over his copy of his endpoint as an active connection */

	wnew->cli_hostname_copy = wsi->cli_hostname_copy;
	wsi->cli_hostname_copy = NULL;


	/*
	 * selected queued guy now replaces the original leader on the
	 * active client conn list
	 */

	lws_dll2_remove(&wsi->dll_cli_active_conns);
	lws_dll2_add_tail(&wnew->dll_cli_active_conns,
			  &wsi->vhost->dll_cli_active_conns_owner);

	/* move any queued guys to queue on new active conn */

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   wsi->dll2_cli_txn_queue_owner.head) {
		struct lws *ww = lws_container_of(d, struct lws,
					  dll2_cli_txn_queue);

		lws_dll2_remove(&ww->dll2_cli_txn_queue);
		lws_dll2_add_tail(&ww->dll2_cli_txn_queue,
				  &wnew->dll2_cli_txn_queue_owner);

	} lws_end_foreach_dll_safe(d, d1);

	lws_vhost_unlock(wsi->vhost);

	/*
	 * The original leader who passed on all his powers already can die...
	 * in the call stack above us there are guys who still want to touch
	 * him, so have him die next time around the event loop, not now.
	 */

	wsi->already_did_cce = 1; /* so the close doesn't trigger a CCE */
	lws_set_timeout(wsi, 1, LWS_TO_KILL_ASYNC);

	/* after the first one, they can only be coming from the queue */
	wnew->transaction_from_pipeline_queue = 1;

	lwsl_notice("%s: pipeline queue passed wsi %p on to queued wsi %p\n",
		    __func__, wsi, wnew);

	*_wsi = wnew; /* inform caller we swapped */

	return 1; /* new transaction */
}
898 #endif
899
900 int LWS_WARN_UNUSED_RESULT
lws_raw_transaction_completed(struct lws * wsi)901 lws_raw_transaction_completed(struct lws *wsi)
902 {
903 if (lws_has_buffered_out(wsi)) {
904 /*
905 * ...so he tried to send something large, but it went out
906 * as a partial, but he immediately called us to say he wants
907 * to close the connection.
908 *
909 * Defer the close until the last part of the partial is sent.
910 *
911 */
912 lwsl_debug("%s: %p: deferring due to partial\n", __func__, wsi);
913 wsi->close_when_buffered_out_drained = 1;
914 lws_callback_on_writable(wsi);
915
916 return 0;
917 }
918
919 return -1;
920 }
921
/*
 * Rebind wsi to protocol p (NULL just unbinds): issues the role's
 * protocol-unbind callback on any currently-bound protocol, drops
 * library-owned user space, moves wsi onto the new protocol's same-vh list,
 * (re)allocates user space, and issues the protocol-bind callback.  reason
 * is passed through to the unbind callback as the "in" argument.
 *
 * Returns 0 on success, 1 if user-space allocation or the bind callback
 * failed.
 */
int
lws_bind_protocol(struct lws *wsi, const struct lws_protocols *p,
		  const char *reason)
{
//	if (wsi->protocol == p)
//		return 0;
	const struct lws_protocols *vp = wsi->vhost->protocols, *vpo;

	/* balance a previous bind with the role's unbind callback */
	if (wsi->protocol && wsi->protocol_bind_balance) {
		wsi->protocol->callback(wsi,
		       wsi->role_ops->protocol_unbind_cb[!!lwsi_role_server(wsi)],
					wsi->user_space, (void *)reason, 0);
		wsi->protocol_bind_balance = 0;
	}
	if (!wsi->user_space_externally_allocated)
		lws_free_set_NULL(wsi->user_space);

	lws_same_vh_protocol_remove(wsi);

	wsi->protocol = p;
	if (!p)
		return 0;

	if (lws_ensure_user_space(wsi))
		return 1;

	/*
	 * if p points inside this vhost's protocol array we can index it
	 * directly; note p == &vp[0] deliberately falls to the name-matching
	 * path below, which finds it on the first comparison
	 */
	if (p > vp && p < &vp[wsi->vhost->count_protocols])
		lws_same_vh_protocol_insert(wsi, (int)(p - vp));
	else {
		int n = wsi->vhost->count_protocols;
		int hit = 0;

		vpo = vp;

		while (n--) {
			if (p->name && vp->name && !strcmp(p->name, vp->name)) {
				hit = 1;
				lws_same_vh_protocol_insert(wsi, (int)(vp - vpo));
				break;
			}
			vp++;
		}
		if (!hit)
			lwsl_err("%s: %p is not in vhost '%s' protocols list\n",
				 __func__, p, wsi->vhost->name);
	}

	if (wsi->protocol->callback(wsi, wsi->role_ops->protocol_bind_cb[
				    !!lwsi_role_server(wsi)],
				    wsi->user_space, NULL, 0))
		return 1;

	/* remember we owe the new protocol an unbind later */
	wsi->protocol_bind_balance = 1;

	return 0;
}
978
/*
 * Undo lws_mux_mark_immortal() on a mux substream: drop the network wsi's
 * immortal-substream count and, when it reaches zero, restore a normal
 * keepalive timeout regime on the network connection.  No-op for non-mux
 * wsi.
 */
void
lws_http_close_immortal(struct lws *wsi)
{
	struct lws *nwsi;

	if (!wsi->mux_substream)
		return;

	/* must have been marked immortal earlier */
	assert(wsi->mux_stream_immortal);
	wsi->mux_stream_immortal = 0;

	nwsi = lws_get_network_wsi(wsi);
	lwsl_debug("%s: %p %p %d\n", __func__, wsi, nwsi,
		   nwsi->immortal_substream_count);
	assert(nwsi->immortal_substream_count);
	nwsi->immortal_substream_count--;
	if (!nwsi->immortal_substream_count)
		/*
		 * since we closed the only immortal stream on this nwsi, we
		 * need to reapply a normal timeout regime to the nwsi
		 */
		lws_set_timeout(nwsi, PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE,
				wsi->vhost->keepalive_timeout ?
				    wsi->vhost->keepalive_timeout : 31);
}
1004
/*
 * Mark a mux substream "immortal" (exempt from timeouts, e.g. for SSE or
 * long-poll): clears the substream's own timeout, bumps the network wsi's
 * immortal count, and suppresses the network connection's timeout while at
 * least one immortal substream exists.  Logs an error and does nothing if
 * wsi is not a mux substream.
 */
void
lws_mux_mark_immortal(struct lws *wsi)
{
	struct lws *nwsi;

	lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);

	if (!wsi->mux_substream
#if defined(LWS_WITH_CLIENT)
	    && !wsi->client_mux_substream
#endif
	   ) {
		lwsl_err("%s: not h2 substream\n", __func__);
		return;
	}

	nwsi = lws_get_network_wsi(wsi);

	lwsl_debug("%s: %p %p %d\n", __func__, wsi, nwsi,
		   nwsi->immortal_substream_count);

	wsi->mux_stream_immortal = 1;
	assert(nwsi->immortal_substream_count < 255); /* largest count */
	nwsi->immortal_substream_count++;
	/* first immortal substream: stop timing out the network connection */
	if (nwsi->immortal_substream_count == 1)
		lws_set_timeout(nwsi, NO_PENDING_TIMEOUT, 0);
}
1032
1033
1034 int
lws_http_mark_sse(struct lws * wsi)1035 lws_http_mark_sse(struct lws *wsi)
1036 {
1037 lws_http_headers_detach(wsi);
1038 lws_mux_mark_immortal(wsi);
1039
1040 if (wsi->mux_substream)
1041 wsi->h2_stream_carries_sse = 1;
1042
1043 return 0;
1044 }
1045
1046 #if defined(LWS_WITH_CLIENT)
1047
1048 const char *
lws_wsi_client_stash_item(struct lws * wsi,int stash_idx,int hdr_idx)1049 lws_wsi_client_stash_item(struct lws *wsi, int stash_idx, int hdr_idx)
1050 {
1051 /* try the generic client stash */
1052 if (wsi->stash)
1053 return wsi->stash->cis[stash_idx];
1054
1055 #if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
1056 /* if not, use the ah stash if applicable */
1057 return lws_hdr_simple_ptr(wsi, hdr_idx);
1058 #else
1059 return NULL;
1060 #endif
1061 }
1062 #endif
1063
1064 #if defined(LWS_ROLE_H2) || defined(LWS_ROLE_MQTT)
1065
1066 void
lws_wsi_mux_insert(struct lws * wsi,struct lws * parent_wsi,int sid)1067 lws_wsi_mux_insert(struct lws *wsi, struct lws *parent_wsi, int sid)
1068 {
1069 lwsl_info("%s: wsi %p, par %p: assign sid %d (curr %d)\n", __func__,
1070 wsi, parent_wsi, sid, wsi->mux.my_sid);
1071
1072 if (wsi->mux.my_sid && wsi->mux.my_sid != (unsigned int)sid)
1073 assert(0);
1074
1075 wsi->mux.my_sid = sid;
1076 wsi->mux.parent_wsi = parent_wsi;
1077 wsi->role_ops = parent_wsi->role_ops;
1078
1079 /* new guy's sibling is whoever was the first child before */
1080 wsi->mux.sibling_list = parent_wsi->mux.child_list;
1081
1082 /* first child is now the new guy */
1083 parent_wsi->mux.child_list = wsi;
1084
1085 parent_wsi->mux.child_count++;
1086 }
1087
1088 struct lws *
lws_wsi_mux_from_id(struct lws * parent_wsi,unsigned int sid)1089 lws_wsi_mux_from_id(struct lws *parent_wsi, unsigned int sid)
1090 {
1091 lws_start_foreach_ll(struct lws *, wsi, parent_wsi->mux.child_list) {
1092 if (wsi->mux.my_sid == sid)
1093 return wsi;
1094 } lws_end_foreach_ll(wsi, mux.sibling_list);
1095
1096 return NULL;
1097 }
1098
/*
 * Debug-build helper: log every mux sibling of wsi (children of its parent)
 * at INFO level, also asserting the sibling list is not self-referential.
 * No-op when wsi has no mux parent or INFO logging is not visible.
 */
void
lws_wsi_mux_dump_children(struct lws *wsi)
{
#if defined(_DEBUG)
	if (!wsi->mux.parent_wsi || !lwsl_visible(LLL_INFO))
		return;

	lws_start_foreach_llp(struct lws **, w,
			      wsi->mux.parent_wsi->mux.child_list) {
		lwsl_info(" \\---- child %s %p\n",
			  (*w)->role_ops ? (*w)->role_ops->name : "?", *w);
		/* a child pointing at itself would make this loop spin */
		assert(*w != (*w)->mux.sibling_list);
	} lws_end_foreach_llp(w, mux.sibling_list);
#endif
}
1114
/*
 * Close and free every mux child of wsi with the given close reason.  Each
 * child is first unlinked from the sibling list (the list head *w is patched
 * to skip it) so the close cannot recurse back through the list.
 */
void
lws_wsi_mux_close_children(struct lws *wsi, int reason)
{
	struct lws *wsi2;

	if (!wsi->mux.child_list)
		return;

	lws_start_foreach_llp(struct lws **, w, wsi->mux.child_list) {
		lwsl_info("   closing child %p\n", *w);
		/* disconnect from siblings */
		wsi2 = (*w)->mux.sibling_list;
		assert (wsi2 != *w);
		(*w)->mux.sibling_list = NULL;
		(*w)->socket_is_permanently_unusable = 1;
		__lws_close_free_wsi(*w, reason, "mux child recurse");
		/* splice the freed child out of the list */
		*w = wsi2;
		continue;
	} lws_end_foreach_llp(w, mux.sibling_list);
}
1135
1136
/*
 * Remove wsi from its mux parent's child list and detach it from the parent.
 * Caller must ensure wsi currently has a mux parent (we dereference it
 * unconditionally); the parent's child_count is decremented either way.
 */
void
lws_wsi_mux_sibling_disconnect(struct lws *wsi)
{
	struct lws *wsi2;

	lws_start_foreach_llp(struct lws **, w,
			      wsi->mux.parent_wsi->mux.child_list) {

		/* disconnect from siblings */
		if (*w == wsi) {
			wsi2 = (*w)->mux.sibling_list;
			(*w)->mux.sibling_list = NULL;
			/* point the previous link past us */
			*w = wsi2;
			lwsl_debug("  %p disentangled from sibling %p\n",
				   wsi, wsi2);
			break;
		}
	} lws_end_foreach_llp(w, mux.sibling_list);
	wsi->mux.parent_wsi->mux.child_count--;

	wsi->mux.parent_wsi = NULL;
}
1159
void
lws_wsi_mux_dump_waiting_children(struct lws *wsi)
{
#if defined(_DEBUG)
	struct lws *w;

	lwsl_info("%s: %p: children waiting for POLLOUT service:\n",
		  __func__, wsi);

	/* '*' marks children that have requested POLLOUT */

	for (w = wsi->mux.child_list; w; w = w->mux.sibling_list)
		lwsl_info("  %c %p: sid %u: 0x%x %s %s\n",
			  w->mux.requested_POLLOUT ? '*' : ' ',
			  w, w->mux.my_sid, lwsi_state(w),
			  w->role_ops->name,
			  w->protocol ? w->protocol->name : "noprotocol");
#endif
}
1179
1180 int
lws_wsi_mux_mark_parents_needing_writeable(struct lws * wsi)1181 lws_wsi_mux_mark_parents_needing_writeable(struct lws *wsi)
1182 {
1183 struct lws /* *network_wsi = lws_get_network_wsi(wsi), */ *wsi2;
1184 //int already = network_wsi->mux.requested_POLLOUT;
1185
1186 /* mark everybody above him as requesting pollout */
1187
1188 wsi2 = wsi;
1189 while (wsi2) {
1190 wsi2->mux.requested_POLLOUT = 1;
1191 lwsl_info("%s: mark wsi: %p, sid %u, pending writable\n",
1192 __func__, wsi2, wsi2->mux.my_sid);
1193 wsi2 = wsi2->mux.parent_wsi;
1194 }
1195
1196 return 0; // already;
1197 }
1198
/*
 * Round-robin fairness helper: move the child pointed to by the link *wsi2
 * to the tail of its sibling list, clear its requested_POLLOUT flag, and
 * return it.  wsi2 is the address of the link that points at the chosen
 * child (eg, &parent->mux.child_list), so the unlink updates the real list.
 */
struct lws *
lws_wsi_mux_move_child_to_tail(struct lws **wsi2)
{
	struct lws *w = *wsi2;

	while (w) {
		if (!w->mux.sibling_list) { /* w is the current last */
			lwsl_debug("w=%p, *wsi2 = %p\n", w, *wsi2);

			if (w == *wsi2) /* we are already last */
				break;

			/* last points to us as new last */
			w->mux.sibling_list = *wsi2;

			/* guy pointing to us until now points to
			 * our old next */
			*wsi2 = (*wsi2)->mux.sibling_list;

			/* we point to nothing because we are last */
			w->mux.sibling_list->mux.sibling_list = NULL;

			/* w becomes us */
			w = w->mux.sibling_list;
			break;
		}
		w = w->mux.sibling_list;
	}

	/* clear the waiting for POLLOUT on the guy that was chosen */

	if (w)
		w->mux.requested_POLLOUT = 0;

	/* NULL if the list was empty; otherwise the serviced child */
	return w;
}
1235
1236 int
lws_wsi_mux_action_pending_writeable_reqs(struct lws * wsi)1237 lws_wsi_mux_action_pending_writeable_reqs(struct lws *wsi)
1238 {
1239 struct lws *w = wsi->mux.child_list;
1240
1241 while (w) {
1242 if (w->mux.requested_POLLOUT) {
1243 if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
1244 return -1;
1245 return 0;
1246 }
1247 w = w->mux.sibling_list;
1248 }
1249
1250 if (lws_change_pollfd(wsi, LWS_POLLOUT, 0))
1251 return -1;
1252
1253 return 0;
1254 }
1255
1256 int
lws_wsi_txc_check_skint(struct lws_tx_credit * txc,int32_t tx_cr)1257 lws_wsi_txc_check_skint(struct lws_tx_credit *txc, int32_t tx_cr)
1258 {
1259 if (txc->tx_cr <= 0) {
1260 /*
1261 * If other side is not able to cope with us sending any DATA
1262 * so no matter if we have POLLOUT on our side if it's DATA we
1263 * want to send.
1264 */
1265
1266 if (!txc->skint)
1267 lwsl_info("%s: %p: skint (%d)\n", __func__, txc,
1268 (int)txc->tx_cr);
1269
1270 txc->skint = 1;
1271
1272 return 1;
1273 }
1274
1275 if (txc->skint)
1276 lwsl_info("%s: %p: unskint (%d)\n", __func__, txc,
1277 (int)txc->tx_cr);
1278
1279 txc->skint = 0;
1280
1281 return 0;
1282 }
1283
#if defined(_DEBUG)
void
lws_wsi_txc_describe(struct lws_tx_credit *txc, const char *at, uint32_t sid)
{
	/* one-line INFO dump of both credit directions for this stream */
	const char *skint = txc->skint ? "SKINT, " : "";

	lwsl_info("%s: %p: %s: sid %d: %speer-to-us: %d, us-to-peer: %d\n",
		  __func__, txc, at, (int)sid, skint,
		  (int)txc->peer_tx_cr_est, (int)txc->tx_cr);
}
#endif
1293
1294 int
lws_wsi_tx_credit(struct lws * wsi,char peer_to_us,int add)1295 lws_wsi_tx_credit(struct lws *wsi, char peer_to_us, int add)
1296 {
1297 if (wsi->role_ops && wsi->role_ops->tx_credit)
1298 return wsi->role_ops->tx_credit(wsi, peer_to_us, add);
1299
1300 return 0;
1301 }
1302
1303 /*
1304 * Let the protocol know about incoming tx credit window updates if it's
1305 * managing the flow control manually (it may want to proxy this information)
1306 */
1307
1308 int
lws_wsi_txc_report_manual_txcr_in(struct lws * wsi,int32_t bump)1309 lws_wsi_txc_report_manual_txcr_in(struct lws *wsi, int32_t bump)
1310 {
1311 if (!wsi->txc.manual)
1312 /*
1313 * If we don't care about managing it manually, no need to
1314 * report it
1315 */
1316 return 0;
1317
1318 return user_callback_handle_rxflow(wsi->protocol->callback,
1319 wsi, LWS_CALLBACK_WSI_TX_CREDIT_GET,
1320 wsi->user_space, NULL, (size_t)bump);
1321 }
1322
1323 #if defined(LWS_WITH_CLIENT)
1324
/*
 * wsi is a nwsi (network connection) that turned out to be multiplexable;
 * walk its queued pipelined client transactions and adopt each as a mux
 * child stream appropriate for the nwsi's role.
 */
int
lws_wsi_mux_apply_queue(struct lws *wsi)
{
	/* we have a transaction queue that wants to pipeline */

	lws_vhost_lock(wsi->vhost);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   wsi->dll2_cli_txn_queue_owner.head) {
		struct lws *w = lws_container_of(d, struct lws,
						 dll2_cli_txn_queue);

#if defined(LWS_ROLE_H2)
		if (lwsi_role_http(wsi) &&
		    lwsi_state(w) == LRS_H2_WAITING_TO_SEND_HEADERS) {
			lwsl_info("%s: cli pipeq %p to be h2\n", __func__, w);

			lwsi_set_state(w, LRS_H1C_ISSUE_HANDSHAKE2);

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an h2 stream */
			lws_wsi_h2_adopt(wsi, w);
		}
#endif

#if defined(LWS_ROLE_MQTT)
		/*
		 * NOTE(review): unlike the h2 branch, this checks the state
		 * of the parent wsi (connection established), not the queued
		 * child w — confirm this asymmetry is intended
		 */
		if (lwsi_role_mqtt(wsi) &&
		    lwsi_state(wsi) == LRS_ESTABLISHED) {
			lwsl_info("%s: cli pipeq %p to be mqtt\n", __func__, w);

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an mqtt child stream */
			lws_wsi_mqtt_adopt(wsi, w);
		}
#endif

	} lws_end_foreach_dll_safe(d, d1);

	lws_vhost_unlock(wsi->vhost);

	return 0;
}
1371
1372 #endif
1373
1374 #endif
1375