1 /*
2 * libwebsockets - small server side websockets and web server implementation
3 *
4 * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "private-lib-core.h"
26
27 const char *
lws_wsi_tag(struct lws * wsi)28 lws_wsi_tag(struct lws *wsi)
29 {
30 if (!wsi)
31 return "[null wsi]";
32 return lws_lc_tag(&wsi->lc);
33 }
34
35 #if defined (_DEBUG)
void lwsi_set_role(struct lws *wsi, lws_wsi_state_t role)
{
	lws_wsi_state_t ws = wsi->wsistate;

	/* replace only the role bits, preserving the connection-state bits */
	wsi->wsistate = (ws & ~LWSI_ROLE_MASK) | role;

	lwsl_wsi_debug(wsi, "state 0x%lx", (unsigned long)wsi->wsistate);
}
42
void lwsi_set_state(struct lws *wsi, lws_wsi_state_t lrs)
{
	lws_wsi_state_t prev = wsi->wsistate;

	/* swap in the new connection state, keeping the role bits intact */
	wsi->wsistate = (prev & (unsigned int)(~LRS_MASK)) | lrs;

	lwsl_wsi_debug(wsi, "lwsi_set_state 0x%lx -> 0x%lx",
		       (unsigned long)prev, (unsigned long)wsi->wsistate);
}
52 #endif
53
54
/*
 * Log prefix helper: emits "<wsi tag>: " into the log line buffer at *p
 * and advances *p; e marks the end of the buffer.  cx is unused here.
 */
void
lws_log_prepend_wsi(struct lws_log_cx *cx, void *obj, char **p, char *e)
{
	struct lws *wsi = (struct lws *)obj;

	*p += lws_snprintf(*p, lws_ptr_diff_size_t(e, (*p)), "%s: ",
			   lws_wsi_tag(wsi));
}
63
64 void
lws_vhost_bind_wsi(struct lws_vhost * vh,struct lws * wsi)65 lws_vhost_bind_wsi(struct lws_vhost *vh, struct lws *wsi)
66 {
67 if (wsi->a.vhost == vh)
68 return;
69
70 lws_context_lock(vh->context, __func__); /* ---------- context { */
71 wsi->a.vhost = vh;
72
73 #if defined(LWS_WITH_TLS_JIT_TRUST)
74 if (!vh->count_bound_wsi && vh->grace_after_unref) {
75 lwsl_wsi_info(wsi, "in use");
76 lws_sul_cancel(&vh->sul_unref);
77 }
78 #endif
79
80 vh->count_bound_wsi++;
81 lws_context_unlock(vh->context); /* } context ---------- */
82
83 lwsl_wsi_debug(wsi, "vh %s: wsi %s/%s, count_bound_wsi %d\n",
84 vh->name, wsi->role_ops ? wsi->role_ops->name : "none",
85 wsi->a.protocol ? wsi->a.protocol->name : "none",
86 vh->count_bound_wsi);
87 assert(wsi->a.vhost->count_bound_wsi > 0);
88 }
89
90
91 /* req cx lock... acquires vh lock */
void
__lws_vhost_unbind_wsi(struct lws *wsi)
{
	struct lws_vhost *vh = wsi->a.vhost;

	if (!vh)
		return;

	/* caller must hold the context lock (see header comment above) */
	lws_context_assert_lock_held(wsi->a.context);

	lws_vhost_lock(vh);

	assert(vh->count_bound_wsi > 0);
	vh->count_bound_wsi--;

#if defined(LWS_WITH_TLS_JIT_TRUST)
	/* last bound wsi gone: start the vhost unref grace period */
	if (!vh->count_bound_wsi && vh->grace_after_unref)
		lws_tls_jit_trust_vh_start_grace(vh);
#endif

	lwsl_wsi_debug(wsi, "vh %s: count_bound_wsi %d",
		       vh->name, vh->count_bound_wsi);

	lws_vhost_unlock(vh);

	/*
	 * NOTE(review): count_bound_wsi is re-read after dropping the vh
	 * lock; presumably safe because the context lock is still held --
	 * confirm all callers honour that requirement
	 */
	if (!vh->count_bound_wsi && vh->being_destroyed)
		/*
		 * We have closed all wsi that were bound to this vhost
		 * by any pt: nothing can be servicing any wsi belonging
		 * to it any more.
		 *
		 * Finalize the vh destruction... must drop vh lock
		 */
		__lws_vhost_destroy2(vh);

	wsi->a.vhost = NULL;
}
129
struct lws *
lws_get_network_wsi(struct lws *wsi)
{
	/*
	 * Return the wsi owning the actual network connection: for a mux
	 * substream (h2 / mqtt) that is the topmost parent, otherwise wsi
	 * itself.  NULL in, NULL out.
	 */
	if (!wsi)
		return NULL;

#if defined(LWS_WITH_HTTP2) || defined(LWS_ROLE_MQTT)
	if (!wsi->mux_substream
#if defined(LWS_WITH_CLIENT)
			&& !wsi->client_mux_substream
#endif
	)
		return wsi;

	/* climb to the root of the mux tree */
	while (wsi->mux.parent_wsi)
		wsi = wsi->mux.parent_wsi;
#endif

	return wsi;
}
150
151
152 const struct lws_protocols *
lws_vhost_name_to_protocol(struct lws_vhost * vh,const char * name)153 lws_vhost_name_to_protocol(struct lws_vhost *vh, const char *name)
154 {
155 int n;
156
157 for (n = 0; n < vh->count_protocols; n++)
158 if (vh->protocols[n].name && !strcmp(name, vh->protocols[n].name))
159 return &vh->protocols[n];
160
161 return NULL;
162 }
163
164 int
lws_callback_all_protocol(struct lws_context * context,const struct lws_protocols * protocol,int reason)165 lws_callback_all_protocol(struct lws_context *context,
166 const struct lws_protocols *protocol, int reason)
167 {
168 struct lws_context_per_thread *pt = &context->pt[0];
169 unsigned int n, m = context->count_threads;
170 struct lws *wsi;
171
172 while (m--) {
173 for (n = 0; n < pt->fds_count; n++) {
174 wsi = wsi_from_fd(context, pt->fds[n].fd);
175 if (!wsi)
176 continue;
177 if (wsi->a.protocol == protocol)
178 protocol->callback(wsi,
179 (enum lws_callback_reasons)reason,
180 wsi->user_space, NULL, 0);
181 }
182 pt++;
183 }
184
185 return 0;
186 }
187
void *
lws_evlib_wsi_to_evlib_pt(struct lws *wsi)
{
	/* event-lib private per-thread object for the pt servicing wsi */
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];

	return pt->evlib_pt;
}
195
void *
lws_evlib_tsi_to_evlib_pt(struct lws_context *cx, int tsi)
{
	/* event-lib private per-thread object for service thread index tsi */
	struct lws_context_per_thread *pt = &cx->pt[tsi];

	return pt->evlib_pt;
}
203
204 int
lws_callback_all_protocol_vhost_args(struct lws_vhost * vh,const struct lws_protocols * protocol,int reason,void * argp,size_t len)205 lws_callback_all_protocol_vhost_args(struct lws_vhost *vh,
206 const struct lws_protocols *protocol, int reason,
207 void *argp, size_t len)
208 {
209 struct lws_context *context = vh->context;
210 struct lws_context_per_thread *pt = &context->pt[0];
211 unsigned int n, m = context->count_threads;
212 struct lws *wsi;
213
214 while (m--) {
215 for (n = 0; n < pt->fds_count; n++) {
216 wsi = wsi_from_fd(context, pt->fds[n].fd);
217 if (!wsi)
218 continue;
219 if (wsi->a.vhost == vh && (wsi->a.protocol == protocol ||
220 !protocol))
221 wsi->a.protocol->callback(wsi, (enum lws_callback_reasons)reason,
222 wsi->user_space, argp, len);
223 }
224 pt++;
225 }
226
227 return 0;
228 }
229
int
lws_callback_all_protocol_vhost(struct lws_vhost *vh,
			 const struct lws_protocols *protocol, int reason)
{
	/* convenience wrapper: no argument pointer / length */
	return lws_callback_all_protocol_vhost_args(vh, protocol, reason, NULL, 0);
}
236
237 int
lws_callback_vhost_protocols(struct lws * wsi,int reason,void * in,size_t len)238 lws_callback_vhost_protocols(struct lws *wsi, int reason, void *in, size_t len)
239 {
240 int n;
241
242 for (n = 0; n < wsi->a.vhost->count_protocols; n++)
243 if (wsi->a.vhost->protocols[n].callback(wsi, (enum lws_callback_reasons)reason, NULL, in, len))
244 return 1;
245
246 return 0;
247 }
248
249 #if defined(LWS_WITH_SYS_FAULT_INJECTION)
250 /*
251 * We want to inject a fault that makes it feel like the peer hung up on us,
252 * or we were otherwise cut off.
253 */
void
lws_wsi_fault_timedclose_cb(lws_sorted_usec_list_t *s)
{
	/* scheduled fault fired: recover the owning wsi and kill it */
	struct lws *wsi = lws_container_of(s, struct lws, sul_fault_timedclose);

	lwsl_wsi_warn(wsi, "force-closing");
	lws_wsi_close(wsi, LWS_TO_KILL_ASYNC);
}
262 #endif
263
264 #if defined(LWS_WITH_SYS_FAULT_INJECTION)
void
lws_wsi_fault_timedclose(struct lws *wsi)
{
	uint64_t u;

	/* only act if the "timedclose" fault is armed for this wsi */
	if (!lws_fi(&wsi->fic, "timedclose"))
		return;

	/* pick the delay (ms) from the configured "timedclose_ms" range */
	if (lws_fi_range(&wsi->fic, "timedclose_ms", &u))
		return;

	lwsl_wsi_warn(wsi, "injecting close in %ums", (unsigned int)u);
	lws_sul_schedule(wsi->a.context, wsi->tsi, &wsi->sul_fault_timedclose,
			 lws_wsi_fault_timedclose_cb,
			 (lws_usec_t)(u * 1000ull));
}
281 #endif
282
283
284 /*
285 * We need the context lock
286 */
287
struct lws *
__lws_wsi_create_with_role(struct lws_context *context, int tsi,
			   const struct lws_role_ops *ops,
			   lws_log_cx_t *log_cx_template)
{
	size_t s = sizeof(struct lws);
	struct lws *wsi;

	assert(tsi >= 0 && tsi < LWS_MAX_SMP);

	lws_context_assert_lock_held(context);

#if defined(LWS_WITH_EVENT_LIBS)
	/* event lib's private wsi area lives in the same allocation */
	s += context->event_loop_ops->evlib_size_wsi;
#endif

	wsi = lws_zalloc(s, __func__);

	if (!wsi) {
		lwsl_cx_err(context, "OOM");
		return NULL;
	}

	/* per-wsi log context: explicit template, else the context's */
	if (log_cx_template)
		wsi->lc.log_cx = log_cx_template;
	else
		wsi->lc.log_cx = context->log_cx;

#if defined(LWS_WITH_EVENT_LIBS)
	wsi->evlib_wsi = (uint8_t *)wsi + sizeof(*wsi);
#endif
	wsi->a.context = context;
	lws_role_transition(wsi, 0, LRS_UNCONNECTED, ops);
	wsi->pending_timeout = NO_PENDING_TIMEOUT;
	wsi->a.protocol = NULL;
	wsi->tsi = (char)tsi;
	wsi->a.vhost = NULL;
	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi->position_in_fds_table = LWS_NO_FDS_POS;

#if defined(LWS_WITH_SYS_FAULT_INJECTION)
	/* seed this wsi's fault-injection prng from the context's */
	lws_xos_init(&wsi->fic.xos, lws_xos(&context->fic.xos));
#endif

	lws_fi_inherit_copy(&wsi->fic, &context->fic, "wsi", NULL);

	/* optional injected failure so callers' OOM paths can be tested */
	if (lws_fi(&wsi->fic, "createfail")) {
		lws_fi_destroy(&wsi->fic);
		lws_free(wsi);
		return NULL;
	}

	return wsi;
}
342
343 int
lws_wsi_inject_to_loop(struct lws_context_per_thread * pt,struct lws * wsi)344 lws_wsi_inject_to_loop(struct lws_context_per_thread *pt, struct lws *wsi)
345 {
346 int ret = 1;
347
348 lws_pt_lock(pt, __func__); /* -------------- pt { */
349
350 if (pt->context->event_loop_ops->sock_accept)
351 if (pt->context->event_loop_ops->sock_accept(wsi))
352 goto bail;
353
354 if (__insert_wsi_socket_into_fds(pt->context, wsi))
355 goto bail;
356
357 ret = 0;
358
359 bail:
360 lws_pt_unlock(pt);
361
362 return ret;
363 }
364
365 /*
366 * Take a copy of wsi->desc.sockfd before calling this, then close it
367 * afterwards
368 */
369
int
lws_wsi_extract_from_loop(struct lws *wsi)
{
	if (lws_socket_is_valid(wsi->desc.sockfd))
		__remove_wsi_socket_from_fds(wsi);

	/*
	 * event libs with only an async logical close (no synchronous
	 * destroy) take that path; destruction then completes later
	 */
	if (!wsi->a.context->event_loop_ops->destroy_wsi &&
	    wsi->a.context->event_loop_ops->wsi_logical_close) {
		wsi->a.context->event_loop_ops->wsi_logical_close(wsi);
		return 1; /* close / destroy continues async */
	}

	if (wsi->a.context->event_loop_ops->destroy_wsi)
		wsi->a.context->event_loop_ops->destroy_wsi(wsi);

	return 0; /* he is destroyed */
}
387
int
lws_callback_vhost_protocols_vhost(struct lws_vhost *vh, int reason, void *in,
				   size_t len)
{
	int n;
	/* minimal temporary wsi used only as a carrier for the callbacks */
	struct lws *wsi = lws_zalloc(sizeof(*wsi), "fake wsi");

	if (!wsi)
		return 1;

	wsi->a.context = vh->context;
	lws_vhost_bind_wsi(vh, wsi);

	/* offer the event to each protocol; first nonzero return aborts */
	for (n = 0; n < wsi->a.vhost->count_protocols; n++) {
		wsi->a.protocol = &vh->protocols[n];
		if (wsi->a.protocol->callback(wsi, (enum lws_callback_reasons)reason, NULL, in, len)) {
			lws_free(wsi);
			return 1;
		}
	}

	/*
	 * NOTE(review): the fake wsi was bound to vh (count_bound_wsi++)
	 * but is freed without an unbind -- verify the refcount is not
	 * expected to balance here
	 */
	lws_free(wsi);

	return 0;
}
413
414
int
lws_rx_flow_control(struct lws *wsi, int _enable)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	int en = _enable;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	lwsl_wsi_info(wsi, "0x%x", _enable);

	if (!(_enable & LWS_RXFLOW_REASON_APPLIES)) {
		/*
		 * convert user bool style to bitmap style... in user simple
		 * bool style _enable = 0 = flow control it, = 1 = allow rx
		 */
		en = LWS_RXFLOW_REASON_APPLIES | LWS_RXFLOW_REASON_USER_BOOL;
		if (_enable & 1)
			en |= LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT;
	}

	lws_pt_lock(pt, __func__);

	/* any bit set in rxflow_bitmap DISABLEs rxflow control */
	if (en & LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT)
		wsi->rxflow_bitmap = (uint8_t)(wsi->rxflow_bitmap & ~(en & 0xff));
	else
		wsi->rxflow_bitmap = (uint8_t)(wsi->rxflow_bitmap | (en & 0xff));

	/* no change in the effective allow/deny state: nothing to do */
	if ((LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap)) ==
	    wsi->rxflow_change_to)
		goto skip;

	wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE |
				(!wsi->rxflow_bitmap);

	lwsl_wsi_info(wsi, "bitmap 0x%x: en 0x%x, ch 0x%x",
		      wsi->rxflow_bitmap, en, wsi->rxflow_change_to);

	/* apply now if asked to, or if no user callback is in flight */
	if (_enable & LWS_RXFLOW_REASON_FLAG_PROCESS_NOW ||
	    !wsi->rxflow_will_be_applied) {
		en = __lws_rx_flow_control(wsi);
		lws_pt_unlock(pt);

		return en;
	}

skip:
	lws_pt_unlock(pt);

	return 0;
}
469
470 void
lws_rx_flow_allow_all_protocol(const struct lws_context * context,const struct lws_protocols * protocol)471 lws_rx_flow_allow_all_protocol(const struct lws_context *context,
472 const struct lws_protocols *protocol)
473 {
474 const struct lws_context_per_thread *pt = &context->pt[0];
475 struct lws *wsi;
476 unsigned int n, m = context->count_threads;
477
478 while (m--) {
479 for (n = 0; n < pt->fds_count; n++) {
480 wsi = wsi_from_fd(context, pt->fds[n].fd);
481 if (!wsi)
482 continue;
483 if (wsi->a.protocol == protocol)
484 lws_rx_flow_control(wsi, LWS_RXFLOW_ALLOW);
485 }
486 pt++;
487 }
488 }
489
int user_callback_handle_rxflow(lws_callback_function callback_function,
				struct lws *wsi,
				enum lws_callback_reasons reason, void *user,
				void *in, size_t len)
{
	int ret;

	/*
	 * Run the user callback with rxflow application deferred, then
	 * apply any rx flow change it requested once it returns cleanly.
	 */
	wsi->rxflow_will_be_applied = 1;
	ret = callback_function(wsi, reason, user, in, len);
	wsi->rxflow_will_be_applied = 0;

	return ret ? ret : __lws_rx_flow_control(wsi);
}
505
/* apply any pending rxflow change to wsi (and its children), adjusting
 * POLLIN; pt lock held by caller */
int
__lws_rx_flow_control(struct lws *wsi)
{
	struct lws *wsic = wsi->child_list;

	// h2 ignores rx flow control atm
	if (lwsi_role_h2(wsi) || wsi->mux_substream ||
	    lwsi_role_h2_ENCAPSULATION(wsi))
		return 0; // !!!

	/* if he has children, do those if they were changed */
	while (wsic) {
		if (wsic->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)
			__lws_rx_flow_control(wsic);

		wsic = wsic->sibling_list;
	}

	/* there is no pending change */
	if (!(wsi->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE))
		return 0;

	/* stuff is still buffered, not ready to really accept new input */
	if (lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
		/* get ourselves called back to deal with stashed buffer */
		lws_callback_on_writable(wsi);
		/* NOTE(review): deliberately falls through (the early
		 * return is commented out) -- the change is applied even
		 * with data buffered */
		// return 0;
	}

	/* now the pending is cleared, we can change rxflow state */

	wsi->rxflow_change_to &= (~LWS_RXFLOW_PENDING_CHANGE) & 3;

	lwsl_wsi_info(wsi, "rxflow: change_to %d",
		      wsi->rxflow_change_to & LWS_RXFLOW_ALLOW);

	/* adjust the pollfd for this wsi */

	if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
		lwsl_wsi_info(wsi, "reenable POLLIN");
		// lws_buflist_describe(&wsi->buflist, NULL, __func__);
		if (__lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
			lwsl_wsi_info(wsi, "fail");
			return -1;
		}
	} else
		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
			return -1;

	return 0;
}
557
558
const struct lws_protocols *
lws_get_protocol(struct lws *wsi)
{
	/* protocol currently bound to this wsi (NULL until bound) */
	return wsi->a.protocol;
}
564
565
566 int
lws_ensure_user_space(struct lws * wsi)567 lws_ensure_user_space(struct lws *wsi)
568 {
569 if (!wsi->a.protocol)
570 return 0;
571
572 /* allocate the per-connection user memory (if any) */
573
574 if (wsi->a.protocol->per_session_data_size && !wsi->user_space) {
575 wsi->user_space = lws_zalloc(
576 wsi->a.protocol->per_session_data_size, "user space");
577 if (wsi->user_space == NULL) {
578 lwsl_wsi_err(wsi, "OOM");
579 return 1;
580 }
581 } else
582 lwsl_wsi_debug(wsi, "protocol pss %lu, user_space=%p",
583 (long)wsi->a.protocol->per_session_data_size,
584 wsi->user_space);
585 return 0;
586 }
587
void *
lws_adjust_protocol_psds(struct lws *wsi, size_t new_size)
{
	/*
	 * Set the protocol's per-session data size, then (re)ensure this
	 * wsi's user space; NULL on alloc failure.  Note this writes
	 * through the protocols table via a const cast, so it affects
	 * every wsi using that protocol.
	 */
	((struct lws_protocols *)lws_get_protocol(wsi))->per_session_data_size =
		new_size;

	if (lws_ensure_user_space(wsi))
		return NULL;

	return wsi->user_space;
}
599
int
lws_get_tsi(struct lws *wsi)
{
	/* service thread index this wsi belongs to */
	return (int)wsi->tsi;
}
605
int
lws_is_ssl(struct lws *wsi)
{
	/* nonzero if the connection is using tls (always 0 in no-tls builds) */
#if defined(LWS_WITH_TLS)
	return wsi->tls.use_ssl & LCCSCF_USE_SSL;
#else
	(void)wsi;
	return 0;
#endif
}
616
617 #if defined(LWS_WITH_TLS) && !defined(LWS_WITH_MBEDTLS)
lws_tls_conn*
lws_get_ssl(struct lws *wsi)
{
	/* underlying tls library connection object for this wsi */
	return wsi->tls.ssl;
}
623 #endif
624
int
lws_has_buffered_out(struct lws *wsi)
{
	/*
	 * Nonzero if this wsi -- or, for h2 builds, its network wsi --
	 * still holds partially-sent output in buflist_out.
	 */
	if (wsi->buflist_out)
		return 1;

#if defined(LWS_ROLE_H2)
	{
		struct lws *nwsi = lws_get_network_wsi(wsi);

		if (nwsi->buflist_out)
			return 1;
	}
#endif

	return 0;
}
642
int
lws_partial_buffered(struct lws *wsi)
{
	/* alias for lws_has_buffered_out() */
	return lws_has_buffered_out(wsi);
}
648
lws_fileofs_t
lws_get_peer_write_allowance(struct lws *wsi)
{
	/* tx credit we hold towards the peer, for roles that account it;
	 * -1 when the role has no tx_credit op */
	if (!lws_rops_fidx(wsi->role_ops, LWS_ROPS_tx_credit))
		return -1;

	return lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_tx_credit).
					tx_credit(wsi, LWSTXCR_US_TO_PEER, 0);
}
658
void
lws_role_transition(struct lws *wsi, enum lwsi_role role, enum lwsi_state state,
		    const struct lws_role_ops *ops)
{
	/* update wsistate with the new role and state bits, and optionally
	 * swap the role ops vtable (NULL ops leaves it untouched) */
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	const char *name = "(unset)";
#endif
	wsi->wsistate = (unsigned int)role | (unsigned int)state;
	if (ops)
		wsi->role_ops = ops;
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
	if (wsi->role_ops)
		name = wsi->role_ops->name;
	lwsl_wsi_debug(wsi, "wsistate 0x%lx, ops %s",
		       (unsigned long)wsi->wsistate, name);
#endif
}
676
int
lws_parse_uri(char *p, const char **prot, const char **ads, int *port,
	      const char **path)
{
	char *m, unix_skt = 0;

	/*
	 * Split "proto://address:port/path" in place: the separators are
	 * overwritten with NULs and the out-pointers aimed into the
	 * caller's buffer (so it must stay alive).  Always returns 0.
	 */
	m = strstr(p, "://");
	if (!m) {
		/* no scheme: point *prot at the empty string at the end */
		*prot = p + strlen(p);
	} else {
		*prot = p;
		*m = '\0';
		p = m + 3;
	}

	if (*p == '+') /* unix domain socket address follows */
		unix_skt = 1;

	*ads = p;

	/* default the port from the scheme */
	if (!strcmp(*prot, "http") || !strcmp(*prot, "ws"))
		*port = 80;
	else if (!strcmp(*prot, "https") || !strcmp(*prot, "wss"))
		*port = 443;

	if (*p == '[') {
		/* bracketed ipv6 literal: address is inside the brackets */
		(*ads)++;
		for (; *p && *p != ']'; p++)
			;
		if (*p)
			*p++ = '\0';
	} else
		for (; *p && *p != ':' && (unix_skt || *p != '/'); p++)
			;

	if (*p == ':') {
		/* explicit port overrides the scheme default */
		*p++ = '\0';
		*port = atoi(p);
		for (; *p && *p != '/'; p++)
			;
	}

	*path = "/";
	if (*p) {
		*p++ = '\0';
		if (*p)
			*path = p;
	}

	return 0;
}
730
731 /* ... */
732
/*
 * Scan the ?a=b URI argument fragments for one whose name matches, copy
 * just its value to the start of buf (NUL-terminated) and return the
 * value length, or -1 if no such argument.
 */
int
lws_get_urlarg_by_name_safe(struct lws *wsi, const char *name, char *buf, int len)
{
	int n = 0, fraglen, sl = (int)strlen(name);

	do {
		fraglen = lws_hdr_copy_fragment(wsi, buf, len,
						WSI_TOKEN_HTTP_URI_ARGS, n);

		/* no more fragments */
		if (fraglen < 0)
			break;

		if (fraglen + 1 < len &&
		    fraglen >= sl &&
		    !strncmp(buf, name, (size_t)sl)) {
			/*
			 * If he left off the trailing =, trim it from the
			 * result
			 */

			if (name[sl - 1] != '=' &&
			    sl < fraglen &&
			    buf[sl] == '=')
				sl++;

			/* shift the value down over the "name=" prefix */
			memmove(buf, buf + sl, (size_t)(fraglen - sl));
			buf[fraglen - sl] = '\0';

			return fraglen - sl;
		}

		n++;
	} while (1);

	return -1;
}
769
const char *
lws_get_urlarg_by_name(struct lws *wsi, const char *name, char *buf, int len)
{
	/* legacy wrapper: NULL on no match, else buf holding the value */
	if (lws_get_urlarg_by_name_safe(wsi, name, buf, len) < 0)
		return NULL;

	return buf;
}
777
778
779 #if defined(LWS_WITHOUT_EXTENSIONS)
780
781 /* we need to provide dummy callbacks for internal exts
782 * so user code runs when faced with a lib compiled with
783 * extensions disabled.
784 */
785
786 int
lws_extension_callback_pm_deflate(struct lws_context * context,const struct lws_extension * ext,struct lws * wsi,enum lws_extension_callback_reasons reason,void * user,void * in,size_t len)787 lws_extension_callback_pm_deflate(struct lws_context *context,
788 const struct lws_extension *ext,
789 struct lws *wsi,
790 enum lws_extension_callback_reasons reason,
791 void *user, void *in, size_t len)
792 {
793 (void)context;
794 (void)ext;
795 (void)wsi;
796 (void)reason;
797 (void)user;
798 (void)in;
799 (void)len;
800
801 return 0;
802 }
803
int
lws_set_extension_option(struct lws *wsi, const char *ext_name,
			 const char *opt_name, const char *opt_val)
{
	/*
	 * Dummy for LWS_WITHOUT_EXTENSIONS builds: there are no extensions
	 * to configure, so always report failure.  The (void) casts keep
	 * -Wunused-parameter quiet.
	 */
	(void)wsi;
	(void)ext_name;
	(void)opt_name;
	(void)opt_val;

	return -1;
}
810 #endif
811
int
lws_is_cgi(struct lws *wsi)
{
	/* nonzero if this wsi currently has a CGI object attached */
#ifdef LWS_WITH_CGI
	return !!wsi->http.cgi;
#else
	(void)wsi; /* unused in non-CGI builds */
	return 0;
#endif
}
820
821 const struct lws_protocol_vhost_options *
lws_pvo_search(const struct lws_protocol_vhost_options * pvo,const char * name)822 lws_pvo_search(const struct lws_protocol_vhost_options *pvo, const char *name)
823 {
824 while (pvo) {
825 if (!strcmp(pvo->name, name))
826 break;
827
828 pvo = pvo->next;
829 }
830
831 return pvo;
832 }
833
834 int
lws_pvo_get_str(void * in,const char * name,const char ** result)835 lws_pvo_get_str(void *in, const char *name, const char **result)
836 {
837 const struct lws_protocol_vhost_options *pv =
838 lws_pvo_search((const struct lws_protocol_vhost_options *)in,
839 name);
840
841 if (!pv)
842 return 1;
843
844 *result = (const char *)pv->value;
845
846 return 0;
847 }
848
int
lws_broadcast(struct lws_context_per_thread *pt, int reason, void *in, size_t len)
{
	struct lws_vhost *v = pt->context->vhost_list;
	/* minimal fake wsi (see lws_fakewsi_def_plwsa) as callback carrier */
	lws_fakewsi_def_plwsa(pt);
	int n, ret = 0;

	lws_fakewsi_prep_plwsa_ctx(pt->context);
#if !defined(LWS_PLAT_FREERTOS) && LWS_MAX_SMP > 1
	/* tag the fake wsi with the calling pt's thread index */
	((struct lws *)plwsa)->tsi = (char)(int)(pt - &pt->context->pt[0]);
#endif

	/* offer the event to every protocol of every vhost; the return
	 * is nonzero if any callback returned nonzero */
	while (v) {
		const struct lws_protocols *p = v->protocols;

		plwsa->vhost = v; /* not a real bound wsi */

		for (n = 0; n < v->count_protocols; n++) {
			plwsa->protocol = p;
			if (p->callback &&
			    p->callback((struct lws *)plwsa, (enum lws_callback_reasons)reason, NULL, in, len))
				ret |= 1;
			p++;
		}

		v = v->vhost_next;
	}

	return ret;
}
879
void *
lws_wsi_user(struct lws *wsi)
{
	/* per-session user memory / pointer attached to this wsi */
	return wsi->user_space;
}
885
int
lws_wsi_tsi(struct lws *wsi)
{
	/* service thread index this wsi belongs to */
	return wsi->tsi;
}
891
892
void
lws_set_wsi_user(struct lws *wsi, void *data)
{
	/* free any library-owned user space first, then adopt the caller's
	 * pointer; it is marked externally owned so lws won't free it */
	if (!wsi->user_space_externally_allocated && wsi->user_space)
		lws_free(wsi->user_space);

	wsi->user_space_externally_allocated = 1;
	wsi->user_space = data;
}
902
struct lws *
lws_get_parent(const struct lws *wsi)
{
	/* parent wsi, or NULL if this wsi has none */
	return wsi->parent;
}
908
struct lws *
lws_get_child(const struct lws *wsi)
{
	/* head of this wsi's child list, or NULL */
	return wsi->child_list;
}
914
void *
lws_get_opaque_parent_data(const struct lws *wsi)
{
	/* opaque pointer previously stored by lws_set_opaque_parent_data() */
	return wsi->opaque_parent_data;
}
920
void
lws_set_opaque_parent_data(struct lws *wsi, void *data)
{
	/* stash an opaque pointer retrievable via lws_get_opaque_parent_data() */
	wsi->opaque_parent_data = data;
}
926
void *
lws_get_opaque_user_data(const struct lws *wsi)
{
	/* opaque pointer previously stored by lws_set_opaque_user_data() */
	return wsi->a.opaque_user_data;
}
932
void
lws_set_opaque_user_data(struct lws *wsi, void *data)
{
	/* stash an opaque pointer retrievable via lws_get_opaque_user_data() */
	wsi->a.opaque_user_data = data;
}
938
int
lws_get_child_pending_on_writable(const struct lws *wsi)
{
	/* flag: a child asked for a callback-on-writable via the parent */
	return wsi->parent_pending_cb_on_writable;
}
944
void
lws_clear_child_pending_on_writable(struct lws *wsi)
{
	/* reset the child-pending-writable flag once serviced */
	wsi->parent_pending_cb_on_writable = 0;
}
950
951
952
const char *
lws_get_vhost_name(struct lws_vhost *vhost)
{
	/* configured name of the vhost */
	return vhost->name;
}
958
int
lws_get_vhost_port(struct lws_vhost *vhost)
{
	/* port the vhost is listening on */
	return vhost->listen_port;
}
964
void *
lws_get_vhost_user(struct lws_vhost *vhost)
{
	/* opaque user pointer associated with the vhost */
	return vhost->user;
}
970
const char *
lws_get_vhost_iface(struct lws_vhost *vhost)
{
	/* network interface the vhost is bound to */
	return vhost->iface;
}
976
977 lws_sockfd_type
lws_get_socket_fd(struct lws * wsi)978 lws_get_socket_fd(struct lws *wsi)
979 {
980 if (!wsi)
981 return -1;
982 return wsi->desc.sockfd;
983 }
984
985
struct lws_vhost *
lws_vhost_get(struct lws *wsi)
{
	/* same as lws_get_vhost() below: the vhost this wsi is bound to */
	return wsi->a.vhost;
}
991
struct lws_vhost *
lws_get_vhost(struct lws *wsi)
{
	/* vhost this wsi is bound to (NULL if unbound) */
	return wsi->a.vhost;
}
997
const struct lws_protocols *
lws_protocol_get(struct lws *wsi)
{
	/* same as lws_get_protocol(): protocol bound to this wsi */
	return wsi->a.protocol;
}
1003
1004 #if defined(LWS_WITH_UDP)
const struct lws_udp *
lws_get_udp(const struct lws *wsi)
{
	/* udp-specific state for this wsi (UDP builds only) */
	return wsi->udp;
}
1010 #endif
1011
struct lws_context *
lws_get_context(const struct lws *wsi)
{
	/* lws context this wsi belongs to */
	return wsi->a.context;
}
1017
1018 struct lws_log_cx *
lwsl_wsi_get_cx(struct lws * wsi)1019 lwsl_wsi_get_cx(struct lws *wsi)
1020 {
1021 if (!wsi)
1022 return NULL;
1023
1024 return wsi->lc.log_cx;
1025 }
1026
1027 #if defined(LWS_WITH_CLIENT)
/*
 * h1 pipelining: when the active client wsi completes a transaction,
 * either idle the connection (nothing queued) or hand its socket, tls
 * and queue over to the next queued wsi, returned in *_wsi.  Returns 1
 * when a new transaction begins on the swapped-in wsi, 0 otherwise,
 * -1 on fds-table failure.
 */
int
_lws_generic_transaction_completed_active_conn(struct lws **_wsi, char take_vh_lock)
{
	struct lws *wnew, *wsi = *_wsi;

	/*
	 * Are we constitutionally capable of having a queue, ie, we are on
	 * the "active client connections" list?
	 *
	 * If not, that's it for us.
	 */

	if (lws_dll2_is_detached(&wsi->dll_cli_active_conns))
		return 0; /* no new transaction */

	/*
	 * With h1 queuing, the original "active client" moves his attributes
	 * like fd, ssl, queue and active client list entry to the next guy in
	 * the queue before closing... it's because the user code knows the
	 * individual wsi and the action must take place in the correct wsi
	 * context.  Note this means we don't truly pipeline headers.
	 *
	 * Trying to keep the original "active client" in place to do the work
	 * of the wsi breaks down when dealing with queued POSTs otherwise; it's
	 * also competing with the real mux child arrangements and complicating
	 * the code.
	 *
	 * For that reason, see if we have any queued child now...
	 */

	if (!wsi->dll2_cli_txn_queue_owner.head) {
		/*
		 * Nothing pipelined... we should hang around a bit
		 * in case something turns up... otherwise we'll close
		 */
		lwsl_wsi_info(wsi, "nothing pipelined waiting");
		lwsi_set_state(wsi, LRS_IDLING);

		lws_set_timeout(wsi, PENDING_TIMEOUT_CLIENT_CONN_IDLE,
				wsi->keep_warm_secs);

		return 0; /* no new transaction right now */
	}

	/*
	 * We have a queued child wsi we should bequeath our assets to, before
	 * closing ourself
	 */

	if (take_vh_lock)
		lws_vhost_lock(wsi->a.vhost);

	wnew = lws_container_of(wsi->dll2_cli_txn_queue_owner.head, struct lws,
				dll2_cli_txn_queue);

	assert(wsi != wnew);

	lws_dll2_remove(&wnew->dll2_cli_txn_queue);

	assert(lws_socket_is_valid(wsi->desc.sockfd));

	/* stop watching the socket under the old wsi's identity */
	__lws_change_pollfd(wsi, LWS_POLLOUT | LWS_POLLIN, 0);

	/* copy the fd */
	wnew->desc = wsi->desc;

	assert(lws_socket_is_valid(wnew->desc.sockfd));

	/* disconnect the fd from association with old wsi */

	if (__remove_wsi_socket_from_fds(wsi))
		return -1;

	sanity_assert_no_wsi_traces(wsi->a.context, wsi);
	sanity_assert_no_sockfd_traces(wsi->a.context, wsi->desc.sockfd);
	wsi->desc.sockfd = LWS_SOCK_INVALID;

	/* cancel any scheduled events still aimed at the old wsi */
	__lws_wsi_remove_from_sul(wsi);

	/*
	 * ... we're doing some magic here in terms of handing off the socket
	 * that has been active to a wsi that has not yet itself been active...
	 * depending on the event lib we may need to give a magic spark to the
	 * new guy and snuff out the old guy's magic spark at that level as well
	 */

#if defined(LWS_WITH_EVENT_LIBS)
	if (wsi->a.context->event_loop_ops->destroy_wsi)
		wsi->a.context->event_loop_ops->destroy_wsi(wsi);
	if (wsi->a.context->event_loop_ops->sock_accept)
		wsi->a.context->event_loop_ops->sock_accept(wnew);
#endif

	/* point the fd table entry to new guy */

	assert(lws_socket_is_valid(wnew->desc.sockfd));

	if (__insert_wsi_socket_into_fds(wsi->a.context, wnew))
		return -1;

#if defined(LWS_WITH_TLS)
	/* pass on the tls */

	wnew->tls = wsi->tls;
	wsi->tls.client_bio = NULL;
	wsi->tls.ssl = NULL;
	wsi->tls.use_ssl = 0;
#endif

	/* take over his copy of his endpoint as an active connection */

	if (!wnew->cli_hostname_copy && wsi->cli_hostname_copy) {
		wnew->cli_hostname_copy = wsi->cli_hostname_copy;
		wsi->cli_hostname_copy = NULL;
	}
	wnew->keep_warm_secs = wsi->keep_warm_secs;

	/*
	 * selected queued guy now replaces the original leader on the
	 * active client conn list
	 */

	lws_dll2_remove(&wsi->dll_cli_active_conns);
	lws_dll2_add_tail(&wnew->dll_cli_active_conns,
			  &wsi->a.vhost->dll_cli_active_conns_owner);

	/* move any queued guys to queue on new active conn */

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   wsi->dll2_cli_txn_queue_owner.head) {
		struct lws *ww = lws_container_of(d, struct lws,
						  dll2_cli_txn_queue);

		lws_dll2_remove(&ww->dll2_cli_txn_queue);
		lws_dll2_add_tail(&ww->dll2_cli_txn_queue,
				  &wnew->dll2_cli_txn_queue_owner);

	} lws_end_foreach_dll_safe(d, d1);

	if (take_vh_lock)
		lws_vhost_unlock(wsi->a.vhost);

	/*
	 * The original leader who passed on all his powers already can die...
	 * in the call stack above us there are guys who still want to touch
	 * him, so have him die next time around the event loop, not now.
	 */

	wsi->already_did_cce = 1; /* so the close doesn't trigger a CCE */
	lws_set_timeout(wsi, 1, LWS_TO_KILL_ASYNC);

	/* after the first one, they can only be coming from the queue */
	wnew->transaction_from_pipeline_queue = 1;

	lwsl_wsi_notice(wsi, " pipeline queue passed -> %s", lws_wsi_tag(wnew));

	*_wsi = wnew; /* inform caller we swapped */

	return 1; /* new transaction */
}
1188 #endif
1189
1190 int LWS_WARN_UNUSED_RESULT
lws_raw_transaction_completed(struct lws * wsi)1191 lws_raw_transaction_completed(struct lws *wsi)
1192 {
1193 if (lws_has_buffered_out(wsi)) {
1194 /*
1195 * ...so he tried to send something large, but it went out
1196 * as a partial, but he immediately called us to say he wants
1197 * to close the connection.
1198 *
1199 * Defer the close until the last part of the partial is sent.
1200 *
1201 */
1202
1203 lwsl_wsi_debug(wsi, "deferring due to partial");
1204 wsi->close_when_buffered_out_drained = 1;
1205 lws_callback_on_writable(wsi);
1206
1207 return 0;
1208 }
1209
1210 return -1;
1211 }
1212
/*
 * Rebind wsi to protocol p (or detach only, if p is NULL).
 *
 * Any currently-bound protocol gets its unbind callback (with `reason`
 * passed through as the `in` argument), its per-connection user_space is
 * freed (unless externally allocated), and wsi is removed from the
 * same-vh-protocol list.  Then the new protocol is bound, user_space is
 * (re)allocated, wsi is inserted on the same-vh-protocol list for p, and
 * p's bind callback runs.
 *
 * Returns 0 on success, 1 if user_space allocation or the bind callback
 * failed.
 */
int
lws_bind_protocol(struct lws *wsi, const struct lws_protocols *p,
		  const char *reason)
{
//	if (wsi->a.protocol == p)
//		return 0;
	const struct lws_protocols *vp = wsi->a.vhost->protocols, *vpo;

	/*
	 * protocol_bind_balance tracks that a bind callback was issued;
	 * only then does the old protocol get a matching unbind
	 */
	if (wsi->a.protocol && wsi->protocol_bind_balance) {
		wsi->a.protocol->callback(wsi,
		       wsi->role_ops->protocol_unbind_cb[!!lwsi_role_server(wsi)],
					wsi->user_space, (void *)reason, 0);
		wsi->protocol_bind_balance = 0;
	}
	/* the user_space belonged to the outgoing protocol binding */
	if (!wsi->user_space_externally_allocated)
		lws_free_set_NULL(wsi->user_space);

	lws_same_vh_protocol_remove(wsi);

	wsi->a.protocol = p;
	if (!p)
		/* binding to NULL protocol == just detach the old one */
		return 0;

	if (lws_ensure_user_space(wsi))
		return 1;

	/*
	 * If p points inside this vhost's own protocols array we can derive
	 * its index by pointer arithmetic; otherwise (eg, p belongs to some
	 * other vhost's array) search this vhost's list by protocol name
	 */
	if (p > vp && p < &vp[wsi->a.vhost->count_protocols])
		lws_same_vh_protocol_insert(wsi, (int)(p - vp));
	else {
		int n = wsi->a.vhost->count_protocols;
		int hit = 0;

		vpo = vp;

		while (n--) {
			if (p->name && vp->name && !strcmp(p->name, vp->name)) {
				hit = 1;
				lws_same_vh_protocol_insert(wsi, (int)(vp - vpo));
				break;
			}
			vp++;
		}
		if (!hit)
			lwsl_err("%s: %p is not in vhost '%s' protocols list\n",
				 __func__, p, wsi->a.vhost->name);
	}

	/* give the incoming protocol a chance to set up its binding */
	if (wsi->a.protocol->callback(wsi, wsi->role_ops->protocol_bind_cb[
				      !!lwsi_role_server(wsi)],
				      wsi->user_space, NULL, 0))
		return 1;

	wsi->protocol_bind_balance = 1;

	return 0;
}
1269
1270 void
lws_http_close_immortal(struct lws * wsi)1271 lws_http_close_immortal(struct lws *wsi)
1272 {
1273 struct lws *nwsi;
1274
1275 if (!wsi->mux_substream)
1276 return;
1277
1278 assert(wsi->mux_stream_immortal);
1279 wsi->mux_stream_immortal = 0;
1280
1281 nwsi = lws_get_network_wsi(wsi);
1282 lwsl_wsi_debug(wsi, "%s (%d)", lws_wsi_tag(nwsi),
1283 nwsi->immortal_substream_count);
1284 assert(nwsi->immortal_substream_count);
1285 nwsi->immortal_substream_count--;
1286 if (!nwsi->immortal_substream_count)
1287 /*
1288 * since we closed the only immortal stream on this nwsi, we
1289 * need to reapply a normal timeout regime to the nwsi
1290 */
1291 lws_set_timeout(nwsi, PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE,
1292 wsi->a.vhost->keepalive_timeout ?
1293 wsi->a.vhost->keepalive_timeout : 31);
1294 }
1295
1296 void
lws_mux_mark_immortal(struct lws * wsi)1297 lws_mux_mark_immortal(struct lws *wsi)
1298 {
1299 struct lws *nwsi;
1300
1301 lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
1302
1303 if (!wsi->mux_substream
1304 #if defined(LWS_WITH_CLIENT)
1305 && !wsi->client_mux_substream
1306 #endif
1307 ) {
1308 lwsl_wsi_err(wsi, "not mux substream");
1309 return;
1310 }
1311
1312 if (wsi->mux_stream_immortal)
1313 /* only need to handle it once per child wsi */
1314 return;
1315
1316 nwsi = lws_get_network_wsi(wsi);
1317 if (!nwsi)
1318 return;
1319
1320 lwsl_wsi_debug(wsi, "%s (%d)\n", lws_wsi_tag(nwsi),
1321 nwsi->immortal_substream_count);
1322
1323 wsi->mux_stream_immortal = 1;
1324 assert(nwsi->immortal_substream_count < 255); /* largest count */
1325 nwsi->immortal_substream_count++;
1326 if (nwsi->immortal_substream_count == 1)
1327 lws_set_timeout(nwsi, NO_PENDING_TIMEOUT, 0);
1328 }
1329
/*
 * Prepare wsi to carry a Server-Sent Events stream: release its header
 * allocation and mark it immortal (no pending timeout), since SSE streams
 * are expected to stay open indefinitely.  NULL wsi is tolerated.
 */
int
lws_http_mark_sse(struct lws *wsi)
{
	if (!wsi)
		return 0;

	lws_http_headers_detach(wsi);
	lws_mux_mark_immortal(wsi);

	/* an h2 substream must remember it is carrying SSE */
	if (wsi->mux_substream)
		wsi->h2_stream_carries_sse = 1;

	return 0;
}
1344
1345 #if defined(LWS_WITH_CLIENT)
1346
/*
 * Fetch a stashed client-connect string: prefer the generic client stash
 * (indexed by stash_idx) if one exists, otherwise fall back to the ah
 * header stash (indexed by hdr_idx) when an http role is built in, else
 * NULL.
 */
const char *
lws_wsi_client_stash_item(struct lws *wsi, int stash_idx, int hdr_idx)
{
	/* try the generic client stash */
	if (wsi->stash)
		return wsi->stash->cis[stash_idx];

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
	/* if not, use the ah stash if applicable */
	return lws_hdr_simple_ptr(wsi, (enum lws_token_indexes)hdr_idx);
#else
	return NULL;
#endif
}
1361 #endif
1362
1363 #if defined(LWS_ROLE_H2) || defined(LWS_ROLE_MQTT)
1364
1365 void
lws_wsi_mux_insert(struct lws * wsi,struct lws * parent_wsi,unsigned int sid)1366 lws_wsi_mux_insert(struct lws *wsi, struct lws *parent_wsi, unsigned int sid)
1367 {
1368 lwsl_wsi_info(wsi, "par %s: assign sid %d (curr %d)",
1369 lws_wsi_tag(parent_wsi), sid, wsi->mux.my_sid);
1370
1371 if (wsi->mux.my_sid && wsi->mux.my_sid != (unsigned int)sid)
1372 assert(0);
1373
1374 wsi->mux.my_sid = sid;
1375 wsi->mux.parent_wsi = parent_wsi;
1376 wsi->role_ops = parent_wsi->role_ops;
1377
1378 /* new guy's sibling is whoever was the first child before */
1379 wsi->mux.sibling_list = parent_wsi->mux.child_list;
1380
1381 /* first child is now the new guy */
1382 parent_wsi->mux.child_list = wsi;
1383
1384 parent_wsi->mux.child_count++;
1385 }
1386
1387 struct lws *
lws_wsi_mux_from_id(struct lws * parent_wsi,unsigned int sid)1388 lws_wsi_mux_from_id(struct lws *parent_wsi, unsigned int sid)
1389 {
1390 lws_start_foreach_ll(struct lws *, wsi, parent_wsi->mux.child_list) {
1391 if (wsi->mux.my_sid == sid)
1392 return wsi;
1393 } lws_end_foreach_ll(wsi, mux.sibling_list);
1394
1395 return NULL;
1396 }
1397
/*
 * Debug-only: log every sibling of wsi (ie, all children of its mux
 * parent) at INFO level.  No-op without a parent or if INFO logs are
 * not visible.
 */
void
lws_wsi_mux_dump_children(struct lws *wsi)
{
#if defined(_DEBUG)
	struct lws *c;

	if (!wsi->mux.parent_wsi || !lwsl_visible(LLL_INFO))
		return;

	for (c = wsi->mux.parent_wsi->mux.child_list; c;
	     c = c->mux.sibling_list) {
		lwsl_wsi_info(wsi, " \\---- child %s %s\n",
			      c->role_ops ? c->role_ops->name : "?",
			      lws_wsi_tag(c));
		/* a self-referencing sibling link would loop forever */
		assert(c != c->mux.sibling_list);
	}
#endif
}
1414
/*
 * Close and free every mux child of wsi with the given close `reason`.
 *
 * Each child is unlinked from the sibling chain BEFORE being closed, and
 * its next-sibling pointer is saved first, because __lws_close_free_wsi()
 * frees the child and may itself walk / modify the parent's child list.
 */
void
lws_wsi_mux_close_children(struct lws *wsi, int reason)
{
	struct lws *wsi2;
	struct lws **w;

	if (!wsi->mux.child_list)
		return;

	w = &wsi->mux.child_list;
	while (*w) {
		lwsl_wsi_info((*w), " closing child");
		/* disconnect from siblings */
		wsi2 = (*w)->mux.sibling_list;
		assert (wsi2 != *w);
		(*w)->mux.sibling_list = NULL;
		(*w)->socket_is_permanently_unusable = 1;
		__lws_close_free_wsi(*w, (enum lws_close_status)reason, "mux child recurse");
		/* step the list head on to the saved next sibling */
		*w = wsi2;
	}
}
1436
1437
1438 void
lws_wsi_mux_sibling_disconnect(struct lws * wsi)1439 lws_wsi_mux_sibling_disconnect(struct lws *wsi)
1440 {
1441 struct lws *wsi2;
1442
1443 lws_start_foreach_llp(struct lws **, w,
1444 wsi->mux.parent_wsi->mux.child_list) {
1445
1446 /* disconnect from siblings */
1447 if (*w == wsi) {
1448 wsi2 = (*w)->mux.sibling_list;
1449 (*w)->mux.sibling_list = NULL;
1450 *w = wsi2;
1451 lwsl_wsi_debug(wsi, " disentangled from sibling %s",
1452 lws_wsi_tag(wsi2));
1453 break;
1454 }
1455 } lws_end_foreach_llp(w, mux.sibling_list);
1456 wsi->mux.parent_wsi->mux.child_count--;
1457
1458 wsi->mux.parent_wsi = NULL;
1459 }
1460
/*
 * Debug-only: dump each mux child of wsi, flagging with '*' those that
 * have requested POLLOUT service.
 */
void
lws_wsi_mux_dump_waiting_children(struct lws *wsi)
{
#if defined(_DEBUG)
	struct lws *c;

	lwsl_info("%s: %s: children waiting for POLLOUT service:\n",
		  __func__, lws_wsi_tag(wsi));

	for (c = wsi->mux.child_list; c; c = c->mux.sibling_list)
		lwsl_wsi_info(c, " %c sid %u: 0x%x %s %s",
			      c->mux.requested_POLLOUT ? '*' : ' ',
			      c->mux.my_sid, lwsi_state(c),
			      c->role_ops->name,
			      c->a.protocol ? c->a.protocol->name :
					      "noprotocol");
#endif
}
1480
1481 int
lws_wsi_mux_mark_parents_needing_writeable(struct lws * wsi)1482 lws_wsi_mux_mark_parents_needing_writeable(struct lws *wsi)
1483 {
1484 struct lws /* *network_wsi = lws_get_network_wsi(wsi), */ *wsi2;
1485 //int already = network_wsi->mux.requested_POLLOUT;
1486
1487 /* mark everybody above him as requesting pollout */
1488
1489 wsi2 = wsi;
1490 while (wsi2) {
1491 wsi2->mux.requested_POLLOUT = 1;
1492 lwsl_wsi_info(wsi2, "sid %u, pending writable",
1493 wsi2->mux.my_sid);
1494 wsi2 = wsi2->mux.parent_wsi;
1495 }
1496
1497 return 0; // already;
1498 }
1499
/*
 * Rotate the child at the head of the sibling list (*wsi2) to the tail,
 * clear its requested_POLLOUT flag, and return it -- presumably so
 * writeable service cycles fairly among siblings (TODO confirm with
 * callers).  *wsi2 is updated to the new head.  Returns NULL only if the
 * list was empty.
 */
struct lws *
lws_wsi_mux_move_child_to_tail(struct lws **wsi2)
{
	struct lws *w = *wsi2;

	while (w) {
		if (!w->mux.sibling_list) { /* w is the current last */
			lwsl_wsi_debug(w, "*wsi2 = %s\n", lws_wsi_tag(*wsi2));

			if (w == *wsi2) /* we are already last */
				break;

			/* last points to us as new last */
			w->mux.sibling_list = *wsi2;

			/* guy pointing to us until now points to
			 * our old next */
			*wsi2 = (*wsi2)->mux.sibling_list;

			/* we point to nothing because we are last */
			w->mux.sibling_list->mux.sibling_list = NULL;

			/* w becomes us (the node we just moved to tail) */
			w = w->mux.sibling_list;
			break;
		}
		w = w->mux.sibling_list;
	}

	/* clear the waiting for POLLOUT on the guy that was chosen */

	if (w)
		w->mux.requested_POLLOUT = 0;

	return w;
}
1536
1537 int
lws_wsi_mux_action_pending_writeable_reqs(struct lws * wsi)1538 lws_wsi_mux_action_pending_writeable_reqs(struct lws *wsi)
1539 {
1540 struct lws *w = wsi->mux.child_list;
1541
1542 while (w) {
1543 if (w->mux.requested_POLLOUT) {
1544 if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
1545 return -1;
1546 return 0;
1547 }
1548 w = w->mux.sibling_list;
1549 }
1550
1551 if (lws_change_pollfd(wsi, LWS_POLLOUT, 0))
1552 return -1;
1553
1554 return 0;
1555 }
1556
1557 int
lws_wsi_txc_check_skint(struct lws_tx_credit * txc,int32_t tx_cr)1558 lws_wsi_txc_check_skint(struct lws_tx_credit *txc, int32_t tx_cr)
1559 {
1560 if (txc->tx_cr <= 0) {
1561 /*
1562 * If other side is not able to cope with us sending any DATA
1563 * so no matter if we have POLLOUT on our side if it's DATA we
1564 * want to send.
1565 */
1566
1567 if (!txc->skint)
1568 lwsl_info("%s: %p: skint (%d)\n", __func__, txc,
1569 (int)txc->tx_cr);
1570
1571 txc->skint = 1;
1572
1573 return 1;
1574 }
1575
1576 if (txc->skint)
1577 lwsl_info("%s: %p: unskint (%d)\n", __func__, txc,
1578 (int)txc->tx_cr);
1579
1580 txc->skint = 0;
1581
1582 return 0;
1583 }
1584
#if defined(_DEBUG)
/*
 * Debug-only: log a one-line summary of a tx credit object at location
 * `at` for stream `sid`: whether we are currently skint, our estimate of
 * credit extended to the peer (peer_tx_cr_est) and credit the peer has
 * extended to us (tx_cr).
 */
void
lws_wsi_txc_describe(struct lws_tx_credit *txc, const char *at, uint32_t sid)
{
	lwsl_info("%s: %p: %s: sid %d: %speer-to-us: %d, us-to-peer: %d\n",
		  __func__, txc, at, (int)sid, txc->skint ? "SKINT, " : "",
		  (int)txc->peer_tx_cr_est, (int)txc->tx_cr);
}
#endif
1594
1595 int
lws_wsi_tx_credit(struct lws * wsi,char peer_to_us,int add)1596 lws_wsi_tx_credit(struct lws *wsi, char peer_to_us, int add)
1597 {
1598 if (wsi->role_ops && lws_rops_fidx(wsi->role_ops, LWS_ROPS_tx_credit))
1599 return lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_tx_credit).
1600 tx_credit(wsi, peer_to_us, add);
1601
1602 return 0;
1603 }
1604
1605 /*
1606 * Let the protocol know about incoming tx credit window updates if it's
1607 * managing the flow control manually (it may want to proxy this information)
1608 */
1609
1610 int
lws_wsi_txc_report_manual_txcr_in(struct lws * wsi,int32_t bump)1611 lws_wsi_txc_report_manual_txcr_in(struct lws *wsi, int32_t bump)
1612 {
1613 if (!wsi->txc.manual)
1614 /*
1615 * If we don't care about managing it manually, no need to
1616 * report it
1617 */
1618 return 0;
1619
1620 return user_callback_handle_rxflow(wsi->a.protocol->callback,
1621 wsi, LWS_CALLBACK_WSI_TX_CREDIT_GET,
1622 wsi->user_space, NULL, (size_t)bump);
1623 }
1624
1625 #if defined(LWS_WITH_CLIENT)
1626
/*
 * Migrate queued, pipelined client transactions on wsi's txn queue onto
 * wsi as individual mux streams (h2 or mqtt), under the context and vhost
 * locks.  Uses the _safe dll iterator since each entry removes itself
 * from the queue.  Always returns 0.
 */
int
lws_wsi_mux_apply_queue(struct lws *wsi)
{
	/* we have a transaction queue that wants to pipeline */

	lws_context_lock(wsi->a.context, __func__); /* -------------- cx { */
	lws_vhost_lock(wsi->a.vhost);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   wsi->dll2_cli_txn_queue_owner.head) {
		struct lws *w = lws_container_of(d, struct lws,
						 dll2_cli_txn_queue);

#if defined(LWS_ROLE_H2)
		if (lwsi_role_http(wsi) &&
		    lwsi_state(w) == LRS_H2_WAITING_TO_SEND_HEADERS) {
			lwsl_wsi_info(w, "cli pipeq to be h2");

			lwsi_set_state(w, LRS_H1C_ISSUE_HANDSHAKE2);

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an h2 stream */
			lws_wsi_h2_adopt(wsi, w);
		}
#endif

#if defined(LWS_ROLE_MQTT)
		/*
		 * NOTE(review): unlike the h2 branch above this tests the
		 * state of the leader wsi, not the queued w -- looks
		 * intentional (leader must be ESTABLISHED) but confirm
		 */
		if (lwsi_role_mqtt(wsi) &&
		    lwsi_state(wsi) == LRS_ESTABLISHED) {
			lwsl_wsi_info(w, "cli pipeq to be mqtt\n");

			/* remove ourselves from client queue */
			lws_dll2_remove(&w->dll2_cli_txn_queue);

			/* attach ourselves as an mqtt stream */
			lws_wsi_mqtt_adopt(wsi, w);
		}
#endif

	} lws_end_foreach_dll_safe(d, d1);

	lws_vhost_unlock(wsi->a.vhost);
	lws_context_unlock(wsi->a.context); /* } cx -------------- */

	return 0;
}
1675
1676 #endif
1677
1678 #endif
1679