1 /*
2  * libwebsockets - small server side websockets and web server implementation
3  *
4  * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #include <private-lib-core.h>
26 
27 #define LWS_CPYAPP(ptr, str) { strcpy(ptr, str); ptr += strlen(str); }
28 
29 /*
30  * client-parser.c: lws_ws_client_rx_sm() needs to be roughly kept in
31  *   sync with changes here, esp related to ext draining
32  */
33 
34 int
35 lws_ws_rx_sm(struct lws *wsi, char already_processed, unsigned char c)
36 {
37 	int callback_action = LWS_CALLBACK_RECEIVE;
38 	struct lws_ext_pm_deflate_rx_ebufs pmdrx;
39 	unsigned short close_code;
40 	unsigned char *pp;
41 	int ret = 0;
42 	int n = 0;
43 #if !defined(LWS_WITHOUT_EXTENSIONS)
44 	int rx_draining_ext = 0;
45 	int lin;
46 #endif
47 
48 	pmdrx.eb_in.token = NULL;
49 	pmdrx.eb_in.len = 0;
50 	pmdrx.eb_out.token = NULL;
51 	pmdrx.eb_out.len = 0;
52 
53 	switch (wsi->lws_rx_parse_state) {
54 	case LWS_RXPS_NEW:
55 #if !defined(LWS_WITHOUT_EXTENSIONS)
56 		if (wsi->ws->rx_draining_ext) {
57 			pmdrx.eb_in.token = NULL;
58 			pmdrx.eb_in.len = 0;
59 			pmdrx.eb_out.token = NULL;
60 			pmdrx.eb_out.len = 0;
61 			lws_remove_wsi_from_draining_ext_list(wsi);
62 			rx_draining_ext = 1;
63 			lwsl_debug("%s: doing draining flow\n", __func__);
64 
65 			goto drain_extension;
66 		}
67 #endif
68 		switch (wsi->ws->ietf_spec_revision) {
69 		case 13:
70 			/*
71 			 * no prepended frame key any more
72 			 */
73 			wsi->ws->all_zero_nonce = 1;
74 			goto handle_first;
75 
76 		default:
77 			lwsl_warn("lws_ws_rx_sm: unknown spec version %d\n",
78 				  wsi->ws->ietf_spec_revision);
79 			break;
80 		}
81 		break;
82 	case LWS_RXPS_04_mask_1:
83 		wsi->ws->mask[1] = c;
84 		if (c)
85 			wsi->ws->all_zero_nonce = 0;
86 		wsi->lws_rx_parse_state = LWS_RXPS_04_mask_2;
87 		break;
88 	case LWS_RXPS_04_mask_2:
89 		wsi->ws->mask[2] = c;
90 		if (c)
91 			wsi->ws->all_zero_nonce = 0;
92 		wsi->lws_rx_parse_state = LWS_RXPS_04_mask_3;
93 		break;
94 	case LWS_RXPS_04_mask_3:
95 		wsi->ws->mask[3] = c;
96 		if (c)
97 			wsi->ws->all_zero_nonce = 0;
98 
99 		/*
100 		 * start from the zero'th byte in the XOR key buffer since
101 		 * this is the start of a frame with a new key
102 		 */
103 
104 		wsi->ws->mask_idx = 0;
105 
106 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_1;
107 		break;
108 
109 	/*
110 	 *  04 logical framing from the spec (all this is masked when incoming
111 	 *  and has to be unmasked)
112 	 *
113 	 * We ignore the possibility of extension data because we don't
114 	 * negotiate any extensions at the moment.
115 	 *
116 	 *    0                   1                   2                   3
117 	 *    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
118 	 *   +-+-+-+-+-------+-+-------------+-------------------------------+
119 	 *   |F|R|R|R| opcode|R| Payload len |    Extended payload length    |
120 	 *   |I|S|S|S|  (4)  |S|     (7)     |             (16/63)           |
121 	 *   |N|V|V|V|       |V|             |   (if payload len==126/127)   |
122 	 *   | |1|2|3|       |4|             |                               |
123 	 *   +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
124 	 *   |     Extended payload length continued, if payload len == 127  |
125 	 *   + - - - - - - - - - - - - - - - +-------------------------------+
126 	 *   |                               |         Extension data        |
127 	 *   +-------------------------------+ - - - - - - - - - - - - - - - +
128 	 *   :                                                               :
129 	 *   +---------------------------------------------------------------+
130 	 *   :                       Application data                        :
131 	 *   +---------------------------------------------------------------+
132 	 *
133 	 *  We pass payload through to userland as soon as we get it, ignoring
134 	 *  FIN.  It's up to userland to buffer it up if it wants to see a
135 	 *  whole unfragmented block of the original size (which may be up to
136 	 *  2^63 long!)
137 	 */
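	/*
	 * Worked example (illustrative, follows directly from the layout
	 * above): a masked, unfragmented 5-byte text frame from a client
	 * begins
	 *
	 *   0x81  FIN=1, RSV1-3=0, opcode=0x1 (text)
	 *   0x85  MASK=1, payload len=5
	 *
	 * followed by the 4-byte masking key, then 5 payload bytes each
	 * XORed with key[i % 4].  A 300-byte binary frame would instead
	 * carry payload len=126 and the 16-bit extended length 0x01 0x2c.
	 */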
138 
139 	case LWS_RXPS_04_FRAME_HDR_1:
140 handle_first:
141 
142 		wsi->ws->opcode = c & 0xf;
143 		wsi->ws->rsv = c & 0x70;
144 		wsi->ws->final = !!((c >> 7) & 1);
145 		wsi->ws->defeat_check_utf8 = 0;
146 
147 		if (((wsi->ws->opcode) & 8) && !wsi->ws->final) {
148 			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
149 					(uint8_t *)"frag ctl", 8);
150 			return -1;
151 		}
152 
153 		switch (wsi->ws->opcode) {
154 		case LWSWSOPC_TEXT_FRAME:
155 			wsi->ws->check_utf8 = lws_check_opt(
156 				wsi->a.context->options,
157 				LWS_SERVER_OPTION_VALIDATE_UTF8);
158 			/* fallthru */
159 		case LWSWSOPC_BINARY_FRAME:
160 			if (wsi->ws->opcode == LWSWSOPC_BINARY_FRAME)
161 				wsi->ws->check_utf8 = 0;
162 			if (wsi->ws->continuation_possible) {
163 				lws_close_reason(wsi,
164 					LWS_CLOSE_STATUS_PROTOCOL_ERR,
165 					(uint8_t *)"bad cont", 8);
166 				return -1;
167 			}
168 			wsi->ws->rsv_first_msg = (c & 0x70);
169 #if !defined(LWS_WITHOUT_EXTENSIONS)
170 			/*
171 			 *  set the expectation that we will have to
172 			 * fake up the zlib trailer to the inflator for this
173 			 * frame
174 			 */
175 			wsi->ws->pmd_trailer_application = !!(c & 0x40);
176 #endif
177 			wsi->ws->frame_is_binary =
178 			     wsi->ws->opcode == LWSWSOPC_BINARY_FRAME;
179 			wsi->ws->first_fragment = 1;
180 			wsi->ws->continuation_possible = !wsi->ws->final;
181 			break;
182 		case LWSWSOPC_CONTINUATION:
183 			if (!wsi->ws->continuation_possible) {
184 				lws_close_reason(wsi,
185 					LWS_CLOSE_STATUS_PROTOCOL_ERR,
186 					(uint8_t *)"bad cont", 8);
187 				return -1;
188 			}
189 			break;
190 		case LWSWSOPC_CLOSE:
191 			wsi->ws->check_utf8 = 0;
192 			wsi->ws->utf8 = 0;
193 			break;
194 		case 3:
195 		case 4:
196 		case 5:
197 		case 6:
198 		case 7:
199 		case 0xb:
200 		case 0xc:
201 		case 0xd:
202 		case 0xe:
203 		case 0xf:
204 			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
205 					(uint8_t *)"bad opc", 7);
206 			lwsl_info("illegal opcode\n");
207 			return -1;
208 		}
209 
210 		if (wsi->ws->owed_a_fin &&
211 		    (wsi->ws->opcode == LWSWSOPC_TEXT_FRAME ||
212 		     wsi->ws->opcode == LWSWSOPC_BINARY_FRAME)) {
213 			lwsl_info("hey you owed us a FIN\n");
214 			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
215 					(uint8_t *)"bad fin", 7);
216 			return -1;
217 		}
218 		if ((!(wsi->ws->opcode & 8)) && wsi->ws->final) {
219 			wsi->ws->continuation_possible = 0;
220 			wsi->ws->owed_a_fin = 0;
221 		}
222 
223 		if (!wsi->ws->final)
224 			wsi->ws->owed_a_fin = 1;
225 
226 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN;
227 		if (wsi->ws->rsv &&
228 		    (
229 #if !defined(LWS_WITHOUT_EXTENSIONS)
230 				    !wsi->ws->count_act_ext ||
231 #endif
232 				    (wsi->ws->rsv & ~0x40))) {
233 			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
234 					 (uint8_t *)"rsv bits", 8);
235 			return -1;
236 		}
237 		break;
238 
239 	case LWS_RXPS_04_FRAME_HDR_LEN:
240 
241 		wsi->ws->this_frame_masked = !!(c & 0x80);
242 
243 		switch (c & 0x7f) {
244 		case 126:
245 			/* control frames are not allowed to have big lengths */
246 			if (wsi->ws->opcode & 8)
247 				goto illegal_ctl_length;
248 
249 			wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_2;
250 			break;
251 		case 127:
252 			/* control frames are not allowed to have big lengths */
253 			if (wsi->ws->opcode & 8)
254 				goto illegal_ctl_length;
255 
256 			wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_8;
257 			break;
258 		default:
259 			wsi->ws->rx_packet_length = c & 0x7f;
260 
261 
262 			if (wsi->ws->this_frame_masked)
263 				wsi->lws_rx_parse_state =
264 						LWS_RXPS_07_COLLECT_FRAME_KEY_1;
265 			else
266 				if (wsi->ws->rx_packet_length) {
267 					wsi->lws_rx_parse_state =
268 					LWS_RXPS_WS_FRAME_PAYLOAD;
269 				} else {
270 					wsi->lws_rx_parse_state = LWS_RXPS_NEW;
271 					goto spill;
272 				}
273 			break;
274 		}
275 		break;
276 
277 	case LWS_RXPS_04_FRAME_HDR_LEN16_2:
278 		wsi->ws->rx_packet_length = (size_t)(c << 8);
279 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_1;
280 		break;
281 
282 	case LWS_RXPS_04_FRAME_HDR_LEN16_1:
283 		wsi->ws->rx_packet_length |= c;
284 		if (wsi->ws->this_frame_masked)
285 			wsi->lws_rx_parse_state =
286 					LWS_RXPS_07_COLLECT_FRAME_KEY_1;
287 		else {
288 			wsi->lws_rx_parse_state =
289 				LWS_RXPS_WS_FRAME_PAYLOAD;
290 		}
291 		break;
292 
293 	case LWS_RXPS_04_FRAME_HDR_LEN64_8:
294 		if (c & 0x80) {
295 			lwsl_warn("b63 of length must be zero\n");
296 			/* kill the connection */
297 			return -1;
298 		}
299 #if defined __LP64__
300 		wsi->ws->rx_packet_length = ((size_t)c) << 56;
301 #else
302 		wsi->ws->rx_packet_length = 0;
303 #endif
304 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_7;
305 		break;
306 
307 	case LWS_RXPS_04_FRAME_HDR_LEN64_7:
308 #if defined __LP64__
309 		wsi->ws->rx_packet_length |= ((size_t)c) << 48;
310 #endif
311 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_6;
312 		break;
313 
314 	case LWS_RXPS_04_FRAME_HDR_LEN64_6:
315 #if defined __LP64__
316 		wsi->ws->rx_packet_length |= ((size_t)c) << 40;
317 #endif
318 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_5;
319 		break;
320 
321 	case LWS_RXPS_04_FRAME_HDR_LEN64_5:
322 #if defined __LP64__
323 		wsi->ws->rx_packet_length |= ((size_t)c) << 32;
324 #endif
325 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_4;
326 		break;
327 
328 	case LWS_RXPS_04_FRAME_HDR_LEN64_4:
329 		wsi->ws->rx_packet_length |= ((size_t)c) << 24;
330 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_3;
331 		break;
332 
333 	case LWS_RXPS_04_FRAME_HDR_LEN64_3:
334 		wsi->ws->rx_packet_length |= ((size_t)c) << 16;
335 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_2;
336 		break;
337 
338 	case LWS_RXPS_04_FRAME_HDR_LEN64_2:
339 		wsi->ws->rx_packet_length |= ((size_t)c) << 8;
340 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_1;
341 		break;
342 
343 	case LWS_RXPS_04_FRAME_HDR_LEN64_1:
344 		wsi->ws->rx_packet_length |= ((size_t)c);
345 		if (wsi->ws->this_frame_masked)
346 			wsi->lws_rx_parse_state =
347 					LWS_RXPS_07_COLLECT_FRAME_KEY_1;
348 		else
349 			wsi->lws_rx_parse_state = LWS_RXPS_WS_FRAME_PAYLOAD;
350 		break;
351 
352 	case LWS_RXPS_07_COLLECT_FRAME_KEY_1:
353 		wsi->ws->mask[0] = c;
354 		if (c)
355 			wsi->ws->all_zero_nonce = 0;
356 		wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_2;
357 		break;
358 
359 	case LWS_RXPS_07_COLLECT_FRAME_KEY_2:
360 		wsi->ws->mask[1] = c;
361 		if (c)
362 			wsi->ws->all_zero_nonce = 0;
363 		wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_3;
364 		break;
365 
366 	case LWS_RXPS_07_COLLECT_FRAME_KEY_3:
367 		wsi->ws->mask[2] = c;
368 		if (c)
369 			wsi->ws->all_zero_nonce = 0;
370 		wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_4;
371 		break;
372 
373 	case LWS_RXPS_07_COLLECT_FRAME_KEY_4:
374 		wsi->ws->mask[3] = c;
375 		if (c)
376 			wsi->ws->all_zero_nonce = 0;
377 		wsi->lws_rx_parse_state = LWS_RXPS_WS_FRAME_PAYLOAD;
378 		wsi->ws->mask_idx = 0;
379 		if (wsi->ws->rx_packet_length == 0) {
380 			wsi->lws_rx_parse_state = LWS_RXPS_NEW;
381 			goto spill;
382 		}
383 		break;
384 
385 
386 	case LWS_RXPS_WS_FRAME_PAYLOAD:
387 		assert(wsi->ws->rx_ubuf);
388 
389 		if (wsi->ws->rx_ubuf_head + LWS_PRE >= wsi->ws->rx_ubuf_alloc) {
390 			lwsl_err("Attempted overflow \n");
391 			return -1;
392 		}
393 		if (!(already_processed & ALREADY_PROCESSED_IGNORE_CHAR)) {
394 			if (wsi->ws->all_zero_nonce)
395 				wsi->ws->rx_ubuf[LWS_PRE +
396 				                 (wsi->ws->rx_ubuf_head++)] = c;
397 			else
398 				wsi->ws->rx_ubuf[LWS_PRE +
399 				                 (wsi->ws->rx_ubuf_head++)] =
400 				   c ^ wsi->ws->mask[(wsi->ws->mask_idx++) & 3];
401 
402 			--wsi->ws->rx_packet_length;
403 		}
404 
405 		if (!wsi->ws->rx_packet_length) {
406 			lwsl_debug("%s: ws fragment length exhausted\n",
407 				   __func__);
408 			/* spill because we have the whole frame */
409 			wsi->lws_rx_parse_state = LWS_RXPS_NEW;
410 			goto spill;
411 		}
412 #if !defined(LWS_WITHOUT_EXTENSIONS)
413 		if (wsi->ws->rx_draining_ext) {
414 			lwsl_debug("%s: UNTIL_EXHAUSTED draining\n", __func__);
415 			goto drain_extension;
416 		}
417 #endif
418 		/*
419 		 * if there's no protocol max frame size given, we are
420 		 * supposed to default to context->pt_serv_buf_size
421 		 */
422 		if (!wsi->a.protocol->rx_buffer_size &&
423 		    wsi->ws->rx_ubuf_head != wsi->a.context->pt_serv_buf_size)
424 			break;
425 
426 		if (wsi->a.protocol->rx_buffer_size &&
427 		    wsi->ws->rx_ubuf_head != wsi->a.protocol->rx_buffer_size)
428 			break;
429 
430 		/* spill because we filled our rx buffer */
431 spill:
432 		/*
433 		 * is this frame a control packet we should take care of at this
434 		 * layer?  If so service it and hide it from the user callback
435 		 */
436 
437 		lwsl_parser("spill on %s\n", wsi->a.protocol->name);
438 
439 		switch (wsi->ws->opcode) {
440 		case LWSWSOPC_CLOSE:
441 
442 			if (wsi->ws->peer_has_sent_close)
443 				break;
444 
445 			wsi->ws->peer_has_sent_close = 1;
446 
447 			pp = &wsi->ws->rx_ubuf[LWS_PRE];
448 			if (lws_check_opt(wsi->a.context->options,
449 					  LWS_SERVER_OPTION_VALIDATE_UTF8) &&
450 			    wsi->ws->rx_ubuf_head > 2 &&
451 			    lws_check_utf8(&wsi->ws->utf8, pp + 2,
452 					   wsi->ws->rx_ubuf_head - 2))
453 				goto utf8_fail;
454 
455 			/* is this an acknowledgment of our close? */
456 			if (lwsi_state(wsi) == LRS_AWAITING_CLOSE_ACK) {
457 				/*
458 				 * fine he has told us he is closing too, let's
459 				 * finish our close
460 				 */
461 				lwsl_parser("seen client close ack\n");
462 				return -1;
463 			}
464 			if (lwsi_state(wsi) == LRS_RETURNED_CLOSE)
465 				/* if he sends us 2 CLOSE, kill him */
466 				return -1;
467 
468 			if (lws_partial_buffered(wsi)) {
469 				/*
470 				 * if we're in the middle of something,
471 				 * we can't do a normal close response and
472 				 * have to just close our end.
473 				 */
474 				wsi->socket_is_permanently_unusable = 1;
475 				lwsl_parser("Closing on peer close "
476 					    "due to pending tx\n");
477 				return -1;
478 			}
479 
480 			if (wsi->ws->rx_ubuf_head >= 2) {
481 				close_code = (unsigned short)((pp[0] << 8) | pp[1]);
482 				if (close_code < 1000 ||
483 				    close_code == 1004 ||
484 				    close_code == 1005 ||
485 				    close_code == 1006 ||
486 				    close_code == 1012 ||
487 				    close_code == 1013 ||
488 				    close_code == 1014 ||
489 				    close_code == 1015 ||
490 				    (close_code >= 1016 && close_code < 3000)
491 				) {
492 					pp[0] = (LWS_CLOSE_STATUS_PROTOCOL_ERR >> 8) & 0xff;
493 					pp[1] = LWS_CLOSE_STATUS_PROTOCOL_ERR & 0xff;
494 				}
495 			}
496 
497 			if (user_callback_handle_rxflow(
498 					wsi->a.protocol->callback, wsi,
499 					LWS_CALLBACK_WS_PEER_INITIATED_CLOSE,
500 					wsi->user_space,
501 					&wsi->ws->rx_ubuf[LWS_PRE],
502 					wsi->ws->rx_ubuf_head))
503 				return -1;
504 
505 			lwsl_parser("server sees client close packet\n");
506 			lwsi_set_state(wsi, LRS_RETURNED_CLOSE);
507 			/* deal with the close packet contents as a PONG */
508 			wsi->ws->payload_is_close = 1;
509 			goto process_as_ping;
510 
511 		case LWSWSOPC_PING:
512 			lwsl_info("received %d byte ping, sending pong\n",
513 						 (int)wsi->ws->rx_ubuf_head);
514 
515 			if (wsi->ws->pong_pending_flag) {
516 				/*
517 				 * there is already a pending pong payload
518 				 * we should just log and drop
519 				 */
520 				lwsl_parser("DROP PING since one pending\n");
521 				goto ping_drop;
522 			}
523 process_as_ping:
524 			/* control frame payloads are limited to 125 bytes (RFC 6455) */
525 			if (wsi->ws->rx_ubuf_head > 128 - 3) {
526 				lwsl_parser("DROP PING payload too large\n");
527 				goto ping_drop;
528 			}
529 
530 			/* stash the pong payload */
531 			memcpy(wsi->ws->pong_payload_buf + LWS_PRE,
532 			       &wsi->ws->rx_ubuf[LWS_PRE],
533 				wsi->ws->rx_ubuf_head);
534 
535 			wsi->ws->pong_payload_len = (uint8_t)wsi->ws->rx_ubuf_head;
536 			wsi->ws->pong_pending_flag = 1;
537 
538 			/* get it sent as soon as possible */
539 			lws_callback_on_writable(wsi);
540 ping_drop:
541 			wsi->ws->rx_ubuf_head = 0;
542 			return 0;
543 
544 		case LWSWSOPC_PONG:
545 			lwsl_info("received pong\n");
546 			lwsl_hexdump(&wsi->ws->rx_ubuf[LWS_PRE],
547 			             wsi->ws->rx_ubuf_head);
548 
549 			lws_validity_confirmed(wsi);
550 
551 			/* issue it */
552 			callback_action = LWS_CALLBACK_RECEIVE_PONG;
553 			break;
554 
555 		case LWSWSOPC_TEXT_FRAME:
556 		case LWSWSOPC_BINARY_FRAME:
557 		case LWSWSOPC_CONTINUATION:
558 			break;
559 
560 		default:
561 			lwsl_parser("unknown opc %x\n", wsi->ws->opcode);
562 
563 			return -1;
564 		}
565 
566 		/*
567 		 * No it's real payload, pass it up to the user callback.
568 		 *
569 		 * We have been statefully collecting it in the
570 		 * LWS_RXPS_WS_FRAME_PAYLOAD clause above.
571 		 *
572 		 * It's nicely buffered with the pre-padding taken care of
573 		 * so it can be sent straight out again using lws_write.
574 		 *
575 		 * However, now we have a chunk of it, we want to deal with it
576 		 * all here.  Since this may be input to permessage-deflate and
577 		 * there are block limits on that for input and output, we may
578 		 * need to iterate.
579 		 */
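		/*
		 * (Illustrative aside, not part of this function: the
		 * pre-padding means the payload sits at &rx_ubuf[LWS_PRE],
		 * so user code receiving it as "in"/"len" can echo it back
		 * without copying, eg
		 *
		 *	lws_write(wsi, (unsigned char *)in, len,
		 *		  lws_frame_is_binary(wsi) ? LWS_WRITE_BINARY :
		 *					     LWS_WRITE_TEXT);
		 *
		 * relying on the LWS_PRE headroom already being present.)
		 */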
580 
581 		pmdrx.eb_in.token = &wsi->ws->rx_ubuf[LWS_PRE];
582 		pmdrx.eb_in.len = (int)wsi->ws->rx_ubuf_head;
583 
584 		/* for the non-pm-deflate case */
585 
586 		pmdrx.eb_out = pmdrx.eb_in;
587 
588 		if (wsi->ws->opcode == LWSWSOPC_PONG && !pmdrx.eb_in.len)
589 			goto already_done;
590 #if !defined(LWS_WITHOUT_EXTENSIONS)
591 drain_extension:
592 #endif
593 
594 		do {
595 
596 //			lwsl_notice("%s: pmdrx.eb_in.len: %d\n", __func__,
597 //					(int)pmdrx.eb_in.len);
598 
599 			if (lwsi_state(wsi) == LRS_RETURNED_CLOSE ||
600 			    lwsi_state(wsi) == LRS_AWAITING_CLOSE_ACK)
601 				goto already_done;
602 
603 			n = PMDR_DID_NOTHING;
604 
605 #if !defined(LWS_WITHOUT_EXTENSIONS)
606 			lin = pmdrx.eb_in.len;
607 			//if (lin)
608 			//	lwsl_hexdump_notice(ebuf.token, ebuf.len);
609 			lwsl_ext("%s: +++ passing %d %p to ext\n", __func__,
610 					pmdrx.eb_in.len, pmdrx.eb_in.token);
611 
612 			n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_RX, &pmdrx, 0);
613 			lwsl_debug("%s: ext says %d / ebuf.len %d\n", __func__,
614 				   n, pmdrx.eb_out.len);
615 			if (wsi->ws->rx_draining_ext)
616 				already_processed &= (char)~ALREADY_PROCESSED_NO_CB;
617 #endif
618 
619 			/*
620 			 * ebuf may be pointing somewhere completely different
621 			 * now, it's the output
622 			 */
623 #if !defined(LWS_WITHOUT_EXTENSIONS)
624 			if (n < 0) {
625 				/*
626 				 * we may rely on this to get RX, just drop
627 				 * connection
628 				 */
629 				wsi->socket_is_permanently_unusable = 1;
630 				return -1;
631 			}
632 			if (n == PMDR_DID_NOTHING)
633 				/* ie, not PMDR_NOTHING_WE_SHOULD_DO */
634 				break;
635 #endif
636 			lwsl_debug("%s: post ext ret %d, ebuf in %d / out %d\n",
637 				    __func__, n, pmdrx.eb_in.len,
638 				    pmdrx.eb_out.len);
639 
640 #if !defined(LWS_WITHOUT_EXTENSIONS)
641 			if (rx_draining_ext && !pmdrx.eb_out.len) {
642 				lwsl_debug("   --- ending drain on 0 read\n");
643 				goto already_done;
644 			}
645 
646 			if (n == PMDR_HAS_PENDING)
647 				/*
648 				 * extension had more...
649 				 * main loop will come back
650 				 */
651 				lws_add_wsi_to_draining_ext_list(wsi);
652 			else
653 				lws_remove_wsi_from_draining_ext_list(wsi);
654 
655 			rx_draining_ext = wsi->ws->rx_draining_ext;
656 #endif
657 
658 			if (pmdrx.eb_out.len &&
659 			    wsi->ws->check_utf8 && !wsi->ws->defeat_check_utf8) {
660 				if (lws_check_utf8(&wsi->ws->utf8,
661 						   pmdrx.eb_out.token,
662 						   (size_t)pmdrx.eb_out.len)) {
663 					lws_close_reason(wsi,
664 						LWS_CLOSE_STATUS_INVALID_PAYLOAD,
665 						(uint8_t *)"bad utf8", 8);
666 					goto utf8_fail;
667 				}
668 
669 				/* we are ending partway through utf-8 character? */
670 				if (!wsi->ws->rx_packet_length &&
671 				    wsi->ws->final && wsi->ws->utf8
672 #if !defined(LWS_WITHOUT_EXTENSIONS)
673 				    /* if ext not negotiated, going to be UNKNOWN */
674 				    && (n == PMDR_EMPTY_FINAL || n == PMDR_UNKNOWN)
675 #endif
676 				) {
677 					lwsl_info("FINAL utf8 error\n");
678 					lws_close_reason(wsi,
679 						LWS_CLOSE_STATUS_INVALID_PAYLOAD,
680 						(uint8_t *)"partial utf8", 12);
681 utf8_fail:
682 					lwsl_notice("utf8 error\n");
683 					lwsl_hexdump_notice(pmdrx.eb_out.token,
684 							    (size_t)pmdrx.eb_out.len);
685 
686 					return -1;
687 				}
688 			}
689 
690 			/* if pmd not enabled, in == out */
691 
692 			if (n == PMDR_DID_NOTHING
693 #if !defined(LWS_WITHOUT_EXTENSIONS)
694 					||
695 			    n == PMDR_NOTHING_WE_SHOULD_DO ||
696 			    n == PMDR_UNKNOWN
697 #endif
698 			    )
699 				pmdrx.eb_in.len -= pmdrx.eb_out.len;
700 
701 			if (!wsi->wsistate_pre_close &&
702 			    (pmdrx.eb_out.len >= 0 ||
703 			     callback_action == LWS_CALLBACK_RECEIVE_PONG ||
704 						       n == PMDR_EMPTY_FINAL)) {
705 				if (pmdrx.eb_out.len)
706 					pmdrx.eb_out.token[pmdrx.eb_out.len] = '\0';
707 
708 				if (wsi->a.protocol->callback &&
709 				    !(already_processed & ALREADY_PROCESSED_NO_CB)) {
710 					if (callback_action ==
711 						      LWS_CALLBACK_RECEIVE_PONG)
712 						lwsl_info("Doing pong callback\n");
713 
714 					ret = user_callback_handle_rxflow(
715 						wsi->a.protocol->callback, wsi,
716 						(enum lws_callback_reasons)
717 							     callback_action,
718 						wsi->user_space,
719 						pmdrx.eb_out.token,
720 						(size_t)pmdrx.eb_out.len);
721 				}
722 				wsi->ws->first_fragment = 0;
723 			}
724 
725 #if !defined(LWS_WITHOUT_EXTENSIONS)
726 			if (!lin)
727 				break;
728 #endif
729 
730 		} while (pmdrx.eb_in.len
731 #if !defined(LWS_WITHOUT_EXTENSIONS)
732 				|| rx_draining_ext
733 #endif
734 		);
735 
736 already_done:
737 		wsi->ws->rx_ubuf_head = 0;
738 		break;
739 	}
740 
741 	return ret;
742 
743 illegal_ctl_length:
744 
745 	lwsl_warn("Control frame with xtended length is illegal\n");
746 	/* kill the connection */
747 	return -1;
748 }
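/*
 * Illustrative sketch, not part of the library build: since the state machine
 * above hands each fragment to LWS_CALLBACK_RECEIVE as soon as it is buffered,
 * a user callback that wants whole messages must reassemble them itself, eg
 * (bounds checking and error handling elided; my_buf / my_len / handle_message
 * are assumed per-session names):
 *
 *	case LWS_CALLBACK_RECEIVE:
 *		if (lws_is_first_fragment(wsi))
 *			my_len = 0;
 *		memcpy(my_buf + my_len, in, len);
 *		my_len += len;
 *		if (lws_is_final_fragment(wsi))
 *			handle_message(my_buf, my_len,
 *				       lws_frame_is_binary(wsi));
 *		break;
 */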
749 
750 
751 size_t
752 lws_remaining_packet_payload(struct lws *wsi)
753 {
754 	return wsi->ws->rx_packet_length;
755 }
756 
757 int lws_frame_is_binary(struct lws *wsi)
758 {
759 	return wsi->ws->frame_is_binary;
760 }
761 
762 void
763 lws_add_wsi_to_draining_ext_list(struct lws *wsi)
764 {
765 #if !defined(LWS_WITHOUT_EXTENSIONS)
766 	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
767 
768 	if (wsi->ws->rx_draining_ext)
769 		return;
770 
771 	lwsl_debug("%s: RX EXT DRAINING: Adding to list\n", __func__);
772 
773 	wsi->ws->rx_draining_ext = 1;
774 	wsi->ws->rx_draining_ext_list = pt->ws.rx_draining_ext_list;
775 	pt->ws.rx_draining_ext_list = wsi;
776 #endif
777 }
778 
779 void
780 lws_remove_wsi_from_draining_ext_list(struct lws *wsi)
781 {
782 #if !defined(LWS_WITHOUT_EXTENSIONS)
783 	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
784 	struct lws **w = &pt->ws.rx_draining_ext_list;
785 
786 	if (!wsi->ws->rx_draining_ext)
787 		return;
788 
789 	lwsl_debug("%s: RX EXT DRAINING: Removing from list\n", __func__);
790 
791 	wsi->ws->rx_draining_ext = 0;
792 
793 	/* remove us from context draining ext list */
794 	while (*w) {
795 		if (*w == wsi) {
796 			/* if us, point it instead to who we were pointing to */
797 			*w = wsi->ws->rx_draining_ext_list;
798 			break;
799 		}
800 		w = &((*w)->ws->rx_draining_ext_list);
801 	}
802 	wsi->ws->rx_draining_ext_list = NULL;
803 #endif
804 }
805 
806 static int
807 lws_0405_frame_mask_generate(struct lws *wsi)
808 {
809 	size_t n;
810 	/* fetch the per-frame nonce */
811 
812 	n = lws_get_random(lws_get_context(wsi), wsi->ws->mask, 4);
813 	if (n != 4) {
814 		lwsl_parser("Unable to read from random device %s %d\n",
815 			    SYSTEM_RANDOM_FILEPATH, (int)n);
816 		return 1;
817 	}
818 
819 	/* start masking from first byte of masking key buffer */
820 	wsi->ws->mask_idx = 0;
821 
822 	return 0;
823 }
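/*
 * Note (illustrative): RFC 6455 masking is a plain octet-wise XOR with the
 * 4-byte key generated above, ie, payload[i] ^ mask[i & 3], and it is its own
 * inverse.  For example, with key 11 22 33 44 (hex) the text "Hi" (48 69)
 * masks to 59 4b, and XORing again with the same key recovers 48 69.  This is
 * why both the RX unmasking in LWS_RXPS_WS_FRAME_PAYLOAD above and the TX
 * masking further down use the same c ^ mask[mask_idx++ & 3] expression.
 */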
824 
825 int
826 lws_server_init_wsi_for_ws(struct lws *wsi)
827 {
828 	int n;
829 
830 	lwsi_set_state(wsi, LRS_ESTABLISHED);
831 
832 	/*
833 	 * create the frame buffer for this connection according to the
834 	 * size mentioned in the protocol definition.  If 0 there, use
835 	 * a big default for compatibility
836 	 */
837 
838 	n = (int)wsi->a.protocol->rx_buffer_size;
839 	if (!n)
840 		n = (int)wsi->a.context->pt_serv_buf_size;
841 	n += LWS_PRE;
842 	wsi->ws->rx_ubuf = lws_malloc((unsigned int)n + 4 /* 0x0000ffff zlib */, "rx_ubuf");
843 	if (!wsi->ws->rx_ubuf) {
844 		lwsl_err("Out of Mem allocating rx buffer %d\n", n);
845 		return 1;
846 	}
847 	wsi->ws->rx_ubuf_alloc = (uint32_t)n;
848 
849 	/* notify user code that we're ready to roll */
850 
851 	if (wsi->a.protocol->callback)
852 		if (wsi->a.protocol->callback(wsi, LWS_CALLBACK_ESTABLISHED,
853 					    wsi->user_space,
854 #ifdef LWS_WITH_TLS
855 					    wsi->tls.ssl,
856 #else
857 					    NULL,
858 #endif
859 					    wsi->h2_stream_carries_ws))
860 			return 1;
861 
862 	lws_validity_confirmed(wsi);
863 	lwsl_debug("ws established\n");
864 
865 	return 0;
866 }
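/*
 * Illustrative sketch, not part of the library: the rx buffer sized above
 * comes from the protocol definition, so a protocol wanting, say, 4KiB frames
 * instead of the pt_serv_buf_size default would declare something like
 * ("my-protocol", my_callback and struct my_pss are assumed names):
 *
 *	static const struct lws_protocols protocols[] = {
 *		{
 *			.name			= "my-protocol",
 *			.callback		= my_callback,
 *			.per_session_data_size	= sizeof(struct my_pss),
 *			.rx_buffer_size		= 4096,
 *		},
 *		{ NULL, NULL, 0, 0 }	// terminator
 *	};
 */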
867 
868 
869 
870 int
871 lws_is_final_fragment(struct lws *wsi)
872 {
873 #if !defined(LWS_WITHOUT_EXTENSIONS)
874 	lwsl_debug("%s: final %d, rx pk length %ld, draining %ld\n", __func__,
875 		   wsi->ws->final, (long)wsi->ws->rx_packet_length,
876 		   (long)wsi->ws->rx_draining_ext);
877 	return wsi->ws->final && !wsi->ws->rx_packet_length &&
878 	       !wsi->ws->rx_draining_ext;
879 #else
880 	return wsi->ws->final && !wsi->ws->rx_packet_length;
881 #endif
882 }
883 
884 int
885 lws_is_first_fragment(struct lws *wsi)
886 {
887 	return wsi->ws->first_fragment;
888 }
889 
890 unsigned char
891 lws_get_reserved_bits(struct lws *wsi)
892 {
893 	return wsi->ws->rsv;
894 }
895 
896 int
897 lws_get_close_length(struct lws *wsi)
898 {
899 	return wsi->ws->close_in_ping_buffer_len;
900 }
901 
902 unsigned char *
903 lws_get_close_payload(struct lws *wsi)
904 {
905 	return &wsi->ws->ping_payload_buf[LWS_PRE];
906 }
907 
908 void
909 lws_close_reason(struct lws *wsi, enum lws_close_status status,
910 		 unsigned char *buf, size_t len)
911 {
912 	unsigned char *p, *start;
913 	int budget = sizeof(wsi->ws->ping_payload_buf) - LWS_PRE;
914 
915 	assert(lwsi_role_ws(wsi));
916 
917 	start = p = &wsi->ws->ping_payload_buf[LWS_PRE];
918 
919 	*p++ = (uint8_t)((((int)status) >> 8) & 0xff);
920 	*p++ = (uint8_t)(((int)status) & 0xff);
921 
922 	if (buf)
923 		while (len-- && p < start + budget)
924 			*p++ = *buf++;
925 
926 	wsi->ws->close_in_ping_buffer_len = (uint8_t)lws_ptr_diff(p, start);
927 }
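/*
 * Illustrative usage (a minimal sketch, not prescriptive): a user callback
 * that wants to close with a specific status calls this and then returns
 * nonzero from the callback, eg
 *
 *	lws_close_reason(wsi, LWS_CLOSE_STATUS_GOINGAWAY,
 *			 (unsigned char *)"bye", 3);
 *	return -1;
 *
 * The status and reason are stashed in ping_payload_buf here and actually
 * go out via rops_close_via_role_protocol_ws() / rops_handle_POLLOUT_ws()
 * below.
 */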
928 
929 static int
930 lws_is_ws_with_ext(struct lws *wsi)
931 {
932 #if defined(LWS_WITHOUT_EXTENSIONS)
933 	return 0;
934 #else
935 	return lwsi_role_ws(wsi) && !!wsi->ws->count_act_ext;
936 #endif
937 }
938 
939 static int
940 rops_handle_POLLIN_ws(struct lws_context_per_thread *pt, struct lws *wsi,
941 		       struct lws_pollfd *pollfd)
942 {
943 	unsigned int pending = 0;
944 	struct lws_tokens ebuf;
945 	char buffered = 0;
946 	int n = 0, m, sanity = 10;
947 #if defined(LWS_WITH_HTTP2)
948 	struct lws *wsi1;
949 #endif
950 
951 	if (!wsi->ws) {
952 		lwsl_err("ws role wsi with no ws\n");
953 		return LWS_HPI_RET_PLEASE_CLOSE_ME;
954 	}
955 
956 	// lwsl_notice("%s: %s\n", __func__, wsi->a.protocol->name);
957 
958 	//lwsl_info("%s: wsistate 0x%x, pollout %d\n", __func__,
959 	//	   wsi->wsistate, pollfd->revents & LWS_POLLOUT);
960 
961 	/*
962 	 * something went wrong with parsing the handshake, and
963 	 * we ended up back in the event loop without completing it
964 	 */
965 	if (lwsi_state(wsi) == LRS_PRE_WS_SERVING_ACCEPT) {
966 		wsi->socket_is_permanently_unusable = 1;
967 		return LWS_HPI_RET_PLEASE_CLOSE_ME;
968 	}
969 
970 	ebuf.token = NULL;
971 	ebuf.len = 0;
972 
973 	if (lwsi_state(wsi) == LRS_WAITING_CONNECT) {
974 #if defined(LWS_WITH_CLIENT)
975 		if ((pollfd->revents & LWS_POLLOUT) &&
976 		    lws_handle_POLLOUT_event(wsi, pollfd)) {
977 			lwsl_debug("POLLOUT event closed it\n");
978 			return LWS_HPI_RET_PLEASE_CLOSE_ME;
979 		}
980 
981 		n = lws_http_client_socket_service(wsi, pollfd);
982 		if (n)
983 			return LWS_HPI_RET_WSI_ALREADY_DIED;
984 #endif
985 		return LWS_HPI_RET_HANDLED;
986 	}
987 
988 	/* 1: something requested a callback when it was OK to write */
989 
990 	if ((pollfd->revents & LWS_POLLOUT) &&
991 	    lwsi_state_can_handle_POLLOUT(wsi) &&
992 	    lws_handle_POLLOUT_event(wsi, pollfd)) {
993 		if (lwsi_state(wsi) == LRS_RETURNED_CLOSE)
994 			lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE);
995 
996 		return LWS_HPI_RET_PLEASE_CLOSE_ME;
997 	}
998 
999 	if (lwsi_state(wsi) == LRS_RETURNED_CLOSE ||
1000 	    lwsi_state(wsi) == LRS_WAITING_TO_SEND_CLOSE) {
1001 		/*
1002 		 * we stopped caring about anything except control
1003 		 * packets.  Force flow control off, defeat tx
1004 		 * draining.
1005 		 */
1006 		lws_rx_flow_control(wsi, 1);
1007 #if !defined(LWS_WITHOUT_EXTENSIONS)
1008 		if (wsi->ws)
1009 			wsi->ws->tx_draining_ext = 0;
1010 #endif
1011 	}
1012 #if !defined(LWS_WITHOUT_EXTENSIONS)
1013 	if (wsi->ws->tx_draining_ext) {
1014 		lws_handle_POLLOUT_event(wsi, pollfd);
1015 		//lwsl_notice("%s: tx drain\n", __func__);
1016 		/*
1017 		 * We cannot deal with new RX until the TX ext path has
1018 		 * been drained.  It's because new rx will, eg, crap on
1019 		 * the wsi rx buf that may be needed to retain state.
1020 		 *
1021 		 * TX ext drain path MUST go through event loop to avoid
1022 		 * blocking.
1023 		 */
1024 		lws_callback_on_writable(wsi);
1025 		return LWS_HPI_RET_HANDLED;
1026 	}
1027 #endif
1028 	if ((pollfd->revents & LWS_POLLIN) && lws_is_flowcontrolled(wsi)) {
1029 		/* We cannot deal with any kind of new RX because we are
1030 		 * RX-flowcontrolled.
1031 		 */
1032 		lwsl_info("%s: flowcontrolled, ignoring rx\n", __func__);
1033 
1034 		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
1035 			return -1;
1036 
1037 		return LWS_HPI_RET_HANDLED;
1038 	}
1039 
1040 	if (lws_is_flowcontrolled(wsi))
1041 		return LWS_HPI_RET_HANDLED;
1042 
1043 #if defined(LWS_WITH_HTTP2)
1044 	if (wsi->mux_substream || wsi->upgraded_to_http2) {
1045 		wsi1 = lws_get_network_wsi(wsi);
1046 		if (wsi1 && lws_has_buffered_out(wsi1))
1047 			/* We cannot deal with any kind of new RX
1048 			 * because we are dealing with a partial send
1049 			 * (new RX may trigger new http_action() that
1050 			 * expect to be able to send)
1051 			 */
1052 			return LWS_HPI_RET_HANDLED;
1053 	}
1054 #endif
1055 
1056 #if !defined(LWS_WITHOUT_EXTENSIONS)
1057 	/* 2: RX Extension needs to be drained
1058 	 */
1059 
1060 	if (wsi->ws->rx_draining_ext) {
1061 
1062 		lwsl_debug("%s: RX EXT DRAINING: Service\n", __func__);
1063 #if defined(LWS_WITH_CLIENT)
1064 		if (lwsi_role_client(wsi)) {
1065 			n = lws_ws_client_rx_sm(wsi, 0);
1066 			if (n < 0)
1067 				/* we closed wsi */
1068 				return LWS_HPI_RET_PLEASE_CLOSE_ME;
1069 		} else
1070 #endif
1071 			n = lws_ws_rx_sm(wsi, ALREADY_PROCESSED_IGNORE_CHAR, 0);
1072 
1073 		return LWS_HPI_RET_HANDLED;
1074 	}
1075 
1076 	if (wsi->ws->rx_draining_ext)
1077 		/*
1078 		 * We have RX EXT content to drain, but can't do it
1079 		 * right now.  That means we cannot do anything lower
1080 		 * priority either.
1081 		 */
1082 		return LWS_HPI_RET_HANDLED;
1083 #endif
1084 
1085 	/* 3: buflist needs to be drained
1086 	 */
1087 read:
1088 	//lws_buflist_describe(&wsi->buflist, wsi, __func__);
1089 	ebuf.len = (int)lws_buflist_next_segment_len(&wsi->buflist,
1090 						     &ebuf.token);
1091 	if (ebuf.len) {
1092 		lwsl_info("draining buflist (len %d)\n", ebuf.len);
1093 		buffered = 1;
1094 		goto drain;
1095 	}
1096 
1097 	if (!(pollfd->revents & pollfd->events & LWS_POLLIN) && !wsi->http.ah)
1098 		return LWS_HPI_RET_HANDLED;
1099 
1100 	if (lws_is_flowcontrolled(wsi)) {
1101 		lwsl_info("%s: %p should be rxflow (bm 0x%x)..\n",
1102 			    __func__, wsi, wsi->rxflow_bitmap);
1103 		return LWS_HPI_RET_HANDLED;
1104 	}
1105 
1106 	if (!(lwsi_role_client(wsi) &&
1107 	      (lwsi_state(wsi) != LRS_ESTABLISHED &&
1108 	       lwsi_state(wsi) != LRS_AWAITING_CLOSE_ACK &&
1109 	       lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS))) {
1110 		/*
1111 		 * In case we are going to react to this rx by scheduling
1112 		 * writes, we need to restrict the amount of rx to the size
1113 		 * the protocol reported for rx buffer.
1114 		 *
1115 		 * Otherwise we get a situation we have to absorb possibly a
1116 		 * lot of reads before we get a chance to drain them by writing
1117 		 * them, eg, with echo type tests in autobahn.
1118 		 */
1119 
1120 		buffered = 0;
1121 		ebuf.token = pt->serv_buf;
1122 		if (lwsi_role_ws(wsi))
1123 			ebuf.len = (int)wsi->ws->rx_ubuf_alloc;
1124 		else
1125 			ebuf.len = (int)wsi->a.context->pt_serv_buf_size;
1126 
1127 		if ((unsigned int)ebuf.len > wsi->a.context->pt_serv_buf_size)
1128 			ebuf.len = (int)wsi->a.context->pt_serv_buf_size;
1129 
1130 		if ((int)pending > ebuf.len)
1131 			pending = (unsigned int)ebuf.len;
1132 
1133 		ebuf.len = lws_ssl_capable_read(wsi, ebuf.token,
1134 						(size_t)(pending ? pending :
1135 						(unsigned int)ebuf.len));
1136 		switch (ebuf.len) {
1137 		case 0:
1138 			lwsl_info("%s: zero length read\n",
1139 				  __func__);
1140 			return LWS_HPI_RET_PLEASE_CLOSE_ME;
1141 		case LWS_SSL_CAPABLE_MORE_SERVICE:
1142 			lwsl_info("SSL Capable more service\n");
1143 			return LWS_HPI_RET_HANDLED;
1144 		case LWS_SSL_CAPABLE_ERROR:
1145 			lwsl_info("%s: LWS_SSL_CAPABLE_ERROR\n",
1146 					__func__);
1147 			return LWS_HPI_RET_PLEASE_CLOSE_ME;
1148 		}
1149 
1150 		/*
1151 		 * coverity thinks ssl_capable_read() may read over
1152 		 * 2GB.  Dissuade it...
1153 		 */
1154 		ebuf.len &= 0x7fffffff;
1155 	}
1156 
1157 drain:
1158 
1159 	/*
1160 	 * give any active extensions a chance to munge the buffer
1161 	 * before parse.  We pass in a pointer to an lws_tokens struct
1162 	 * prepared with the default buffer and content length that's in
1163 	 * there.  Rather than rewrite the default buffer, extensions
1164 	 * that expect to grow the buffer can adapt .token to
1165 	 * point to their own per-connection buffer in the extension
1166 	 * user allocation.  By default with no extensions or no
1167 	 * extension callback handling, just the normal input buffer is
1168 	 * used then so it is efficient.
1169 	 */
1170 	m = 0;
1171 	do {
1172 
1173 		/* service incoming data */
1174 		//lws_buflist_describe(&wsi->buflist, wsi, __func__);
1175 		if (ebuf.len > 0) {
1176 #if defined(LWS_ROLE_H2)
1177 			if (lwsi_role_h2(wsi) && lwsi_state(wsi) != LRS_BODY &&
1178 			    lwsi_state(wsi) != LRS_DISCARD_BODY)
1179 				n = lws_read_h2(wsi, ebuf.token,
1180 					     (unsigned int)ebuf.len);
1181 			else
1182 #endif
1183 				n = lws_read_h1(wsi, ebuf.token,
1184 					     (unsigned int)ebuf.len);
1185 
1186 			if (n < 0) {
1187 				/* we closed wsi */
1188 				return LWS_HPI_RET_WSI_ALREADY_DIED;
1189 			}
1190 			//lws_buflist_describe(&wsi->buflist, wsi, __func__);
1191 			//lwsl_notice("%s: consuming %d / %d\n", __func__, n, ebuf.len);
1192 			if (ebuf.len < 0 ||
1193 			    lws_buflist_aware_finished_consuming(wsi, &ebuf, n,
1194 							buffered, __func__))
1195 				return LWS_HPI_RET_PLEASE_CLOSE_ME;
1196 		}
1197 
1198 		ebuf.token = NULL;
1199 		ebuf.len = 0;
1200 	} while (m);
1201 
1202 	if (wsi->http.ah
1203 #if defined(LWS_WITH_CLIENT)
1204 			&& !wsi->client_h2_alpn
1205 #endif
1206 			) {
1207 		lwsl_info("%s: %p: detaching ah\n", __func__, wsi);
1208 		lws_header_table_detach(wsi, 0);
1209 	}
1210 
1211 	pending = (unsigned int)lws_ssl_pending(wsi);
1212 
1213 #if defined(LWS_WITH_CLIENT)
1214 	if (!pending && (wsi->flags & LCCSCF_PRIORITIZE_READS) &&
1215 	    lws_buflist_total_len(&wsi->buflist))
1216 		pending = 9999999;
1217 #endif
1218 
1219 	if (pending) {
1220 		if (lws_is_ws_with_ext(wsi))
1221 			pending = pending > wsi->ws->rx_ubuf_alloc ?
1222 				wsi->ws->rx_ubuf_alloc : pending;
1223 		else
1224 			pending = pending > wsi->a.context->pt_serv_buf_size ?
1225 				wsi->a.context->pt_serv_buf_size : pending;
1226 		if (--sanity)
1227 			goto read;
1228 		else
1229 			/*
1230 			 * Something has gone wrong, we are spinning...
1231 			 * let's bail on this connection
1232 			 */
1233 			return LWS_HPI_RET_PLEASE_CLOSE_ME;
1234 	}
1235 
1236 	if (buffered && /* were draining, now nothing left */
1237 	    !lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
1238 		lwsl_info("%s: %p flow buf: drained\n", __func__, wsi);
1239 		/* having drained the rxflow buffer, can rearm POLLIN */
1240 #if !defined(LWS_WITH_SERVER)
1241 		n =
1242 #endif
1243 		__lws_rx_flow_control(wsi);
1244 		/* n ignored, needed for NO_SERVER case */
1245 	}
1246 
1247 	/* n = 0 */
1248 	return LWS_HPI_RET_HANDLED;
1249 }
1250 
1251 
1252 int rops_handle_POLLOUT_ws(struct lws *wsi)
1253 {
1254 	int write_type = LWS_WRITE_PONG;
1255 #if !defined(LWS_WITHOUT_EXTENSIONS)
1256 	struct lws_ext_pm_deflate_rx_ebufs pmdrx;
1257 	int ret, m;
1258 #endif
1259 	int n;
1260 
1261 #if !defined(LWS_WITHOUT_EXTENSIONS)
1262 	lwsl_debug("%s: %s: wsi->ws->tx_draining_ext %d\n", __func__,
1263 			wsi->a.protocol->name, wsi->ws->tx_draining_ext);
1264 #endif
1265 
1266 	/* Priority 3: pending control packets (pong or close)
1267 	 *
1268 	 * 3a: close notification packet requested from close api
1269 	 */
1270 
1271 	if (lwsi_state(wsi) == LRS_WAITING_TO_SEND_CLOSE) {
1272 		lwsl_debug("sending close packet\n");
1273 		lwsl_hexdump_debug(&wsi->ws->ping_payload_buf[LWS_PRE],
1274 				   wsi->ws->close_in_ping_buffer_len);
1275 		wsi->waiting_to_send_close_frame = 0;
1276 		n = lws_write(wsi, &wsi->ws->ping_payload_buf[LWS_PRE],
1277 			      wsi->ws->close_in_ping_buffer_len,
1278 			      LWS_WRITE_CLOSE);
1279 		if (n >= 0) {
1280 			if (wsi->close_needs_ack) {
1281 				lwsi_set_state(wsi, LRS_AWAITING_CLOSE_ACK);
1282 				lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_ACK,
1283 						5);
1284 				lwsl_debug("sent close, await ack\n");
1285 
1286 				return LWS_HP_RET_BAIL_OK;
1287 			}
1288 			wsi->close_needs_ack = 0;
1289 			lwsi_set_state(wsi, LRS_RETURNED_CLOSE);
1290 		}
1291 
1292 		return LWS_HP_RET_BAIL_DIE;
1293 	}
1294 
1295 	/* else, the send failed and we should just hang up */
1296 
1297 	if ((lwsi_role_ws(wsi) && wsi->ws->pong_pending_flag) ||
1298 	    (lwsi_state(wsi) == LRS_RETURNED_CLOSE &&
1299 	     wsi->ws->payload_is_close)) {
1300 
1301 		if (wsi->ws->payload_is_close)
1302 			write_type = LWS_WRITE_CLOSE;
1303 		else {
1304 			if (wsi->wsistate_pre_close) {
1305 				/* we started close flow, forget pong */
1306 				wsi->ws->pong_pending_flag = 0;
1307 				return LWS_HP_RET_BAIL_OK;
1308 			}
1309 			lwsl_info("issuing pong %d on %s\n",
1310 				  wsi->ws->pong_payload_len, lws_wsi_tag(wsi));
1311 		}
1312 
1313 		n = lws_write(wsi, &wsi->ws->pong_payload_buf[LWS_PRE],
1314 			      wsi->ws->pong_payload_len, (enum lws_write_protocol)write_type);
1315 		if (n < 0)
1316 			return LWS_HP_RET_BAIL_DIE;
1317 
1318 		/* well he is sent, mark him done */
1319 		wsi->ws->pong_pending_flag = 0;
1320 		if (wsi->ws->payload_is_close) {
1321 			// assert(0);
1322 			/* oh... a close frame was it... then we are done */
1323 			return LWS_HP_RET_BAIL_DIE;
1324 		}
1325 
1326 		/* otherwise for PING, leave POLLOUT active either way */
1327 		return LWS_HP_RET_BAIL_OK;
1328 	}
1329 
1330 	if (!wsi->socket_is_permanently_unusable &&
1331 	    wsi->ws->send_check_ping) {
1332 
1333 		lwsl_info("%s: issuing ping on wsi %s: %s %s h2: %d\n", __func__,
1334 				lws_wsi_tag(wsi),
1335 				wsi->role_ops->name, wsi->a.protocol->name,
1336 				wsi->mux_substream);
1337 		wsi->ws->send_check_ping = 0;
1338 		n = lws_write(wsi, &wsi->ws->ping_payload_buf[LWS_PRE],
1339 			      0, LWS_WRITE_PING);
1340 		if (n < 0)
1341 			return LWS_HP_RET_BAIL_DIE;
1342 
1343 		return LWS_HP_RET_BAIL_OK;
1344 	}
1345 
1346 	/* Priority 4: if we are closing, not allowed to send more data frags
1347 	 *	       which means user callback or tx ext flush banned now
1348 	 */
1349 	if (lwsi_state(wsi) == LRS_RETURNED_CLOSE)
1350 		return LWS_HP_RET_USER_SERVICE;
1351 
1352 #if !defined(LWS_WITHOUT_EXTENSIONS)
1353 	/* Priority 5: Tx path extension with more to send
1354 	 *
1355 	 *	       These are handled as new fragments each time around.
1356 	 *	       So we must block the new writeable callback to enforce
1357 	 *	       payload ordering; but since they are always complete
1358 	 *	       fragments, control packets can interleave OK.
1359 	 */
1360 	if (wsi->ws->tx_draining_ext) {
1361 		lwsl_ext("SERVICING TX EXT DRAINING\n");
1362 		if (lws_write(wsi, NULL, 0, LWS_WRITE_CONTINUATION) < 0)
1363 			return LWS_HP_RET_BAIL_DIE;
1364 		/* leave POLLOUT active */
1365 		return LWS_HP_RET_BAIL_OK;
1366 	}
1367 
1368 	/* Priority 6: extensions
1369 	 */
1370 	if (!wsi->ws->extension_data_pending && !wsi->ws->tx_draining_ext) {
1371 		lwsl_ext("%s: !wsi->ws->extension_data_pending\n", __func__);
1372 		return LWS_HP_RET_USER_SERVICE;
1373 	}
1374 
1375 	/*
1376 	 * Check in on the active extensions, see if they had pending stuff to
1377 	 * spill... they need to get the first look-in otherwise sequence will
1378 	 * be disordered.
1379 	 *
1380 	 * coming here with a NULL, zero-length ebuf means just spill pending
1381 	 */
1382 
1383 	ret = 1;
1384 	if (wsi->role_ops == &role_ops_raw_skt
1385 #if defined(LWS_ROLE_RAW_FILE)
1386 		|| wsi->role_ops == &role_ops_raw_file
1387 #endif
1388 	    )
1389 		ret = 0;
1390 
1391 	while (ret == 1) {
1392 
1393 		/* default to nobody has more to spill */
1394 
1395 		ret = 0;
1396 		pmdrx.eb_in.token = NULL;
1397 		pmdrx.eb_in.len = 0;
1398 
1399 		/* give every extension a chance to spill */
1400 
1401 		m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_TX_PRESEND,
1402 				      &pmdrx, 0);
1403 		if (m < 0) {
1404 			lwsl_err("ext reports fatal error\n");
1405 			return LWS_HP_RET_BAIL_DIE;
1406 		}
1407 		if (m)
1408 			/*
1409 			 * at least one extension told us he has more
1410 			 * to spill, so we will go around again after
1411 			 */
1412 			ret = 1;
1413 
1414 		/* assuming they gave us something to send, send it */
1415 
1416 		if (pmdrx.eb_in.len) {
1417 			n = lws_issue_raw(wsi, (unsigned char *)pmdrx.eb_in.token,
1418 					(unsigned int)pmdrx.eb_in.len);
1419 			if (n < 0) {
1420 				lwsl_info("closing from POLLOUT spill\n");
1421 				return LWS_HP_RET_BAIL_DIE;
1422 			}
1423 			/*
1424 			 * Keep amount spilled small to minimize chance of this
1425 			 */
1426 			if (n != pmdrx.eb_in.len) {
1427 				lwsl_err("Unable to spill ext %d vs %d\n",
1428 						pmdrx.eb_in.len, n);
1429 				return LWS_HP_RET_BAIL_DIE;
1430 			}
1431 		} else
1432 			continue;
1433 
1434 		/* no extension has more to spill */
1435 
1436 		if (!ret)
1437 			continue;
1438 
1439 		/*
1440 		 * There's more to spill from an extension, but we just sent
1441 		 * something... did that leave the pipe choked?
1442 		 */
1443 
1444 		if (!lws_send_pipe_choked(wsi))
1445 			/* no we could add more */
1446 			continue;
1447 
1448 		lwsl_info("choked in POLLOUT service\n");
1449 
1450 		/*
1451 		 * Yes, he's choked.  Leave the POLLOUT masked on so we will
1452 		 * come back here when he is unchoked.  Don't call the user
1453 		 * callback to enforce ordering of spilling, he'll get called
1454 		 * when we come back here and there's nothing more to spill.
1455 		 */
1456 
1457 		return LWS_HP_RET_BAIL_OK;
1458 	}
1459 
1460 	wsi->ws->extension_data_pending = 0;
1461 #endif
1462 
1463 	return LWS_HP_RET_USER_SERVICE;
1464 }
1465 
1466 static int
1467 rops_service_flag_pending_ws(struct lws_context *context, int tsi)
1468 {
1469 #if !defined(LWS_WITHOUT_EXTENSIONS)
1470 	struct lws_context_per_thread *pt = &context->pt[tsi];
1471 	struct lws *wsi;
1472 	int forced = 0;
1473 
1474 	/* POLLIN faking (the pt lock is taken by the parent) */
1475 
1476 	/*
1477 	 * 1) For all guys with already-available ext data to drain, if they are
1478 	 * not flowcontrolled, fake their POLLIN status
1479 	 */
1480 	wsi = pt->ws.rx_draining_ext_list;
1481 	while (wsi && wsi->position_in_fds_table != LWS_NO_FDS_POS) {
1482 		pt->fds[wsi->position_in_fds_table].revents =
1483 			(short)((short)pt->fds[wsi->position_in_fds_table].revents |
1484 			(short)(pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN));
1485 		if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN)
1486 			forced = 1;
1487 
1488 		wsi = wsi->ws->rx_draining_ext_list;
1489 	}
1490 
1491 	return forced;
1492 #else
1493 	return 0;
1494 #endif
1495 }
1496 
1497 static int
1498 rops_close_via_role_protocol_ws(struct lws *wsi, enum lws_close_status reason)
1499 {
1500 	if (!wsi->ws)
1501 		return 0;
1502 
1503 	if (!wsi->ws->close_in_ping_buffer_len && /* already a reason */
1504 	     (reason == LWS_CLOSE_STATUS_NOSTATUS ||
1505 	      reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY))
1506 		return 0;
1507 
1508 	lwsl_debug("%s: sending close indication...\n", __func__);
1509 
1510 	/* if no prepared close reason, use 1000 and no aux data */
1511 
1512 	if (!wsi->ws->close_in_ping_buffer_len) {
1513 		wsi->ws->close_in_ping_buffer_len = 2;
1514 		wsi->ws->ping_payload_buf[LWS_PRE] = (reason >> 8) & 0xff;
1515 		wsi->ws->ping_payload_buf[LWS_PRE + 1] = reason & 0xff;
1516 	}
1517 
1518 	wsi->waiting_to_send_close_frame = 1;
1519 	wsi->close_needs_ack = 1;
1520 	lwsi_set_state(wsi, LRS_WAITING_TO_SEND_CLOSE);
1521 	__lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_SEND, 5);
1522 
1523 	lws_callback_on_writable(wsi);
1524 
1525 	return 1;
1526 }
1527 
1528 static int
1529 rops_close_role_ws(struct lws_context_per_thread *pt, struct lws *wsi)
1530 {
1531 	if (!wsi->ws)
1532 		return 0;
1533 
1534 #if !defined(LWS_WITHOUT_EXTENSIONS)
1535 
1536 	if (wsi->ws->rx_draining_ext) {
1537 		struct lws **w = &pt->ws.rx_draining_ext_list;
1538 
1539 		wsi->ws->rx_draining_ext = 0;
1540 		/* remove us from context draining ext list */
1541 		while (*w) {
1542 			if (*w == wsi) {
1543 				*w = wsi->ws->rx_draining_ext_list;
1544 				break;
1545 			}
1546 			w = &((*w)->ws->rx_draining_ext_list);
1547 		}
1548 		wsi->ws->rx_draining_ext_list = NULL;
1549 	}
1550 
1551 	if (wsi->ws->tx_draining_ext) {
1552 		struct lws **w = &pt->ws.tx_draining_ext_list;
1553 		lwsl_ext("%s: CLEARING tx_draining_ext\n", __func__);
1554 		wsi->ws->tx_draining_ext = 0;
1555 		/* remove us from context draining ext list */
1556 		while (*w) {
1557 			if (*w == wsi) {
1558 				*w = wsi->ws->tx_draining_ext_list;
1559 				break;
1560 			}
1561 			w = &((*w)->ws->tx_draining_ext_list);
1562 		}
1563 		wsi->ws->tx_draining_ext_list = NULL;
1564 	}
1565 #endif
1566 	lws_free_set_NULL(wsi->ws->rx_ubuf);
1567 
1568 	wsi->ws->pong_payload_len = 0;
1569 	wsi->ws->pong_pending_flag = 0;
1570 
1571 	/* deallocate any active extension contexts */
1572 
1573 	if (lws_ext_cb_active(wsi, LWS_EXT_CB_DESTROY, NULL, 0) < 0)
1574 		lwsl_warn("extension destruction failed\n");
1575 
1576 	return 0;
1577 }
1578 
1579 static int
1580 rops_write_role_protocol_ws(struct lws *wsi, unsigned char *buf, size_t len,
1581 			    enum lws_write_protocol *wp)
1582 {
1583 #if !defined(LWS_WITHOUT_EXTENSIONS)
1584 	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
1585 	enum lws_write_protocol wpt;
1586 #endif
1587 	struct lws_ext_pm_deflate_rx_ebufs pmdrx;
1588 	int masked7 = lwsi_role_client(wsi);
1589 	unsigned char is_masked_bit = 0;
1590 	unsigned char *dropmask = NULL;
1591 	size_t orig_len = len;
1592 	int pre = 0, n = 0;
1593 
1594 	// lwsl_err("%s: wp 0x%x len %d\n", __func__, *wp, (int)len);
1595 #if !defined(LWS_WITHOUT_EXTENSIONS)
1596 	if (wsi->ws->tx_draining_ext) {
1597 		/* remove us from the list */
1598 		struct lws **w = &pt->ws.tx_draining_ext_list;
1599 
1600 		lwsl_ext("%s: CLEARING tx_draining_ext\n", __func__);
1601 		wsi->ws->tx_draining_ext = 0;
1602 		/* remove us from context draining ext list */
1603 		while (*w) {
1604 			if (*w == wsi) {
1605 				*w = wsi->ws->tx_draining_ext_list;
1606 				break;
1607 			}
1608 			w = &((*w)->ws->tx_draining_ext_list);
1609 		}
1610 		wsi->ws->tx_draining_ext_list = NULL;
1611 
1612 		wpt = *wp;
1613 		*wp = (wsi->ws->tx_draining_stashed_wp & 0xc0) |
1614 				LWS_WRITE_CONTINUATION;
1615 
1616 		/*
1617 		 * When we are just flushing (len == 0), we can trust the
1618 		 * stashed wp info completely.  Otherwise adjust it to the
1619 		 * FIN status of the incoming packet.
1620 		 */
1621 
1622 		if (!(wpt & LWS_WRITE_NO_FIN) && len)
1623 			*wp &= (enum lws_write_protocol)~LWS_WRITE_NO_FIN;
1624 
1625 		lwsl_ext("FORCED draining wp to 0x%02X "
1626 			 "(stashed 0x%02X, incoming 0x%02X)\n", *wp,
1627 			 wsi->ws->tx_draining_stashed_wp, wpt);
1628 		// assert(0);
1629 	}
1630 #endif
1631 
1632 	if (((*wp) & 0x1f) == LWS_WRITE_HTTP ||
1633 	    ((*wp) & 0x1f) == LWS_WRITE_HTTP_FINAL ||
1634 	    ((*wp) & 0x1f) == LWS_WRITE_HTTP_HEADERS_CONTINUATION ||
1635 	    ((*wp) & 0x1f) == LWS_WRITE_HTTP_HEADERS)
1636 		goto send_raw;
1637 
1638 
1639 
1640 	/* if we are continuing a frame that already had its header done */
1641 
1642 	if (wsi->ws->inside_frame) {
1643 		lwsl_debug("INSIDE FRAME\n");
1644 		goto do_more_inside_frame;
1645 	}
1646 
1647 	wsi->ws->clean_buffer = 1;
1648 
1649 	/*
1650 	 * give a chance to the extensions to modify payload
1651 	 * the extension may decide to produce unlimited payload erratically
1652 	 * (eg, compression extension), so we require only that if he produces
1653 	 * something, it will be a complete fragment of the length known at
1654 	 * the time (just the fragment length known), and if he has
1655 	 * more we will come back next time he is writeable and allow him to
1656 	 * produce more fragments until he's drained.
1657 	 *
1658 	 * This allows what is sent each time it is writeable to be limited to
1659 	 * a size that can be sent without partial sends or blocking, allows
1660 	 * interleaving of control frames and other connection service.
1661 	 */
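	/*
	 * (Illustrative sketch of the user-facing side of this: a large
	 * message can be emitted across several writeable callbacks as
	 *
	 *	lws_write(wsi, p, chunk, LWS_WRITE_TEXT | LWS_WRITE_NO_FIN);
	 *	...
	 *	lws_write(wsi, p, chunk, LWS_WRITE_CONTINUATION |
	 *				 LWS_WRITE_NO_FIN);
	 *	...
	 *	lws_write(wsi, p, last_chunk, LWS_WRITE_CONTINUATION);
	 *
	 * each buffer having LWS_PRE bytes of headroom before p; the opcode
	 * and FIN handling below maps those wp flags onto the wire format.)
	 */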
1662 
1663 	pmdrx.eb_in.token = buf;
1664 	pmdrx.eb_in.len = (int)len;
1665 
1666 	/* for the non-pm-deflate case */
1667 
1668 	pmdrx.eb_out = pmdrx.eb_in;
1669 
1670 	switch ((int)*wp) {
1671 	case LWS_WRITE_PING:
1672 	case LWS_WRITE_PONG:
1673 	case LWS_WRITE_CLOSE:
1674 		break;
1675 	default:
1676 #if !defined(LWS_WITHOUT_EXTENSIONS)
1677 		n = lws_ext_cb_active(wsi, (int)LWS_EXT_CB_PAYLOAD_TX, &pmdrx, (int)*wp);
1678 		if (n < 0)
1679 			return -1;
1680 		lwsl_ext("%s: defl ext ret %d, ext in remaining %d, "
1681 			    "out %d compressed (wp 0x%x)\n", __func__, n,
1682 			    (int)pmdrx.eb_in.len, (int)pmdrx.eb_out.len, *wp);
1683 
1684 		if (n == PMDR_HAS_PENDING) {
1685 			lwsl_ext("%s: HAS PENDING: write drain len %d "
1686 				    "(wp 0x%x) SETTING tx_draining_ext "
1687 				    "(remaining in %d)\n", __func__,
1688 				    (int)pmdrx.eb_out.len, *wp,
1689 				    (int)pmdrx.eb_in.len);
1690 			/* extension requires further draining */
1691 			wsi->ws->tx_draining_ext = 1;
1692 			wsi->ws->tx_draining_ext_list =
1693 					pt->ws.tx_draining_ext_list;
1694 			pt->ws.tx_draining_ext_list = wsi;
1695 			/* we must come back to do more */
1696 			lws_callback_on_writable(wsi);
1697 			/*
1698 			 * keep a copy of the write type for the overall
1699 			 * action that has provoked generation of these
1700 			 * fragments, so the last guy can use its FIN state.
1701 			 */
1702 			wsi->ws->tx_draining_stashed_wp = (uint8_t)*wp;
1703 			/*
1704 			 * Despite what we may have thought, this is definitely
1705 			 * NOT the last fragment, because the extension asserted
1706 			 * he has more coming.  For example, the extension may
1707 			 * be compressing, and has saved up everything until the
1708 			 * end, where the output is larger than one chunk.
1709 			 *
1710 			 * Make sure this intermediate one doesn't actually
1711 			 * go out with a FIN.
1712 			 */
1713 			*wp |= LWS_WRITE_NO_FIN;
1714 		}
1715 #endif
1716 		if (pmdrx.eb_out.len && wsi->ws->stashed_write_pending) {
1717 			wsi->ws->stashed_write_pending = 0;
1718 			*wp = (unsigned int)(((*wp) & 0xc0) | (unsigned int)wsi->ws->stashed_write_type);
1719 		}
1720 	}
1721 
1722 	/*
1723 	 * an extension did something we need to keep... for example, if
1724 	 * compression extension, it has already updated its state according
1725 	 * to this being issued
1726 	 */
1727 	if (buf != pmdrx.eb_out.token) {
1728 		/*
1729 		 * ext might eat it, but not have anything to issue yet.
1730 		 * In that case we have to follow his lead, but stash and
1731 		 * replace the write type that was lost here the first time.
1732 		 */
1733 		if (len && !pmdrx.eb_out.len) {
1734 			if (!wsi->ws->stashed_write_pending)
1735 				wsi->ws->stashed_write_type =
1736 						(char)(*wp) & 0x3f;
1737 			wsi->ws->stashed_write_pending = 1;
1738 			return (int)len;
1739 		}
1740 		/*
1741 		 * extension recreated it:
1742 		 * need to buffer this if not all sent
1743 		 */
1744 		wsi->ws->clean_buffer = 0;
1745 	}
1746 
1747 	buf = pmdrx.eb_out.token;
1748 	len = (unsigned int)pmdrx.eb_out.len;
1749 
1750 	if (!buf) {
1751 		lwsl_err("null buf (%d)\n", (int)len);
1752 		return -1;
1753 	}
1754 
1755 	switch (wsi->ws->ietf_spec_revision) {
1756 	case 13:
1757 		if (masked7) {
1758 			pre += 4;
1759 			dropmask = &buf[0 - pre];
1760 			is_masked_bit = 0x80;
1761 		}
1762 
1763 		switch ((*wp) & 0xf) {
1764 		case LWS_WRITE_TEXT:
1765 			n = LWSWSOPC_TEXT_FRAME;
1766 			break;
1767 		case LWS_WRITE_BINARY:
1768 			n = LWSWSOPC_BINARY_FRAME;
1769 			break;
1770 		case LWS_WRITE_CONTINUATION:
1771 			n = LWSWSOPC_CONTINUATION;
1772 			break;
1773 
1774 		case LWS_WRITE_CLOSE:
1775 			n = LWSWSOPC_CLOSE;
1776 			break;
1777 		case LWS_WRITE_PING:
1778 			n = LWSWSOPC_PING;
1779 			break;
1780 		case LWS_WRITE_PONG:
1781 			n = LWSWSOPC_PONG;
1782 			break;
1783 		default:
1784 			lwsl_warn("lws_write: unknown write opc / wp\n");
1785 			return -1;
1786 		}
1787 
1788 		if (!((*wp) & LWS_WRITE_NO_FIN))
1789 			n |= 1 << 7;
1790 
		if (len < 126) {
			pre += 2;
			buf[-pre] = (uint8_t)n;
			buf[-pre + 1] = (unsigned char)(len | is_masked_bit);
		} else {
			if (len < 65536) {
				pre += 4;
				buf[-pre] = (uint8_t)n;
				buf[-pre + 1] = (uint8_t)(126 | is_masked_bit);
				buf[-pre + 2] = (unsigned char)(len >> 8);
				buf[-pre + 3] = (unsigned char)len;
			} else {
				pre += 10;
				buf[-pre] = (uint8_t)n;
				buf[-pre + 1] = (uint8_t)(127 | is_masked_bit);
#if defined __LP64__
				buf[-pre + 2] = (uint8_t)((len >> 56) & 0x7f);
				buf[-pre + 3] = (uint8_t)(len >> 48);
				buf[-pre + 4] = (uint8_t)(len >> 40);
				buf[-pre + 5] = (uint8_t)(len >> 32);
#else
				buf[-pre + 2] = 0;
				buf[-pre + 3] = 0;
				buf[-pre + 4] = 0;
				buf[-pre + 5] = 0;
#endif
				buf[-pre + 6] = (unsigned char)(len >> 24);
				buf[-pre + 7] = (unsigned char)(len >> 16);
				buf[-pre + 8] = (unsigned char)(len >> 8);
				buf[-pre + 9] = (unsigned char)len;
			}
		}
		break;
	}

do_more_inside_frame:

	/*
	 * Deal with masking if we are in client -> server direction and
	 * the wp demands it
	 */

	if (masked7) {
		if (!wsi->ws->inside_frame)
			if (lws_0405_frame_mask_generate(wsi)) {
				lwsl_err("frame mask generation failed\n");
				return -1;
			}

		/*
		 * in v7, just mask the payload
		 */
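		/*
		 * dropmask points at the 4 bytes reserved for the masking
		 * key, immediately ahead of the payload: XOR the payload
		 * bytes in place with the rotating key, then copy the key
		 * itself into those 4 bytes.
		 */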
		if (dropmask) { /* never set if already inside frame */
			for (n = 4; n < (int)len + 4; n++)
				dropmask[n] = dropmask[n] ^ wsi->ws->mask[
					(wsi->ws->mask_idx++) & 3];

			/* copy the frame nonce into place */
			memcpy(dropmask, wsi->ws->mask, 4);
		}
	}

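	/*
	 * For ws carried on an h2 stream (ws-over-h2), hand the completed
	 * ws frame to the h2 parent role via the network wsi so it gets
	 * wrapped in h2 framing; the ws role does not write to the socket
	 * directly in that case.
	 */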
	if (lwsi_role_h2_ENCAPSULATION(wsi)) {
		struct lws *encap = lws_get_network_wsi(wsi);

		assert(encap != wsi);

		return lws_rops_func_fidx(encap->role_ops,
				   LWS_ROPS_write_role_protocol).
					write_role_protocol(wsi, buf - pre,
							    len + (unsigned int)pre, wp);
	}

	switch ((*wp) & 0x1f) {
	case LWS_WRITE_TEXT:
	case LWS_WRITE_BINARY:
	case LWS_WRITE_CONTINUATION:
		if (!wsi->h2_stream_carries_ws) {

			/*
			 * Give any active extensions a chance to munge the
			 * buffer before send.  We pass in a pointer to an
			 * lws_tokens struct prepared with the default buffer
			 * and the content length that's in there.  Rather
			 * than rewrite the default buffer, extensions that
			 * expect to grow the buffer can adapt .token to point
			 * to their own per-connection buffer in the extension
			 * user allocation.  By default, with no extensions or
			 * no extension callback handling, the normal input
			 * buffer is used directly, which is efficient.
			 *
			 * The callback returns 1 if it wants to spill more
			 * buffers.
			 *
			 * This takes care of holding the buffer if the send
			 * is incomplete, ie, if wsi->ws->clean_buffer is 0
			 * (meaning an extension meddled with the buffer).  If
			 * wsi->ws->clean_buffer is 1, it will instead return
			 * to the user code how much OF THE USER BUFFER was
			 * consumed.
			 */

			n = lws_issue_raw_ext_access(wsi, buf - pre, len + (unsigned int)pre);
			wsi->ws->inside_frame = 1;
			if (n <= 0)
				return n;

			if (n == (int)len + pre) {
				/* everything in the buffer was handled
				 * (or rebuffered...) */
				wsi->ws->inside_frame = 0;
				return (int)orig_len;
			}

			/*
			 * n is how many bytes of the user buffer got sent;
			 * it may be < orig_len, in which case a callback
			 * when writable has already been arranged and user
			 * code can call lws_write() again later with the
			 * rest.
			 */

			return n - pre;
		}
		break;
	default:
		break;
	}

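	/*
	 * Control frames (CLOSE / PING / PONG) and payloads carried on an
	 * h2 stream skip the extension path above and are written out
	 * directly.
	 */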
send_raw:
	return lws_issue_raw(wsi, (unsigned char *)buf - pre, len + (unsigned int)pre);
}

static int
rops_close_kill_connection_ws(struct lws *wsi, enum lws_close_status reason)
{
	/* deal with ws encapsulation in h2 */
#if defined(LWS_WITH_HTTP2)
	if (wsi->mux_substream && wsi->h2_stream_carries_ws)
		return lws_rops_func_fidx(&role_ops_h2,
				   LWS_ROPS_close_kill_connection).
				close_kill_connection(wsi, reason);

	return 0;
#else
	return 0;
#endif
}

static int
rops_callback_on_writable_ws(struct lws *wsi)
{
#if defined(LWS_WITH_HTTP2)
	if (lwsi_role_h2_ENCAPSULATION(wsi)) {
		/* we know then that it has an h2 parent */
		struct lws *enc = lws_rops_func_fidx(&role_ops_h2,
						     LWS_ROPS_encapsulation_parent).
						     encapsulation_parent(wsi);

		assert(enc);
		if (lws_rops_func_fidx(enc->role_ops,
				       LWS_ROPS_callback_on_writable).
						callback_on_writable(wsi))
			return 1;
	}
#endif
	return 0;
}

static int
rops_init_vhost_ws(struct lws_vhost *vh,
		   const struct lws_context_creation_info *info)
{
#if !defined(LWS_WITHOUT_EXTENSIONS)
#ifdef LWS_WITH_PLUGINS
	struct lws_plugin *plugin;
	int m;

	if (vh->context->plugin_extension_count) {

		m = 0;
		while (info->extensions && info->extensions[m].callback)
			m++;

		/*
		 * give the vhost a unified list of extensions including the
		 * ones that came from plugins
		 */
		vh->ws.extensions = lws_zalloc(sizeof(struct lws_extension) *
				     (unsigned int)(m + vh->context->plugin_extension_count + 1),
				     "extensions");
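		/*
		 * The allocation is zeroed, so the extra +1 entry above acts
		 * as the zero-filled terminating entry of the merged
		 * extension list.
		 */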
		if (!vh->ws.extensions)
			return 1;

		memcpy((struct lws_extension *)vh->ws.extensions, info->extensions,
		       sizeof(struct lws_extension) * (unsigned int)m);
		plugin = vh->context->plugin_list;
		while (plugin) {
			const lws_plugin_protocol_t *plpr =
				(const lws_plugin_protocol_t *)plugin->hdr;

			memcpy((struct lws_extension *)&vh->ws.extensions[m],
				plpr->extensions,
			       sizeof(struct lws_extension) *
			       (unsigned int)plpr->count_extensions);
			m += plpr->count_extensions;
			plugin = plugin->list;
		}
	} else
#endif
		vh->ws.extensions = info->extensions;
#endif

	return 0;
}

static int
rops_destroy_vhost_ws(struct lws_vhost *vh)
{
#ifdef LWS_WITH_PLUGINS
#if !defined(LWS_WITHOUT_EXTENSIONS)
	if (vh->context->plugin_extension_count)
		lws_free((void *)vh->ws.extensions);
#endif
#endif

	return 0;
}

#if defined(LWS_WITH_HTTP_PROXY)
static int
ws_destroy_proxy_buf(struct lws_dll2 *d, void *user)
{
	lws_free(d);

	return 0;
}
#endif

static int
rops_destroy_role_ws(struct lws *wsi)
{
#if defined(LWS_WITH_HTTP_PROXY)
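	/* free any ws proxy buffers still queued on this connection */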
	lws_dll2_foreach_safe(&wsi->ws->proxy_owner, NULL, ws_destroy_proxy_buf);
#endif

	lws_free_set_NULL(wsi->ws);

	return 0;
}

static int
rops_issue_keepalive_ws(struct lws *wsi, int isvalid)
{
	uint64_t us;

#if defined(LWS_WITH_HTTP2)
	if (lwsi_role_h2_ENCAPSULATION(wsi)) {
		/* we know then that it has an h2 parent */
		struct lws *enc = lws_rops_func_fidx(&role_ops_h2,
						     LWS_ROPS_encapsulation_parent).
						     encapsulation_parent(wsi);

		assert(enc);
		if (lws_rops_func_fidx(enc->role_ops, LWS_ROPS_issue_keepalive).
						  issue_keepalive(enc, isvalid))
			return 1;
	}
#endif
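	/*
	 * Plain ws: either confirm validity directly, or arm a PING whose
	 * payload is the current time in microseconds, to be sent from the
	 * writeable handler when send_check_ping is set.
	 */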

	if (isvalid)
		_lws_validity_confirmed_role(wsi);
	else {
		us = (uint64_t)lws_now_usecs();
		memcpy(&wsi->ws->ping_payload_buf[LWS_PRE], &us, 8);
		wsi->ws->send_check_ping = 1;
		lws_callback_on_writable(wsi);
	}

	return 0;
}

static const lws_rops_t rops_table_ws[] = {
	/*  1 */ { .init_vhost		    = rops_init_vhost_ws },
	/*  2 */ { .destroy_vhost	    = rops_destroy_vhost_ws },
	/*  3 */ { .service_flag_pending    = rops_service_flag_pending_ws },
	/*  4 */ { .handle_POLLIN	    = rops_handle_POLLIN_ws },
	/*  5 */ { .handle_POLLOUT	    = rops_handle_POLLOUT_ws },
	/*  6 */ { .callback_on_writable    = rops_callback_on_writable_ws },
	/*  7 */ { .write_role_protocol	    = rops_write_role_protocol_ws },
	/*  8 */ { .close_via_role_protocol = rops_close_via_role_protocol_ws },
	/*  9 */ { .close_role		    = rops_close_role_ws },
	/* 10 */ { .close_kill_connection   = rops_close_kill_connection_ws },
	/* 11 */ { .destroy_role	    = rops_destroy_role_ws },
	/* 12 */ { .issue_keepalive	    = rops_issue_keepalive_ws },
};

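/*
 * Each byte of rops_idx below packs two 4-bit, 1-based indices into
 * rops_table_ws, high nibble first, with 0 meaning the op is not
 * implemented for this role; the pairs follow LWS_ROPS_* order.  For
 * example, 0x9a selects entry 9 (close_role) and entry 10
 * (close_kill_connection) from the table above.
 */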
const struct lws_role_ops role_ops_ws = {
	/* role name */			"ws",
	/* alpn id */			NULL,

	/* rops_table */		rops_table_ws,
	/* rops_idx */			{
	  /* LWS_ROPS_check_upgrades */
	  /* LWS_ROPS_pt_init_destroy */		0x00,
	  /* LWS_ROPS_init_vhost */
	  /* LWS_ROPS_destroy_vhost */			0x12,
	  /* LWS_ROPS_service_flag_pending */
	  /* LWS_ROPS_handle_POLLIN */			0x34,
	  /* LWS_ROPS_handle_POLLOUT */
	  /* LWS_ROPS_perform_user_POLLOUT */		0x50,
	  /* LWS_ROPS_callback_on_writable */
	  /* LWS_ROPS_tx_credit */			0x60,
	  /* LWS_ROPS_write_role_protocol */
	  /* LWS_ROPS_encapsulation_parent */		0x70,
	  /* LWS_ROPS_alpn_negotiated */
	  /* LWS_ROPS_close_via_role_protocol */	0x08,
	  /* LWS_ROPS_close_role */
	  /* LWS_ROPS_close_kill_connection */		0x9a,
	  /* LWS_ROPS_destroy_role */
	  /* LWS_ROPS_adoption_bind */			0xb0,
	  /* LWS_ROPS_client_bind */
	  /* LWS_ROPS_issue_keepalive */		0x0c,
					},

	/* adoption_cb clnt, srv */	{ LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED,
					  LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED },
	/* rx_cb clnt, srv */		{ LWS_CALLBACK_CLIENT_RECEIVE,
					  LWS_CALLBACK_RECEIVE },
	/* writeable cb clnt, srv */	{ LWS_CALLBACK_CLIENT_WRITEABLE,
					  LWS_CALLBACK_SERVER_WRITEABLE },
	/* close cb clnt, srv */	{ LWS_CALLBACK_CLIENT_CLOSED,
					  LWS_CALLBACK_CLOSED },
	/* protocol_bind cb c, srv */	{ LWS_CALLBACK_WS_CLIENT_BIND_PROTOCOL,
					  LWS_CALLBACK_WS_SERVER_BIND_PROTOCOL },
	/* protocol_unbind cb c, srv */	{ LWS_CALLBACK_WS_CLIENT_DROP_PROTOCOL,
					  LWS_CALLBACK_WS_SERVER_DROP_PROTOCOL },
	/* file handles */		0
};