/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 *
 * When the user code is in a different process, a non-tls unix domain socket
 * proxy is used to asynchronously transfer buffers in each direction via the
 * network stack, without explicit IPC
 *
 *     user_process{ [user code] | shim | socket-}------ lws_process{ lws }
 *
 * Lws exposes a listening unix domain socket in this case, the user processes
 * connect to it and pass just info.streamtype in an initial tx packet.  All
 * packets are prepended by a 1-byte type field when used in this mode.  See
 * lws-secure-streams.h for documentation and definitions.
 *
 * Proxying in either direction can face the situation that it cannot send the
 * onward packet immediately, so the write request is separated from the write
 * action.  To make the best use of memory, a single preallocated buffer
 * stashes pending packets in all four directions (c->p, p->c, p->ss, ss->p).
 * This allows it to adapt to different traffic patterns without wasted areas
 * dedicated to traffic that doesn't occur in a particular application.
 *
 * A shim is provided to monitor the process' unix domain socket and regenerate
 * the secure streams api there, with callbacks happening in the process thread
 * context.
 *
 * This file implements the listening unix domain socket proxy... this code is
 * only going to run on a Linux-class device, with its implications about
 * memory availability.
 */

#include <private-lib-core.h>
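
/*
 * Overview (illustrative summary reconstructed from the code below, not an
 * authoritative description; lws-secure-streams.h remains the definitive
 * reference for the serialization format): each client link accepted on the
 * proxy's listening unix domain socket gets a struct conn binding the pieces
 * together
 *
 *   client wsi  <->  struct conn  <->  onward secure stream (lws_ss_handle_t)
 *                      |->wsi     client-side unix domain socket wsi
 *                      |->ss      onward secure stream handle
 *                      |->dsh     single buffer stashing pending packets
 *                      |->state   LPCSPROX_WAIT_INITIAL_TX ... _OPERATIONAL
 *
 * Serialized packets exchanged over the unix domain socket begin with a
 * 1-byte type (eg, LWSSS_SER_RXPRE_METADATA); the packets assembled in this
 * file follow that with a 16-bit big-endian length and then the payload.
 */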

struct raw_pss {
	struct conn		*conn;
};

/*
 * Proxy - onward secure-stream handler
 */

typedef struct ss_proxy_onward {
	lws_ss_handle_t		*ss;
	struct conn		*conn;
} ss_proxy_t;

void
lws_proxy_clean_conn_ss(struct lws *wsi)
{
#if 0
	lws_ss_handle_t *h;
	struct conn *conn;

	if (!wsi)
		return;

	h = (lws_ss_handle_t *)wsi->a.opaque_user_data;
	conn = h->conn_if_sspc_onw;

	if (conn && conn->ss)
		conn->ss->wsi = NULL;
#endif
}


void
ss_proxy_onward_link_req_writeable(lws_ss_handle_t *h_onward)
{
	ss_proxy_t *m = (ss_proxy_t *)&h_onward[1];

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);
}

int
__lws_ss_proxy_bind_ss_to_conn_wsi(void *parconn, size_t dsh_size)
{
	struct conn *conn = (struct conn *)parconn;
	struct lws_context_per_thread *pt;

	if (!conn || !conn->wsi || !conn->ss)
		return -1;

	pt = &conn->wsi->a.context->pt[(int)conn->wsi->tsi];

	if (lws_fi(&conn->ss->fic, "ssproxy_dsh_create_oom"))
		return -1;
	conn->dsh = lws_dsh_create(&pt->ss_dsh_owner, dsh_size, 2);
	if (!conn->dsh)
		return -1;

	__lws_lc_tag_append(&conn->wsi->lc, lws_ss_tag(conn->ss));

	return 0;
}
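
/*
 * Note: lws_dsh_create() above is given two kinds to manage inside the single
 * preallocated buffer.  As used later in this file, KIND_C_TO_P holds
 * serialized client -> proxy payload waiting to be dredged out by the onward
 * ss tx handler, and KIND_SS_TO_P holds serialized ss -> proxy traffic
 * waiting to be written back to the client over the unix domain socket.
 */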

/* Onward secure streams payload interface */

static lws_ss_state_return_t
ss_proxy_onward_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;
	const char *rsp = NULL;
	int n;

	// lwsl_notice("%s: len %d\n", __func__, (int)len);

	/*
	 * The onward secure stream connection has received something.
	 */

	if (m->ss->rideshare != m->ss->policy && m->ss->rideshare) {
		rsp = m->ss->rideshare->streamtype;
		flags |= LWSSS_FLAG_RIDESHARE;
	}

	/*
	 * Apply SSS framing around this chunk of RX and stash it in the dsh
	 * in ss -> proxy [ -> client] direction.  This can fail...
	 */

	if (lws_fi(&m->ss->fic, "ssproxy_dsh_rx_queue_oom"))
		n = 1;
	else
		n = lws_ss_serialize_rx_payload(m->conn->dsh, buf, len,
						flags, rsp);
	if (n)
		/*
		 * We couldn't buffer this rx, eg due to OOM, let's escalate it
		 * to be a "loss of connection", which it basically is...
		 */
		return LWSSSSRET_DISCONNECT_ME;

	/*
	 * Manage rx flow on the SS (onward) side according to our situation
	 * in the dsh holding proxy->client serialized forwarding rx
	 */

	if (!m->conn->onward_in_flow_control && m->ss->wsi &&
	    m->ss->policy->proxy_buflen_rxflow_on_above &&
	    lws_dsh_get_size(m->conn->dsh, KIND_SS_TO_P) >=
				m->ss->policy->proxy_buflen_rxflow_on_above) {
		lwsl_info("%s: %s: rxflow disabling rx (%lu / %lu, hwm %lu)\n", __func__,
				lws_wsi_tag(m->ss->wsi),
				(unsigned long)lws_dsh_get_size(m->conn->dsh, KIND_SS_TO_P),
				(unsigned long)m->ss->policy->proxy_buflen,
				(unsigned long)m->ss->policy->proxy_buflen_rxflow_on_above);
		/*
		 * stop taking in rx once the onward wsi rx is above the
		 * high water mark
		 */
		lws_rx_flow_control(m->ss->wsi, 0);
		m->conn->onward_in_flow_control = 1;
	}

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);

	return LWSSSSRET_OK;
}
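
/*
 * The rx flow control thresholds above come from the streamtype's policy.
 * As a sketch only (the JSON key names are assumed to mirror the policy
 * struct members used above; check the lws policy documentation for the
 * exact spellings), a streamtype could bound proxy-side buffering like this:
 *
 *   "mystream": {
 *       "endpoint":                      "example.com",
 *       "proxy_buflen":                  65536,
 *       "proxy_buflen_rxflow_on_above":  49152,
 *       "proxy_buflen_rxflow_off_below": 16384
 *   }
 *
 * Once the ss -> proxy dsh occupancy reaches the "on_above" level, onward rx
 * is throttled with lws_rx_flow_control(); the client-side WRITEABLE handler
 * re-enables it when occupancy drains below the "off_below" level.
 */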

/*
 * we are transmitting buffered payload originally from the client on to the ss
 */

static lws_ss_state_return_t
ss_proxy_onward_tx(void *userobj, lws_ss_tx_ordinal_t ord, uint8_t *buf,
		   size_t *len, int *flags)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;
	void *p;
	size_t si;

	if (!m->conn->ss || m->conn->state != LPCSPROX_OPERATIONAL) {
		lwsl_notice("%s: ss not ready\n", __func__);
		*len = 0;

		return LWSSSSRET_TX_DONT_SEND;
	}

	/*
	 * The onward secure stream says that we could send something to it
	 * (by putting it in buf, and setting *len and *flags)... dredge the
	 * next thing out of the dsh
	 */

	if (lws_ss_deserialize_tx_payload(m->conn->dsh, m->ss->wsi,
					  ord, buf, len, flags))
		return LWSSSSRET_TX_DONT_SEND;

	/* ... there's more we want to send? */
	if (!lws_dsh_get_head(m->conn->dsh, KIND_C_TO_P, (void **)&p, &si))
		_lws_ss_request_tx(m->conn->ss);

	if (!*len && !*flags)
		/* we don't actually want to send anything */
		return LWSSSSRET_TX_DONT_SEND;

	lwsl_info("%s: onward tx %d fl 0x%x\n", __func__, (int)*len, *flags);

#if 0
	{
		int ff = open("/tmp/z", O_RDWR | O_CREAT | O_APPEND, 0666);
		if (ff == -1)
			lwsl_err("%s: errno %d\n", __func__, errno);
		else {
			write(ff, buf, *len);
			close(ff);
		}
	}
#endif

	return LWSSSSRET_OK;
}
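
/*
 * Client -> ss direction in summary: bytes arriving from the client over the
 * unix domain socket are parsed by lws_ss_deserialize_parse() in the RAW_RX
 * callback below and buffered in the dsh; when the onward secure stream
 * becomes writeable, ss_proxy_onward_tx() above dredges the next chunk back
 * out, and if more is still queued under KIND_C_TO_P it immediately asks for
 * another onward tx opportunity via _lws_ss_request_tx().
 */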

static lws_ss_state_return_t
ss_proxy_onward_state(void *userobj, void *sh,
		      lws_ss_constate_t state, lws_ss_tx_ordinal_t ack)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;
	size_t dsh_size;

	switch (state) {
	case LWSSSCS_CREATING:

		/*
		 * conn is private to -process.c, so call through to a) append
		 * the onward ss tag information to the accepted incoming
		 * proxy link wsi tag name now that we have it, and b)
		 * allocate the dsh buffer now that we can find out the
		 * policy for this streamtype.
		 */

		dsh_size = m->ss->policy->proxy_buflen ?
				m->ss->policy->proxy_buflen : 32768;

		lwsl_notice("%s: %s: initializing dsh max len %lu\n",
				__func__, lws_ss_tag(m->ss),
				(unsigned long)dsh_size);

		/* this includes ssproxy_dsh_create_oom fault generation */

		if (__lws_ss_proxy_bind_ss_to_conn_wsi(m->conn, dsh_size)) {

			/* failed to allocate the dsh */

			lwsl_notice("%s: dsh init failed\n", __func__);

			return LWSSSSRET_DESTROY_ME;
		}
		break;

	case LWSSSCS_DESTROYING:
		if (!m->conn)
			break;
		if (!m->conn->wsi) {
			/*
			 * Our onward secure stream is closing and our client
			 * connection has already gone away... destroy the conn.
			 */
			lwsl_info("%s: Destroying conn\n", __func__);
			lws_dsh_destroy(&m->conn->dsh);
			free(m->conn);
			m->conn = NULL;
			return 0;
		} else
			lwsl_info("%s: ss DESTROYING, wsi up\n", __func__);
		break;

	default:
		break;
	}
	if (!m->conn) {
		lwsl_warn("%s: dropping state due to conn not up\n", __func__);

		return LWSSSSRET_OK;
	}

	if (lws_ss_serialize_state(m->conn->wsi, m->conn->dsh, state, ack))
		/*
		 * Failed to alloc the state packet that we want to send in the
		 * dsh; we will lose coherence and have to disconnect the link
		 */
		return LWSSSSRET_DISCONNECT_ME;

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);

	return LWSSSSRET_OK;
}
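
/*
 * Every state change reported by the onward secure stream is serialized into
 * the dsh by lws_ss_serialize_state() above and forwarded over the unix
 * domain socket the next time the client link is writeable, so the client
 * side shim can replay the same state callbacks in the user process' thread
 * context.
 */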

void
ss_proxy_onward_txcr(void *userobj, int bump)
{
	ss_proxy_t *m = (ss_proxy_t *)userobj;

	if (!m->conn)
		return;

	lws_ss_serialize_txcr(m->conn->dsh, bump);

	if (m->conn->wsi) /* if possible, request client conn write */
		lws_callback_on_writable(m->conn->wsi);
}

/*
 * Client <-> Proxy connection, usually on Unix Domain Socket
 */

static int
callback_ss_proxy(struct lws *wsi, enum lws_callback_reasons reason,
		  void *user, void *in, size_t len)
{
	struct raw_pss *pss = (struct raw_pss *)user;
	const lws_ss_policy_t *rsp;
	struct conn *conn = NULL;
	lws_ss_metadata_t *md;
	lws_ss_info_t ssi;
	const uint8_t *cp;
	char s[512];
	uint8_t *p;
	size_t si;
	char pay;
	int n;

	if (pss)
		conn = pss->conn;

	switch (reason) {
	case LWS_CALLBACK_PROTOCOL_INIT:
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
		break;

	/* callbacks related to raw socket descriptor "accepted side" */

	case LWS_CALLBACK_RAW_ADOPT:
		lwsl_info("LWS_CALLBACK_RAW_ADOPT\n");
		if (!pss)
			return -1;

		if (lws_fi(&wsi->fic, "ssproxy_client_adopt_oom"))
			pss->conn = NULL;
		else
			pss->conn = malloc(sizeof(struct conn));
		if (!pss->conn)
			return -1;

		memset(pss->conn, 0, sizeof(*pss->conn));

		/* dsh is allocated when the onward ss is done */

		pss->conn->wsi = wsi;
		wsi->bound_ss_proxy_conn = 1; /* opaque is conn */

		pss->conn->state = LPCSPROX_WAIT_INITIAL_TX;

		/*
		 * Client is expected to follow the unix domain socket
		 * acceptance up rapidly with an initial tx containing the
		 * streamtype name.  We can't create the stream until then.
		 */
		lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_CLIENT_HS_SEND, 3);
		break;

	case LWS_CALLBACK_RAW_CLOSE:
		lwsl_info("LWS_CALLBACK_RAW_CLOSE:\n");

		if (!conn)
			break;

		/*
		 * the client unix domain socket connection (wsi / conn->wsi)
		 * has closed... eg, client has exited or otherwise has
		 * definitively finished with the proxying and onward connection
		 *
		 * But right now, the SS and possibly the SS onward wsi are
		 * still live...
		 */

		assert(conn->wsi == wsi);
		conn->wsi = NULL;

		lwsl_notice("%s: cli->prox link %s closing\n", __func__,
				lws_wsi_tag(wsi));

		/* sever relationship with conn */
		lws_set_opaque_user_data(wsi, NULL);

		/*
		 * The current wsi is decoupled from the pss / conn and
		 * the conn no longer has a pointer on it.
		 *
		 * If there's an outgoing, proxied SS conn on our behalf, we
		 * have to destroy it
		 */

		if (conn->ss) {
			struct lws *cw = conn->ss->wsi;
			/*
			 * conn->ss is the onward connection SS
			 */

			lwsl_info("%s: destroying %s, wsi %s\n",
					__func__, lws_ss_tag(conn->ss),
					lws_wsi_tag(conn->ss->wsi));

			/* sever conn relationship with ss about to be deleted */

			conn->ss->wsi = NULL;

			if (cw && wsi != cw) {

				/* disconnect onward SS from its wsi */

				lws_set_opaque_user_data(cw, NULL);

				/*
				 * The wsi doing the onward connection can no
				 * longer relate to the conn... otherwise when
				 * he gets callbacks he wants to bind to
				 * the ss we are about to delete
				 */
				lws_wsi_close(cw, LWS_TO_KILL_ASYNC);
			}

			lws_ss_destroy(&conn->ss);
			/*
			 * Conn may have gone away already, freed by the ss
			 * destroy handler (ssi.state) for the proxied ss
			 */
			break;
		}

		if (conn->state == LPCSPROX_DESTROYED || !conn->ss) {
			/*
			 * There's no onward secure stream and our client
			 * connection is closing.  Destroy the conn.
			 */
			lws_dsh_destroy(&conn->dsh);
			free(conn);
			pss->conn = NULL;
		} else
			lwsl_debug("%s: CLOSE; %s\n", __func__, lws_ss_tag(conn->ss));

		break;

	case LWS_CALLBACK_RAW_RX:
		/*
		 * ie, the proxy is receiving something from a client
		 */
		lwsl_info("%s: RX: rx %d\n", __func__, (int)len);

		if (!conn || !conn->wsi) {
			lwsl_err("%s: rx with bad conn state\n", __func__);

			return -1;
		}

		// lwsl_hexdump_info(in, len);

		if (conn->state == LPCSPROX_WAIT_INITIAL_TX) {
			memset(&ssi, 0, sizeof(ssi));
			ssi.user_alloc = sizeof(ss_proxy_t);
			ssi.handle_offset = offsetof(ss_proxy_t, ss);
			ssi.opaque_user_data_offset =
					offsetof(ss_proxy_t, conn);
			ssi.rx = ss_proxy_onward_rx;
			ssi.tx = ss_proxy_onward_tx;
		}
		ssi.state = ss_proxy_onward_state;
		ssi.flags = 0;

		// coverity[uninit_use_in_call]
		n = lws_ss_deserialize_parse(&conn->parser,
				lws_get_context(wsi), conn->dsh, in, len,
				&conn->state, conn, &conn->ss, &ssi, 0);
		switch (n) {
		case LWSSSSRET_OK:
			break;
		case LWSSSSRET_DISCONNECT_ME:
			return -1;
		case LWSSSSRET_DESTROY_ME:
			if (conn->ss)
				lws_ss_destroy(&conn->ss);
			return -1;
		}

		if (conn->state == LPCSPROX_REPORTING_FAIL ||
		    conn->state == LPCSPROX_REPORTING_OK)
			lws_callback_on_writable(conn->wsi);

		break;

	case LWS_CALLBACK_RAW_WRITEABLE:

		lwsl_debug("%s: %s: LWS_CALLBACK_RAW_WRITEABLE, state 0x%x\n",
				__func__, lws_wsi_tag(wsi), lwsi_state(wsi));

		/*
		 * We can transmit something back to the client from the dsh
		 * of stuff we received on its behalf from the ss
		 */

		if (!conn || !conn->wsi)
			break;

		n = 0;
		pay = 0;

		s[3] = 0;
		cp = (const uint8_t *)s;
		switch (conn->state) {
		case LPCSPROX_REPORTING_FAIL:
			s[3] = 1;
			/* fallthru */
		case LPCSPROX_REPORTING_OK:
			s[0] = LWSSS_SER_RXPRE_CREATE_RESULT;
			s[1] = 0;
			s[2] = 1;

			n = 8;

			lws_ser_wu32be((uint8_t *)&s[4], conn->ss &&
							 conn->ss->policy ?
					conn->ss->policy->client_buflen : 0);

			/*
			 * If there's rideshare sequencing, it's added after the
			 * first 4 bytes of the create result, comma-separated
			 */

			if (conn->ss) {
				rsp = conn->ss->policy;

				while (rsp) {
					if (n != 4 && n < (int)sizeof(s) - 2)
						s[n++] = ',';
					n += lws_snprintf(&s[n], sizeof(s) - (unsigned int)n,
							"%s", rsp->streamtype);
					rsp = lws_ss_policy_lookup(wsi->a.context,
						rsp->rideshare_streamtype);
				}
			}
			s[2] = (char)(n - 3);
			conn->state = LPCSPROX_OPERATIONAL;
			lws_set_timeout(wsi, 0, 0);
			break;

		case LPCSPROX_OPERATIONAL:

			/*
			 * returning [onward ->] proxy -> client
			 * rx metadata has priority 1
			 */

			md = conn->ss->metadata;
			while (md) {
				// lwsl_notice("%s: check %s: %d\n", __func__,
				// md->name, md->pending_onward);
				if (md->pending_onward) {
					size_t naml = strlen(md->name);

					// lwsl_notice("%s: proxy issuing rxmd\n", __func__);

					if (4 + naml + md->length > sizeof(s)) {
						lwsl_err("%s: rxmdata too big\n",
								__func__);
						goto hangup;
					}
					md->pending_onward = 0;
					p = (uint8_t *)s;
					p[0] = LWSSS_SER_RXPRE_METADATA;
					lws_ser_wu16be(&p[1], (uint16_t)(1 + naml +
							      md->length));
					p[3] = (uint8_t)naml;
					memcpy(&p[4], md->name, naml);
					p += 4 + naml;
					memcpy(p, md->value__may_own_heap,
					       md->length);
					p += md->length;

					n = lws_ptr_diff(p, cp);
					goto again;
				}

				md = md->next;
			}

			/*
			 * If we have performance data, render it in JSON
			 * and send that in an LWSSS_SER_RXPRE_PERF packet;
			 * it has priority 2
			 */

#if defined(LWS_WITH_CONMON)
			if (conn->ss->conmon_json) {
				unsigned int xlen = conn->ss->conmon_len;

				if (xlen > sizeof(s) - 3)
					xlen = sizeof(s) - 3;
				cp = (uint8_t *)s;
				p = (uint8_t *)s;
				p[0] = LWSSS_SER_RXPRE_PERF;
				lws_ser_wu16be(&p[1], (uint16_t)xlen);
				memcpy(&p[3], conn->ss->conmon_json, xlen);

				lws_free_set_NULL(conn->ss->conmon_json);
				n = (int)(xlen + 3);

				pay = 0;
				goto again;
			}
#endif
			/*
			 * if no fresh rx metadata, just pass through incoming
			 * dsh
			 */

			if (lws_dsh_get_head(conn->dsh, KIND_SS_TO_P,
					     (void **)&p, &si))
				break;

			cp = p;

#if 0
			if (cp[0] == LWSSS_SER_RXPRE_RX_PAYLOAD &&
			    wsi->a.context->detailed_latency_cb) {

				/*
				 * we're fulfilling rx that came in on ss
				 * by sending it back out to the client on
				 * the Unix Domain Socket
				 *
				 * +  7  u32  write will compute latency here...
				 * + 11  u32  ust we received from ss
				 *
				 * lws_write will report it and fill in
				 * LAT_DUR_PROXY_CLIENT_REQ_TO_WRITE
				 */

				us = lws_now_usecs();
				lws_ser_wu32be(&p[7], us -
						      lws_ser_ru64be(&p[11]));
				lws_ser_wu64be(&p[11], us);

				wsi->detlat.acc_size =
					wsi->detlat.req_size = si - 19;
				/* time proxy held it */
				wsi->detlat.latencies[
				            LAT_DUR_PROXY_RX_TO_ONWARD_TX] =
							lws_ser_ru32be(&p[7]);
			}
#endif
			pay = 1;
			n = (int)si;
			break;
		default:
			break;
		}
again:
		if (!n)
			break;

		if (lws_fi(&wsi->fic, "ssproxy_client_write_fail"))
			n = -1;
		else
			n = lws_write(wsi, (uint8_t *)cp, (unsigned int)n, LWS_WRITE_RAW);
		if (n < 0) {
			lwsl_info("%s: WRITEABLE: %d\n", __func__, n);

			goto hangup;
		}

		switch (conn->state) {
		case LPCSPROX_REPORTING_FAIL:
			goto hangup;
		case LPCSPROX_OPERATIONAL:
			if (!conn)
				break;
			if (pay) {
				lws_dsh_free((void **)&p);

				/*
				 * Did we go below the rx flow threshold for
				 * this dsh?
				 */

				if (conn->onward_in_flow_control &&
				    conn->ss->policy->proxy_buflen_rxflow_on_above &&
				    conn->ss->wsi &&
				    lws_dsh_get_size(conn->dsh, KIND_SS_TO_P) <
				      conn->ss->policy->proxy_buflen_rxflow_off_below) {
					lwsl_info("%s: %s: rxflow enabling rx (%lu / %lu, lwm %lu)\n", __func__,
							lws_wsi_tag(conn->ss->wsi),
							(unsigned long)lws_dsh_get_size(conn->dsh, KIND_SS_TO_P),
							(unsigned long)conn->ss->policy->proxy_buflen,
							(unsigned long)conn->ss->policy->proxy_buflen_rxflow_off_below);
					/*
					 * Resume taking in rx once we are
					 * below the low threshold
					 */
					lws_rx_flow_control(conn->ss->wsi,
							    LWS_RXFLOW_ALLOW);
					conn->onward_in_flow_control = 0;
				}
			}
			if (!lws_dsh_get_head(conn->dsh, KIND_SS_TO_P,
					     (void **)&p, &si)) {
				if (!lws_send_pipe_choked(wsi)) {
					cp = p;
					pay = 1;
					n = (int)si;
					goto again;
				}
				lws_callback_on_writable(wsi);
			}
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}

	return lws_callback_http_dummy(wsi, reason, user, in, len);

hangup:
	/* hang up on him */

	return -1;
}
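
/*
 * Illustrative layout of the create result packet assembled in the WRITEABLE
 * handler above, reconstructed from the code (lws-secure-streams.h remains
 * the authoritative reference for the serialization format):
 *
 *   byte  0      LWSSS_SER_RXPRE_CREATE_RESULT
 *   bytes 1..2   big-endian length of everything following byte 2
 *   byte  3      0 = stream created OK, 1 = creation failed
 *   bytes 4..7   32-bit big-endian client_buflen from the policy (0 if none)
 *   bytes 8...   comma-delimited streamtype names written by the loop above,
 *                the stream's own streamtype followed by any rideshares
 */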

static const struct lws_protocols protocols[] = {
	{
		"ssproxy-protocol",
		callback_ss_proxy,
		sizeof(struct raw_pss),
		2048, 2048, NULL, 0
	},
	{ NULL, NULL, 0, 0, 0, NULL, 0 }
};

/*
 * called from create_context()
 */

int
lws_ss_proxy_create(struct lws_context *context, const char *bind, int port)
{
	struct lws_context_creation_info info;

	memset(&info, 0, sizeof(info));

	info.vhost_name			= "ssproxy";
	info.options = LWS_SERVER_OPTION_ADOPT_APPLY_LISTEN_ACCEPT_CONFIG |
			LWS_SERVER_OPTION_SS_PROXY;
	info.port = port;
	if (!port) {
		if (!bind)
#if defined(__linux__)
			bind = "@proxy.ss.lws";
#else
			bind = "/tmp/proxy.ss.lws";
#endif
		info.options |= LWS_SERVER_OPTION_UNIX_SOCK;
	}
	info.iface			= bind;
#if defined(__linux__)
	info.unix_socket_perms		= "root:root";
#else
#endif
	info.listen_accept_role		= "raw-skt";
	info.listen_accept_protocol	= "ssproxy-protocol";
	info.protocols			= protocols;

	if (!lws_create_vhost(context, &info)) {
		lwsl_err("%s: Failed to create ss proxy vhost\n", __func__);

		return 1;
	}

	return 0;
}
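
#if 0
/*
 * Illustrative usage sketch only, not part of this file's logic: the proxy
 * side normally reaches lws_ss_proxy_create() from context creation, but its
 * effect can be pictured as the direct call below.  Passing bind == NULL and
 * port == 0 selects the default unix domain socket shown above, which on
 * Linux is "@proxy.ss.lws" (the leading '@' denoting the abstract namespace)
 * and the filesystem path "/tmp/proxy.ss.lws" elsewhere.
 * example_start_ss_proxy() is a hypothetical helper name.
 */
static int
example_start_ss_proxy(struct lws_context *context)
{
	if (lws_ss_proxy_create(context, NULL, 0)) {
		lwsl_err("%s: ss proxy listener failed\n", __func__);

		return 1;
	}

	return 0;
}
#endif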