1 /**
2  * @file
3  * Transmission Control Protocol for IP
4  * See also @ref tcp_raw
5  *
6  * @defgroup tcp_raw TCP
7  * @ingroup callbackstyle_api
8  * Transmission Control Protocol for IP\n
9  * @see @ref api
10  *
11  * Common functions for the TCP implementation, such as functions
12  * for manipulating the data structures and the TCP timer functions. TCP functions
13  * related to input and output are found in tcp_in.c and tcp_out.c, respectively.\n
14  *
15  * TCP connection setup
16  * --------------------
17  * The functions used for setting up connections are similar to those of
18  * the sequential API and the BSD socket API. A new TCP connection
19  * identifier (i.e., a protocol control block - PCB) is created with the
20  * tcp_new() function. This PCB can then be either set to listen for new
21  * incoming connections or be explicitly connected to another host.
22  * - tcp_new()
23  * - tcp_bind()
24  * - tcp_listen() and tcp_listen_with_backlog()
25  * - tcp_accept()
26  * - tcp_connect()
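 *
 * A minimal sketch of the listening (server) side of the raw API; the names
 * echo_init, echo_accept and echo_recv are placeholders and error handling
 * is omitted for brevity:
 * @code{.c}
 * static err_t echo_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
 * {
 *   LWIP_UNUSED_ARG(arg);
 *   LWIP_UNUSED_ARG(err);
 *   tcp_recv(newpcb, echo_recv);  // register the receive callback (see below)
 *   return ERR_OK;
 * }
 *
 * void echo_init(void)
 * {
 *   struct tcp_pcb *pcb = tcp_new();
 *   tcp_bind(pcb, IP_ANY_TYPE, 7);          // bind to TCP port 7 on any local address
 *   pcb = tcp_listen_with_backlog(pcb, 1);  // the original pcb is freed here
 *   tcp_accept(pcb, echo_accept);
 * }
 * @endcode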
27  *
28  * Sending TCP data
29  * ----------------
30  * TCP data is sent by enqueueing the data with a call to tcp_write() and
31  * triggering transmission by calling tcp_output(). When the data is successfully
32  * transmitted to the remote host, the application will be notified with a
33  * call to a specified callback function.
34  * - tcp_write()
35  * - tcp_output()
36  * - tcp_sent()
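 *
 * A minimal send sketch; on_sent and send_hello are placeholder names, and
 * the payload is assumed to fit into the send buffer (see tcp_sndbuf()):
 * @code{.c}
 * static err_t on_sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
 * {
 *   // len bytes have been acknowledged by the remote host
 *   return ERR_OK;
 * }
 *
 * static err_t send_hello(struct tcp_pcb *tpcb)
 * {
 *   err_t err = tcp_write(tpcb, "hello", 5, TCP_WRITE_FLAG_COPY);
 *   if (err == ERR_OK) {
 *     tcp_sent(tpcb, on_sent);  // notified once the data is ACKed
 *     err = tcp_output(tpcb);   // try to send the enqueued data now
 *   }
 *   return err;
 * }
 * @endcode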
37  *
38  * Receiving TCP data
39  * ------------------
40  * TCP data reception is callback based - an application-specified
41  * callback function is called when new data arrives. When the
42  * application has taken the data, it has to call the tcp_recved()
43  * function to indicate that TCP can increase the advertised receive
44  * window.
45  * - tcp_recv()
46  * - tcp_recved()
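 *
 * A minimal receive callback sketch (echo_recv is a placeholder name; a NULL
 * pbuf signals that the remote host closed the connection):
 * @code{.c}
 * static err_t echo_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
 * {
 *   LWIP_UNUSED_ARG(arg);
 *   if (p == NULL) {
 *     return tcp_close(tpcb);   // remote side closed the connection
 *   }
 *   if (err == ERR_OK) {
 *     // consume p->payload here, then re-open the receive window
 *     tcp_recved(tpcb, p->tot_len);
 *   }
 *   pbuf_free(p);
 *   return ERR_OK;
 * }
 * @endcode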
47  *
48  * Application polling
49  * -------------------
50  * When a connection is idle (i.e., no data is either transmitted or
51  * received), lwIP will repeatedly poll the application by calling a
52  * specified callback function. This can be used either as a watchdog
53  * timer for killing connections that have stayed idle for too long, or
54  * as a method of waiting for memory to become available. For instance,
55  * if a call to tcp_write() has failed because memory wasn't available,
56  * the application may use the polling functionality to call tcp_write()
57  * again when the connection has been idle for a while.
58  * - tcp_poll()
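 *
 * A minimal poll callback sketch acting as an idle watchdog (my_poll and
 * idle_polls are placeholders; a real application would keep the counter in
 * per-connection state passed via tcp_arg()):
 * @code{.c}
 * static u32_t idle_polls;
 *
 * static err_t my_poll(void *arg, struct tcp_pcb *tpcb)
 * {
 *   LWIP_UNUSED_ARG(arg);
 *   if (++idle_polls > 10) {
 *     tcp_abort(tpcb);
 *     return ERR_ABRT;  // the pcb has been freed by tcp_abort()
 *   }
 *   return ERR_OK;
 * }
 * @endcode
 * Registered e.g. with tcp_poll(tpcb, my_poll, 4), i.e. called every fourth
 * coarse-grained timer interval.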
59  *
60  * Closing and aborting connections
61  * --------------------------------
62  * - tcp_close()
63  * - tcp_abort()
64  * - tcp_err()
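 *
 * A minimal close sketch (my_err and my_close are placeholder names):
 * tcp_close() may fail with ERR_MEM, in which case the application should
 * retry later, e.g. from the sent or poll callback; the error callback
 * (registered with tcp_err() while the connection is in use) reports
 * connections that were reset or aborted by the stack:
 * @code{.c}
 * static void my_err(void *arg, err_t err)
 * {
 *   // the pcb is already freed when this is called:
 *   // only release application state here
 * }
 *
 * static void my_close(struct tcp_pcb *tpcb)
 * {
 *   // clear the callbacks, then close
 *   tcp_arg(tpcb, NULL);
 *   tcp_recv(tpcb, NULL);
 *   tcp_sent(tpcb, NULL);
 *   tcp_err(tpcb, NULL);
 *   if (tcp_close(tpcb) != ERR_OK) {
 *     // out of memory: retry tcp_close() later or fall back to tcp_abort()
 *   }
 * }
 * @endcode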
65  *
66  */
67 
68 /*
69  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
70  * All rights reserved.
71  *
72  * Redistribution and use in source and binary forms, with or without modification,
73  * are permitted provided that the following conditions are met:
74  *
75  * 1. Redistributions of source code must retain the above copyright notice,
76  *    this list of conditions and the following disclaimer.
77  * 2. Redistributions in binary form must reproduce the above copyright notice,
78  *    this list of conditions and the following disclaimer in the documentation
79  *    and/or other materials provided with the distribution.
80  * 3. The name of the author may not be used to endorse or promote products
81  *    derived from this software without specific prior written permission.
82  *
83  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
84  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
85  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
86  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
87  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
88  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
89  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
90  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
91  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
92  * OF SUCH DAMAGE.
93  *
94  * This file is part of the lwIP TCP/IP stack.
95  *
96  * Author: Adam Dunkels <adam@sics.se>
97  *
98  */
99 
100 #include "lwip/opt.h"
101 
102 #if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
103 
104 #include "lwip/def.h"
105 #include "lwip/mem.h"
106 #include "lwip/memp.h"
107 #include "lwip/tcp.h"
108 #include "lwip/priv/tcp_priv.h"
109 #include "lwip/debug.h"
110 #include "lwip/stats.h"
111 #include "lwip/ip6.h"
112 #include "lwip/ip6_addr.h"
113 #include "lwip/nd6.h"
114 #include "lwip/sys.h"
115 #include "lwip/tcp_info.h"
116 #include "lwip/lwip_rpl.h"
117 #include "lwip/priv/api_msg.h"
118 
119 #include <string.h>
120 
121 #ifdef LWIP_HOOK_FILENAME
122 #include LWIP_HOOK_FILENAME
123 #endif
124 
125 #ifndef TCP_LOCAL_PORT_RANGE_START
126 /* From http://www.iana.org/assignments/port-numbers:
127    "The Dynamic and/or Private Ports are those from 49152 through 65535" */
128 #define TCP_LOCAL_PORT_RANGE_START        0xc000
129 #define TCP_LOCAL_PORT_RANGE_END          0xffff
130 #define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START))
131 #endif
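/* For example, with the default range above,
   TCP_ENSURE_LOCAL_PORT_RANGE(0x1234) == (0x1234 & 0x3fff) + 0xc000 == 0xd234,
   i.e. any 16-bit value is mapped into the dynamic/private range 0xc000..0xffff. */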
132 
133 #if LWIP_TCP_KEEPALIVE
134 #define TCP_KEEP_DUR(pcb)   ((pcb)->keep_cnt * (pcb)->keep_intvl)
135 #define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl)
136 #else /* LWIP_TCP_KEEPALIVE */
137 #define TCP_KEEP_DUR(pcb)   TCP_MAXIDLE
138 #define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT
139 #endif /* LWIP_TCP_KEEPALIVE */
140 
141 /* As initial send MSS, we use TCP_MSS but limit it to 536. */
142 #if TCP_MSS > 536
143 #define INITIAL_MSS 536
144 #else
145 #define INITIAL_MSS TCP_MSS
146 #endif
147 
148 const char *const tcp_state_str[] = {
149   "CLOSED",
150   "LISTEN",
151   "SYN_SENT",
152   "SYN_RCVD",
153   "ESTABLISHED",
154   "FIN_WAIT_1",
155   "FIN_WAIT_2",
156   "CLOSE_WAIT",
157   "CLOSING",
158   "LAST_ACK",
159   "TIME_WAIT"
160 };
161 
162 /* last local TCP port */
163 static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START;
164 
165 /* Incremented every coarse grained timer shot (typically every 500 ms). */
166 u32_t tcp_ticks;
167 static const u8_t tcp_backoff[13] =
168 { 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7};
169 /* Number of tcp_slowtmr() ticks per persist-timer backoff slot */
170 static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 };
171 
172 /* The TCP PCB lists. */
173 
174 /** List of all TCP PCBs bound but not yet (connected || listening) */
175 struct tcp_pcb *tcp_bound_pcbs;
176 /** List of all TCP PCBs in LISTEN state */
177 union tcp_listen_pcbs_t tcp_listen_pcbs;
178 /** List of all TCP PCBs that are in a state in which
179  * they accept or send data. */
180 struct tcp_pcb *tcp_active_pcbs;
181 /** List of all TCP PCBs in TIME-WAIT state */
182 struct tcp_pcb *tcp_tw_pcbs;
183 
184 /** An array with all (non-temporary) PCB lists, mainly used for smaller code size */
185 struct tcp_pcb **const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs,
186          &tcp_active_pcbs, &tcp_tw_pcbs
187 };
188 
189 u8_t tcp_active_pcbs_changed;
190 
191 /** Timer counter to handle calling slow-timer from tcp_tmr() */
192 static u8_t tcp_timer;
193 static u8_t tcp_timer_ctr;
194 static u16_t tcp_new_port(void);
195 
196 static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb);
197 #if LWIP_SACK
198 void tcp_connect_update_sack(struct tcp_pcb *pcb, u32_t iss);
199 #endif
200 #if LWIP_TCP_PCB_NUM_EXT_ARGS
201 static void tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args);
202 #endif
203 static void tcp_listen_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb_listen *lpcb);
204 
205 /**
206  * Initialize this module.
207  */
208 void
209 tcp_init(void)
210 {
211 #ifdef LWIP_RAND
212   tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND());
213 #endif /* LWIP_RAND */
214 }
215 
216 static void
217 tcp_generate_port_candidate(void)
218 {
219 #ifdef LWIP_RAND
220   tcp_port = (u16_t)(TCP_LOCAL_PORT_RANGE_START +
221              (LWIP_RAND() % (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START + 1)));
222 #else
223   if (tcp_port++ == TCP_LOCAL_PORT_RANGE_END) {
224     tcp_port = TCP_LOCAL_PORT_RANGE_START;
225   }
226 #endif
227 }
228 
229 
230 /** Free a tcp pcb */
231 void
232 tcp_free(struct tcp_pcb *pcb)
233 {
234   LWIP_ASSERT("tcp_free: LISTEN", pcb->state != LISTEN);
235 #if LWIP_TCP_PCB_NUM_EXT_ARGS
236   tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args);
237 #endif
238   memp_free(MEMP_TCP_PCB, pcb);
239 }
240 
241 /** Free a tcp listen pcb */
242 static void
243 tcp_free_listen(struct tcp_pcb *pcb)
244 {
245   LWIP_ASSERT("tcp_free_listen: !LISTEN", pcb->state != LISTEN);
246 #if LWIP_TCP_PCB_NUM_EXT_ARGS
247   tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args);
248 #endif
249   memp_free(MEMP_TCP_PCB_LISTEN, pcb);
250 }
251 
252 /**
253  * Called periodically to dispatch TCP timers.
254  */
255 void
256 tcp_tmr(void)
257 {
258   /* Call tcp_fasttmr() every TCP_TMR_INTERVAL ms */
259   tcp_fasttmr();
260 
261   if (++tcp_timer == TCP_SLOW_INTERVAL_PERIOD) {
262     /* Call tcp_slowtmr() once every TCP_SLOW_INTERVAL_PERIOD
263        calls of tcp_tmr(). */
264     tcp_slowtmr();
265     tcp_timer = 0;
266   }
267 }
268 
269 #if LWIP_LOWPOWER
270 #include "lwip/lowpower.h"
271 
272 static u32_t
273 tcp_set_timer_tick_by_persist(struct tcp_pcb *pcb, u32_t tick)
274 {
275   u32_t val;
276 
277   if (pcb->persist_backoff > 0) {
278     u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff - 1];
279     SET_TMR_TICK(tick, backoff_cnt);
280     return tick;
281   }
282 
283   /* persist timer not running; check the retransmission timer instead */
284   if (pcb->rtime >= 0) {
285     val = pcb->rto - pcb->rtime;
286     if (val == 0) {
287       val = 1;
288     }
289     SET_TMR_TICK(tick, val);
290   }
291   return tick;
292 }
293 
294 static u32_t
295 tcp_set_timer_tick_by_keepalive(struct tcp_pcb *pcb, u32_t tick)
296 {
297   u32_t val;
298 
299   if (ip_get_option(pcb, SOF_KEEPALIVE) &&
300       ((pcb->state == ESTABLISHED) ||
301        (pcb->state == CLOSE_WAIT))) {
302     u32_t idle = (pcb->keep_idle) / TCP_SLOW_INTERVAL;
303     if (pcb->keep_cnt_sent == 0) {
304       val = idle - (tcp_ticks - pcb->tmr);
305     } else {
306       val = (tcp_ticks - pcb->tmr) - idle;
307       idle = (TCP_KEEP_INTVL(pcb) / TCP_SLOW_INTERVAL);
308       val  = idle - (val % idle);
309     }
310     /* add 1 to make sure the timer fires */
311     val++;
312     SET_TMR_TICK(tick, val);
313   }
314 
315   return tick;
316 }
317 
318 static u32_t tcp_set_timer_tick_by_tcp_state(struct tcp_pcb *pcb, u32_t tick)
319 {
320   u32_t val;
321 
322   /* Check if this PCB has stayed too long in FIN-WAIT-2 */
323   if (pcb->state == FIN_WAIT_2) {
324     /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */
325     if (pcb->flags & TF_RXCLOSED) {
326       val = TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL;
327       SET_TMR_TICK(tick, val);
328     }
329   }
330 
331   /* Check if this PCB has stayed too long in SYN-RCVD */
332   if (pcb->state == SYN_RCVD) {
333     val = TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL;
334     SET_TMR_TICK(tick, val);
335   }
336 
337   /* Check if this PCB has stayed too long in LAST-ACK */
338   if (pcb->state == LAST_ACK) {
339     /*
340      * In a TCP connection the end that performs the active close
341      * is required to stay in TIME_WAIT state for 2MSL of time
342      */
343     val = (2 * TCP_MSL) / TCP_SLOW_INTERVAL;
344     SET_TMR_TICK(tick, val);
345   }
346 
347   return tick;
348 }
349 
350 #if DRIVER_STATUS_CHECK
351 static u32_t tcp_set_timer_tick_by_driver_status(struct tcp_pcb *pcb, u32_t tick, bool drv_flag)
352 {
353   u32_t val;
354 
355   struct netif *netif = NULL;
356   if ((tcp_active_pcbs != NULL) && (drv_flag == lwIP_TRUE)) {
357     for (netif = netif_list; netif != NULL; netif = netif->next) {
358       /* driver not ready on this netif? */
359       if ((!(netif->flags & NETIF_FLAG_DRIVER_RDY) != 0)) {
360         val = DRIVER_WAKEUP_COUNT - netif->waketime + 1;
361         SET_TMR_TICK(tick, val);
362       }
363     }
364   }
365 
366   return tick;
367 }
368 #endif
369 
370 u32_t
371 tcp_slow_tmr_tick(void)
372 {
373   struct tcp_pcb *pcb = NULL;
374   u32_t tick = 0;
375 #if DRIVER_STATUS_CHECK
376   bool drv_flag = lwIP_FALSE;
377 #endif
378 
379   pcb = tcp_active_pcbs;
380   while (pcb != NULL) {
381     if (((pcb->state == SYN_SENT) && (pcb->nrtx >= TCP_SYNMAXRTX)) ||
382         (((pcb->state == FIN_WAIT_1) || (pcb->state == CLOSING)) && (pcb->nrtx >= TCP_FW1MAXRTX)) ||
383         (pcb->nrtx >= TCP_MAXRTX)) {
384       return 1;
385     }
386 
387     tick = tcp_set_timer_tick_by_persist(pcb, tick);
388 
389 #if DRIVER_STATUS_CHECK
390     if (pcb->drv_status == DRV_NOT_READY) {
391       /* iterate through netifs */
392       drv_flag = lwIP_TRUE;
393     }
394 #endif /* DRIVER_STATUS_CHECK */
395 
396     tick = tcp_set_timer_tick_by_keepalive(pcb, tick);
397 
398     /*
399      * If this PCB has queued out-of-sequence data but has been
400      * inactive for too long, the data will be dropped (it will
401      * eventually be retransmitted).
402      */
403 #if TCP_QUEUE_OOSEQ
404     if (pcb->ooseq != NULL) {
405       SET_TMR_TICK(tick, 1);
406     }
407 #endif /* TCP_QUEUE_OOSEQ */
408 
409     tick = tcp_set_timer_tick_by_tcp_state(pcb, tick);
410 
411     u8_t ret = poll_tcp_needed(pcb->callback_arg, pcb);
412     if ((pcb->poll != NULL) && (ret != 0)) {
413       SET_TMR_TICK(tick, 1);
414     }
415     pcb = pcb->next;
416   }
417 
418 #if DRIVER_STATUS_CHECK
419   tick = tcp_set_timer_tick_by_driver_status(pcb, tick, drv_flag);
420 #endif /* DRIVER_STATUS_CHECK */
421 
422   LOWPOWER_DEBUG(("%s:%d tmr tick: %u\n", __func__, __LINE__, tick));
423   return tick;
424 }
425 
426 u32_t
427 tcp_fast_tmr_tick(void)
428 {
429   struct tcp_pcb *pcb = NULL;
430 
431   pcb = tcp_active_pcbs;
432   while (pcb != NULL) {
433     /* send delayed ACKs or send pending FIN */
434     if ((pcb->flags & TF_ACK_DELAY) ||
435         (pcb->flags & TF_CLOSEPEND) ||
436         (pcb->refused_data != NULL) ||
437         (pcb->tcp_pcb_flag & TCP_PBUF_FLAG_TCP_FIN_RECV_SYSPOST_FAIL)
438 #if LWIP_TCP_TLP_SUPPORT
439         || (pcb->tlp_time_stamp != 0)
440 #endif /* LWIP_TCP_TLP_SUPPORT */
441        ) {
442       LOWPOWER_DEBUG(("%s:%d tmr tick: 1\n", __func__, __LINE__));
443       return 1;
444     }
445     pcb = pcb->next;
446   }
447   LOWPOWER_DEBUG(("%s:%d tmr tick: 0\n", __func__, __LINE__));
448   return 0;
449 }
450 #endif /* LWIP_LOWPOWER */
451 
452 #if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG
453 /** Called when a listen pcb is closed. Iterates one pcb list and removes the
454  * closed listener pcb from pcb->listener if matching.
455  */
456 static void
457 tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb)
458 {
459   struct tcp_pcb *pcb;
460 
461   LWIP_ASSERT("tcp_remove_listener: invalid listener", lpcb != NULL);
462 
463   for (pcb = list; pcb != NULL; pcb = pcb->next) {
464     if (pcb->listener == lpcb) {
465       pcb->listener = NULL;
466     }
467   }
468 }
469 #endif
470 
471 /** Called when a listen pcb is closed. Iterates all pcb lists and removes the
472  * closed listener pcb from pcb->listener if matching.
473  */
474 static void
475 tcp_listen_closed(struct tcp_pcb *pcb)
476 {
477 #if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG
478   size_t i;
479   LWIP_ASSERT("pcb != NULL", pcb != NULL);
480   LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN);
481   for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) {
482     tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen *)pcb);
483   }
484 #endif
485   LWIP_UNUSED_ARG(pcb);
486 }
487 
488 #if TCP_LISTEN_BACKLOG
489 /** @ingroup tcp_raw
490  * Delay accepting a connection with respect to the listen backlog:
491  * the number of outstanding connections is increased until
492  * tcp_backlog_accepted() is called.
493  *
494  * ATTENTION: the caller is responsible for calling tcp_backlog_accepted()
495  * or else the backlog feature will get out of sync!
496  *
497  * @param pcb the connection pcb which is not fully accepted yet
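 *
 * A usage sketch (my_accept is a placeholder name): in the accept callback
 * the application can keep the backlog slot occupied until it has actually
 * taken over the connection:
 * @code{.c}
 * static err_t my_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
 * {
 *   LWIP_UNUSED_ARG(arg);
 *   LWIP_UNUSED_ARG(err);
 *   tcp_backlog_delayed(newpcb);  // hold the slot for now
 *   // ... later, once the connection is really accepted (or dropped),
 *   // call tcp_backlog_accepted(newpcb)
 *   return ERR_OK;
 * }
 * @endcode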
498  */
499 void
500 tcp_backlog_delayed(struct tcp_pcb *pcb)
501 {
502   LWIP_ASSERT("pcb != NULL", pcb != NULL);
503   LWIP_ASSERT_CORE_LOCKED();
504   if ((pcb->flags & TF_BACKLOGPEND) == 0) {
505     if (pcb->listener != NULL) {
506       pcb->listener->accepts_pending++;
507       LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0);
508       tcp_set_flags(pcb, TF_BACKLOGPEND);
509     }
510   }
511 }
512 
513 /** @ingroup tcp_raw
514  * A delayed-accept connection has been accepted (or closed/aborted): decreases
515  * the number of outstanding connections after a call to tcp_backlog_delayed().
516  *
517  * ATTENTION: the caller is responsible for calling tcp_backlog_accepted()
518  * or else the backlog feature will get out of sync!
519  *
520  * @param pcb the connection pcb which is now fully accepted (or closed/aborted)
521  */
522 void
523 tcp_backlog_accepted(struct tcp_pcb *pcb)
524 {
525   LWIP_ASSERT("pcb != NULL", pcb != NULL);
526   LWIP_ASSERT_CORE_LOCKED();
527   if ((pcb->flags & TF_BACKLOGPEND) != 0) {
528     if (pcb->listener != NULL) {
529       LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0);
530       pcb->listener->accepts_pending--;
531       tcp_clear_flags(pcb, TF_BACKLOGPEND);
532     }
533   }
534 }
535 #endif /* TCP_LISTEN_BACKLOG */
536 
537 /**
538  * Closes the TX side of a connection held by the PCB.
539  * For tcp_close(), a RST is sent if the application didn't receive all data
540  * (tcp_recved() not called for all data passed to recv callback).
541  *
542  * Listening pcbs are freed and may not be referenced any more.
543  * Connection pcbs are freed if not yet connected and may not be referenced
544  * any more. If a connection is established (at least SYN received or in
545  * a closing state), the connection is closed, and put in a closing state.
546  * The pcb is then automatically freed in tcp_slowtmr(). It is therefore
547  * unsafe to reference it.
548  *
549  * @param pcb the tcp_pcb to close
550  * @return ERR_OK if connection has been closed
551  *         another err_t if closing failed and pcb is not freed
552  */
553 static err_t
554 tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data)
555 {
556   LWIP_ASSERT("tcp_close_shutdown: invalid pcb", pcb != NULL);
557 
558   if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) {
559     if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) {
560       /* Not all data received by application, send RST to tell the remote
561          side about this. */
562       LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED);
563 
564       /* don't call tcp_abort here: we must not deallocate the pcb since
565          that might not be expected when calling tcp_close */
566 #if DRIVER_STATUS_CHECK
567       if (pcb->drv_status == DRV_READY) {
568         tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
569                 pcb->local_port, pcb->remote_port);
570       }
571 #else
572       tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
573               pcb->local_port, pcb->remote_port);
574 #endif
575 
576       tcp_pcb_purge(pcb);
577       TCP_RMV_ACTIVE(pcb);
578       /* Deallocate the pcb since we already sent a RST for it */
579       if (tcp_input_pcb == pcb) {
580         /* prevent using a deallocated pcb: free it from tcp_input later */
581         tcp_trigger_input_pcb_close();
582       } else {
583         tcp_free(pcb);
584       }
585       return ERR_OK;
586     }
587   }
588 
589   /* - states which free the pcb are handled here,
590      - states which send FIN and change state are handled in tcp_close_shutdown_fin() */
591   switch (pcb->state) {
592     case CLOSED:
593       /* Closing a pcb in the CLOSED state might seem erroneous,
594        * however, it is in this state once allocated and as yet unused
595        * and the user needs some way to free it should the need arise.
596        * Calling tcp_close() with a pcb that has already been closed, (i.e. twice)
597        * or for a pcb that has been used and then entered the CLOSED state
598        * is erroneous, but this should never happen as the pcb has in those cases
599        * been freed, and so any remaining handles are bogus. */
600       if (pcb->local_port != 0) {
601         TCP_RMV(&tcp_bound_pcbs, pcb);
602       }
603       tcp_free(pcb);
604       break;
605     case LISTEN:
606       tcp_listen_closed(pcb);
607       tcp_listen_pcb_remove(&tcp_listen_pcbs.pcbs, (struct tcp_pcb_listen*)pcb);
608       tcp_free_listen(pcb);
609       break;
610     case SYN_SENT:
611       TCP_PCB_REMOVE_ACTIVE(pcb);
612       tcp_free(pcb);
613       MIB2_STATS_INC(mib2.tcpattemptfails);
614       break;
615     default:
616       return tcp_close_shutdown_fin(pcb);
617   }
618   return ERR_OK;
619 }
620 
621 static err_t
622 tcp_close_shutdown_fin(struct tcp_pcb *pcb)
623 {
624   err_t err;
625   LWIP_ASSERT("pcb != NULL", pcb != NULL);
626 
627   switch (pcb->state) {
628     case SYN_RCVD:
629       err = tcp_send_fin(pcb);
630       if (err == ERR_OK) {
631         tcp_backlog_accepted(pcb);
632         MIB2_STATS_INC(mib2.tcpattemptfails);
633         pcb->state = FIN_WAIT_1;
634       }
635       break;
636     case ESTABLISHED:
637       err = tcp_send_fin(pcb);
638       if (err == ERR_OK) {
639         MIB2_STATS_INC(mib2.tcpestabresets);
640         pcb->state = FIN_WAIT_1;
641       }
642       break;
643     case CLOSE_WAIT:
644       err = tcp_send_fin(pcb);
645       if (err == ERR_OK) {
646         MIB2_STATS_INC(mib2.tcpestabresets);
647         pcb->state = LAST_ACK;
648       }
649       break;
650     default:
651       /* Has already been closed, do nothing. */
652       return ERR_OK;
653   }
654 
655   if (err == ERR_OK) {
656     /* To ensure all data has been sent when tcp_close returns, we have
657        to make sure tcp_output doesn't fail.
658        Since we don't really have to ensure all data has been sent when tcp_close
659        returns (unsent data is sent from tcp timer functions, also), we don't care
660        for the return value of tcp_output for now. */
661     tcp_output(pcb);
662   } else if (err == ERR_MEM) {
663     /* Mark this pcb for closing. Closing is retried from tcp_tmr. */
664     tcp_set_flags(pcb, TF_CLOSEPEND);
665     /* We have to return ERR_OK from here to indicate to the callers that this
666        pcb should not be used any more as it will be freed soon via tcp_tmr.
667        This is OK here since sending FIN does not guarantee a time frame for
668        actually freeing the pcb, either (it is left in closure states for
669        remote ACK or timeout) */
670     return ERR_OK;
671   }
672   return err;
673 }
674 
675 /**
676  * @ingroup tcp_raw
677  * Closes the connection held by the PCB.
678  *
679  * Listening pcbs are freed and may not be referenced any more.
680  * Connection pcbs are freed if not yet connected and may not be referenced
681  * any more. If a connection is established (at least SYN received or in
682  * a closing state), the connection is closed, and put in a closing state.
683  * The pcb is then automatically freed in tcp_slowtmr(). It is therefore
684  * unsafe to reference it (unless an error is returned).
685  *
686  * The function may return ERR_MEM if no memory
687  * was available for closing the connection. If so, the application
688  * should wait and try again either by using the acknowledgment
689  * callback or the polling functionality. If the close succeeds, the
690  * function returns ERR_OK.
691  *
692  * @param pcb the tcp_pcb to close
693  * @return ERR_OK if connection has been closed
694  *         another err_t if closing failed and pcb is not freed
695  */
696 err_t
697 tcp_close(struct tcp_pcb *pcb)
698 {
699   LWIP_ASSERT_CORE_LOCKED();
700 
701   LWIP_ERROR("tcp_close: invalid pcb", pcb != NULL, return ERR_ARG);
702   LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in "));
703 
704   tcp_debug_print_state(pcb->state);
705 
706   if (pcb->state != LISTEN) {
707     /* Set a flag not to receive any more data... */
708     tcp_set_flags(pcb, TF_RXCLOSED);
709   }
710   /* ... and close */
711   return tcp_close_shutdown(pcb, 1);
712 }
713 
714 /**
715  * @ingroup tcp_raw
716  * Causes all or part of a full-duplex connection of this PCB to be shut down.
717  * This doesn't deallocate the PCB unless shutting down both sides!
718  * Shutting down both sides is the same as calling tcp_close, so if it succeeds
719  * (i.e. returns ERR_OK), the PCB must not be referenced any more!
720  *
721  * @param pcb PCB to shutdown
722  * @param shut_rx shut down receive side if this is != 0
723  * @param shut_tx shut down send side if this is != 0
724  * @return ERR_OK if shutdown succeeded (or the PCB has already been shut down)
725  *         another err_t on error.
726  */
727 err_t
728 tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx)
729 {
730   LWIP_ASSERT_CORE_LOCKED();
731 
732   LWIP_ERROR("tcp_shutdown: invalid pcb", pcb != NULL, return ERR_ARG);
733 
734   if (pcb->state == LISTEN) {
735     return ERR_CONN;
736   }
737   if (shut_rx) {
738     /* shut down the receive side: set a flag not to receive any more data... */
739     tcp_set_flags(pcb, TF_RXCLOSED);
740     if (shut_tx) {
741       /* shutting down the tx AND rx side is the same as closing for the raw API */
742       return tcp_close_shutdown(pcb, 1);
743     }
744     /* ... and free buffered data */
745     if (pcb->refused_data != NULL) {
746       pbuf_free(pcb->refused_data);
747       pcb->refused_data = NULL;
748     }
749   }
750   if (shut_tx) {
751     /* This can't happen twice since if it succeeds, the pcb's state is changed.
752        Only close in these states as the others directly deallocate the PCB */
753     switch (pcb->state) {
754       case SYN_RCVD:
755       case ESTABLISHED:
756       case CLOSE_WAIT:
757         return tcp_close_shutdown(pcb, (u8_t)shut_rx);
758       default:
759         /* Not (yet?) connected, cannot shutdown the TX side as that would bring us
760           into CLOSED state, where the PCB is deallocated. */
761         return ERR_CONN;
762     }
763   }
764   return ERR_OK;
765 }
766 
767 /**
768  * Abandons a connection and optionally sends a RST to the remote
769  * host.  Deletes the local protocol control block. This is done when
770  * a connection is killed because of shortage of memory.
771  *
772  * @param pcb the tcp_pcb to abort
773  * @param reset boolean to indicate whether a reset should be sent
774  */
775 void
776 tcp_abandon(struct tcp_pcb *pcb, int reset)
777 {
778   u32_t seqno, ackno;
779 #if LWIP_CALLBACK_API
780   tcp_err_fn errf;
781 #endif /* LWIP_CALLBACK_API */
782   void *errf_arg;
783 
784   LWIP_ASSERT_CORE_LOCKED();
785 
786   LWIP_ERROR("tcp_abandon: invalid pcb", pcb != NULL, return);
787 
788   /* pcb->state LISTEN not allowed here */
789   LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs",
790               pcb->state != LISTEN);
791   /* Figure out on which TCP PCB list we are, and remove us. If we
792      are in an active state, call the receive function associated with
793      the PCB with a NULL argument, and send an RST to the remote end. */
794   if (pcb->state == TIME_WAIT) {
795     if (reset) {
796       LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n"));
797       tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, pcb->local_port, pcb->remote_port);
798     }
799     tcp_pcb_remove(&tcp_tw_pcbs, pcb);
800     tcp_free(pcb);
801   } else {
802     int send_rst = 0;
803     u16_t local_port = 0;
804     enum tcp_state last_state;
805     seqno = pcb->snd_nxt;
806     ackno = pcb->rcv_nxt;
807 #if LWIP_CALLBACK_API
808     errf = pcb->errf;
809 #endif /* LWIP_CALLBACK_API */
810     errf_arg = pcb->callback_arg;
811     if (pcb->state == CLOSED) {
812       if (pcb->local_port != 0) {
813         /* bound, not yet opened */
814         TCP_RMV(&tcp_bound_pcbs, pcb);
815       }
816     } else {
817       send_rst = reset;
818       local_port = pcb->local_port;
819       TCP_PCB_REMOVE_ACTIVE(pcb);
820     }
821     if (pcb->unacked != NULL) {
822       tcp_segs_free(pcb->unacked);
823     }
824     if (pcb->unsent != NULL) {
825       tcp_segs_free(pcb->unsent);
826     }
827 #if TCP_QUEUE_OOSEQ
828     if (pcb->ooseq != NULL) {
829       tcp_segs_free(pcb->ooseq);
830     }
831 #endif /* TCP_QUEUE_OOSEQ */
832     tcp_backlog_accepted(pcb);
833     if (send_rst) {
834       LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n"));
835       tcp_rst(pcb, seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port);
836     }
837     last_state = pcb->state;
838     tcp_free(pcb);
839     TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT);
840   }
841 }
842 
843 /**
844  * @ingroup tcp_raw
845  * Aborts the connection by sending a RST (reset) segment to the remote
846  * host. The pcb is deallocated. This function never fails.
847  *
848  * ATTENTION: When calling this from one of the TCP callbacks, make
849  * sure you always return ERR_ABRT (and never return ERR_ABRT otherwise
850  * or you will risk accessing deallocated memory or memory leaks)!
851  *
852  * @param pcb the tcp pcb to abort
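 *
 * A sketch of correct usage from a callback (my_recv is a placeholder name):
 * @code{.c}
 * static err_t my_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
 * {
 *   LWIP_UNUSED_ARG(arg);
 *   LWIP_UNUSED_ARG(err);
 *   if (p != NULL) {
 *     pbuf_free(p);
 *   }
 *   tcp_abort(tpcb);   // e.g. the application cannot serve this connection
 *   return ERR_ABRT;   // mandatory after calling tcp_abort() from a callback
 * }
 * @endcode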
853  */
854 void
855 tcp_abort(struct tcp_pcb *pcb)
856 {
857   tcp_abandon(pcb, 1);
858 }
859 
860 /**
861  * @ingroup tcp_raw
862  * Binds the connection to a local port number and IP address. If the
863  * IP address is not given (i.e., ipaddr == IP_ANY_TYPE), the connection is
864  * bound to all local IP addresses.
865  * If another connection is bound to the same port, the function will
866  * return ERR_USE, otherwise ERR_OK is returned.
867  * @see MEMP_NUM_TCP_PCB_LISTEN and MEMP_NUM_TCP_PCB
868  *
869  * @param pcb the tcp_pcb to bind (no check is done whether this pcb is
870  *        already bound!)
871  * @param ipaddr the local ip address to bind to (use IPx_ADDR_ANY to bind
872  *        to any local address)
873  * @param port the local port to bind to
874  * @return ERR_USE if the port is already in use
875  *         ERR_VAL if bind failed because the PCB is not in a valid state
876  *         ERR_OK if bound
877  */
878 err_t
879 tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port)
880 {
881   int i;
882   int max_pcb_list = NUM_TCP_PCB_LISTS;
883   struct tcp_pcb *cpcb;
884 #if LWIP_IPV6 && LWIP_IPV6_SCOPES
885   ip_addr_t zoned_ipaddr;
886 #endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
887 
888   LWIP_ASSERT_CORE_LOCKED();
889 
890 #if LWIP_IPV4
891   /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
892   if (ipaddr == NULL) {
893     ipaddr = IP4_ADDR_ANY;
894   }
895 #else /* LWIP_IPV4 */
896   LWIP_ERROR("tcp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG);
897 #endif /* LWIP_IPV4 */
898 
899   LWIP_ERROR("tcp_bind: invalid pcb", pcb != NULL, return ERR_ARG);
900 
901   LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL);
902 
903   if (netif_ipaddr_isbrdcast(ipaddr) || ip_addr_ismulticast(ipaddr)) {
904     return ERR_NOADDR;
905   }
906 
907 #if SO_REUSE
908   /* Unless the REUSEADDR flag is set,
909      we have to check the pcbs in TIME-WAIT state, also.
910      We do not dump TIME_WAIT pcb's; they can still be matched by incoming
911      packets using both local and remote IP addresses and ports to distinguish.
912    */
913   if (ip_get_option(pcb, SOF_REUSEADDR)) {
914     max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT;
915   }
916 #endif /* SO_REUSE */
917 
918 #if LWIP_IPV6 && LWIP_IPV6_SCOPES
919   /* If the given IP address should have a zone but doesn't, assign one now.
920    * This is legacy support: scope-aware callers should always provide properly
921    * zoned source addresses. Do the zone selection before the address-in-use
922    * check below; as such we have to make a temporary copy of the address. */
923   if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNICAST)) {
924     ip_addr_copy(zoned_ipaddr, *ipaddr);
925     ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr));
926     ipaddr = &zoned_ipaddr;
927   }
928 #endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
929 
930   if (port == 0) {
931     port = tcp_new_port();
932     if (port == 0) {
933       return ERR_USE;
934     }
935   } else {
936     /* Check if the address already is in use (on all lists) */
937     for (i = 0; i < max_pcb_list; i++) {
938       for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) {
939         /* omit the conflict check if the pcbs are bound to different netifs */
940         if ((cpcb->netif_idx) && (pcb->netif_idx) && (pcb->netif_idx != cpcb->netif_idx))
941           continue;
942         if (cpcb->local_port == port) {
943 #if SO_REUSE
944           /* Omit checking for the same port if both pcbs have REUSEADDR set.
945              For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in
946              tcp_connect. */
947           if (!ip_get_option(pcb, SOF_REUSEADDR) ||
948               !ip_get_option(cpcb, SOF_REUSEADDR))
949 #endif /* SO_REUSE */
950           {
951             if (((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) ||
952                 (IP_IS_ANY_TYPE_VAL(cpcb->local_ip)) ||
953                 (IP_IS_ANY_TYPE_VAL(*ipaddr))) &&
954                 (ip_addr_isany(&cpcb->local_ip) ||
955                  ip_addr_isany(ipaddr) ||
956                  ip_addr_cmp(&cpcb->local_ip, ipaddr))) {
957               return ERR_USE;
958             }
959           }
960         }
961 
962         /* this pcb is already bound (it is on a pcb list already), return ERR_VAL */
963         if (pcb == cpcb) {
964           return ERR_VAL;
965         }
966       }
967     }
968   }
969 
970   if (!ip_addr_isany(ipaddr)
971 #if LWIP_IPV4 && LWIP_IPV6
972       || (IP_GET_TYPE(ipaddr) != IP_GET_TYPE(&pcb->local_ip))
973 #endif /* LWIP_IPV4 && LWIP_IPV6 */
974      ) {
975     ip_addr_set(&pcb->local_ip, ipaddr);
976   }
977   pcb->local_port = port;
978   TCP_REG(&tcp_bound_pcbs, pcb);
979   LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port));
980   return ERR_OK;
981 }
982 
983 /**
984  * @ingroup tcp_raw
985  * Binds the connection to a netif and IP address.
986  * After calling this function, all packets received via this PCB
987  * are guaranteed to have come in via the specified netif, and all
988  * outgoing packets will go out via the specified netif.
989  *
990  * @param pcb the tcp_pcb to bind.
991  * @param netif the netif to bind to. Can be NULL.
992  */
993 void
994 tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif)
995 {
996   LWIP_ASSERT_CORE_LOCKED();
997   if (netif != NULL) {
998     pcb->netif_idx = netif_get_index(netif);
999   } else {
1000     pcb->netif_idx = NETIF_NO_INDEX;
1001   }
1002 }
1003 
1004 #if LWIP_CALLBACK_API
1005 /**
1006  * Default accept callback if no accept callback is specified by the user.
1007  */
1008 static err_t
1009 tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err)
1010 {
1011   LWIP_UNUSED_ARG(arg);
1012   LWIP_UNUSED_ARG(err);
1013 
1014   LWIP_ASSERT("tcp_accept_null: invalid pcb", pcb != NULL);
1015 
1016   tcp_abort(pcb);
1017 
1018   return ERR_ABRT;
1019 }
1020 #endif /* LWIP_CALLBACK_API */
1021 
1022 /**
1023  * @ingroup tcp_raw
1024  * Set the state of the connection to be LISTEN, which means that it
1025  * is able to accept incoming connections. The protocol control block
1026  * is reallocated in order to consume less memory. Setting the
1027  * connection to LISTEN is an irreversible process.
1028  * When an incoming connection is accepted, the function specified with
1029  * the tcp_accept() function will be called. The pcb has to be bound
1030  * to a local port with the tcp_bind() function.
1031  *
1032  * The tcp_listen() function returns a new connection identifier, and
1033  * the one passed as an argument to the function will be
1034  * deallocated. The reason for this behavior is that less memory is
1035  * needed for a connection that is listening, so tcp_listen() will
1036  * reclaim the memory needed for the original connection and allocate a
1037  * new smaller memory block for the listening connection.
1038  *
1039  * tcp_listen() may return NULL if no memory was available for the
1040  * listening connection. If so, the memory associated with the pcb
1041  * passed as an argument to tcp_listen() will not be deallocated.
1042  *
1043  * The backlog limits the number of outstanding connections
1044  * in the listen queue to the value specified by the backlog argument.
1045  * To use it, you need to set TCP_LISTEN_BACKLOG=1 in your lwipopts.h.
1046  *
1047  * @param pcb the original tcp_pcb
1048  * @param backlog the incoming connections queue limit
1049  * @return tcp_pcb used for listening, consumes less memory.
1050  *
1051  * @note The original tcp_pcb is freed. This function therefore has to be
1052  *       called like this:
1053  *             tpcb = tcp_listen_with_backlog(tpcb, backlog);
1054  */
1055 struct tcp_pcb *
1056 tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog)
1057 {
1058   LWIP_ASSERT_CORE_LOCKED();
1059   return tcp_listen_with_backlog_and_err(pcb, backlog, NULL);
1060 }
1061 
1062 /**
1063  * @ingroup tcp_raw
1064  * Set the state of the connection to be LISTEN, which means that it
1065  * is able to accept incoming connections. The protocol control block
1066  * is reallocated in order to consume less memory. Setting the
1067  * connection to LISTEN is an irreversible process.
1068  *
1069  * @param pcb the original tcp_pcb
1070  * @param backlog the incoming connections queue limit
1071  * @param err when NULL is returned, this contains the error reason
1072  * @return tcp_pcb used for listening, consumes less memory.
1073  *
1074  * @note The original tcp_pcb is freed. This function therefore has to be
1075  *       called like this:
1076  *             tpcb = tcp_listen_with_backlog_and_err(tpcb, backlog, &err);
1077  */
1078 struct tcp_pcb *
1079 tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err)
1080 {
1081   struct tcp_pcb_listen *lpcb = NULL;
1082   err_t res;
1083 
1084   LWIP_UNUSED_ARG(backlog);
1085 
1086   LWIP_ASSERT_CORE_LOCKED();
1087 
1088   LWIP_ERROR("tcp_listen_with_backlog_and_err: invalid pcb", pcb != NULL, res = ERR_ARG; goto done);
1089   LWIP_ERROR("tcp_listen_with_backlog_and_err: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done);
1090 
1091   /* already listening? */
1092   if (pcb->state == LISTEN) {
1093     lpcb = (struct tcp_pcb_listen *)pcb;
1094     res = ERR_ALREADY;
1095     goto done;
1096   }
1097 #if SO_REUSE
1098   if (ip_get_option(pcb, SOF_REUSEADDR)) {
1099     /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage
1100        is declared (listen-/connection-pcb), we have to make sure now that
1101        this port is only used once for every local IP. */
1102     for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) {
1103       if ((lpcb->local_port == pcb->local_port) &&
1104           ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) {
1105         /* this address/port is already used */
1106         lpcb = NULL;
1107         res = ERR_USE;
1108         goto done;
1109       }
1110     }
1111   }
1112 #endif /* SO_REUSE */
1113   lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN);
1114   if (lpcb == NULL) {
1115     res = ERR_MEM;
1116     goto done;
1117   }
1118   (void)memset_s(lpcb, sizeof(struct tcp_pcb_listen), 0, sizeof(struct tcp_pcb_listen));
1119   lpcb->callback_arg = pcb->callback_arg;
1120   lpcb->local_port = pcb->local_port;
1121   lpcb->state = LISTEN;
1122   lpcb->prio = pcb->prio;
1123 #if LWIP_SO_SNDBUF
1124   lpcb->snd_buf_static = pcb->snd_buf_static;
1125 #endif
1126 #if LWIP_TCP_MAXSEG
1127   lpcb->usr_mss = pcb->usr_mss;
1128 #endif /* LWIP_TCP_MAXSEG */
1129   lpcb->so_options = pcb->so_options;
1130   lpcb->netif_idx = pcb->netif_idx;
1131   lpcb->ttl = pcb->ttl;
1132   lpcb->tos = pcb->tos;
1133 #if LWIP_IPV4 && LWIP_IPV6
1134   IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type);
1135 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1136   ip_addr_copy(lpcb->local_ip, pcb->local_ip);
1137   if (pcb->local_port != 0) {
1138     TCP_RMV(&tcp_bound_pcbs, pcb);
1139   }
1140 #if LWIP_SO_PRIORITY
1141   lpcb->priority = pcb->priority;
1142 #endif /* LWIP_SO_PRIORITY */
1143 #if LWIP_TCP_PCB_NUM_EXT_ARGS
1144   /* copy over ext_args to listening pcb  */
1145   memcpy_s(&lpcb->ext_args, sizeof(pcb->ext_args), &pcb->ext_args, sizeof(pcb->ext_args));
1146 #endif
1147   tcp_free(pcb);
1148 #if LWIP_CALLBACK_API
1149   lpcb->accept = tcp_accept_null;
1150 #endif /* LWIP_CALLBACK_API */
1151 #if TCP_LISTEN_BACKLOG
1152   lpcb->accepts_pending = 0;
1153   tcp_backlog_set(lpcb, backlog);
1154 #endif /* TCP_LISTEN_BACKLOG */
1155   TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb);
1156   res = ERR_OK;
1157 done:
1158   if (err != NULL) {
1159     *err = res;
1160   }
1161   return (struct tcp_pcb *)lpcb;
1162 }
1163 
1164 /**
1165  * Update the state that tracks the available window space to advertise.
1166  *
1167  * Returns how much extra window would be advertised if we sent an
1168  * update now.
1169  */
1170 u32_t
1171 tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb)
1172 {
1173   u32_t new_right_edge;
1174 
1175   LWIP_ASSERT("tcp_update_rcv_ann_wnd: invalid pcb", pcb != NULL);
1176   new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd;
1177 
1178   if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) {
1179 #if PBUF_RX_RATIONING
1180     if (pbuf_ram_in_shortage() != lwIP_FALSE) {
1181       /* pbuf RAM is in shortage, so force an immediate ACK instead of advertising a larger window */
1182       pcb->rcv_ann_wnd = LWIP_MIN(pcb->rcv_wnd, LWIP_MIN((TCP_WND / 2), (u32_t)pcb->mss));
1183       return TCP_WND_UPDATE_THRESHOLD;
1184     }
1185 #endif
1186     /* we can advertise more window */
1187     pcb->rcv_ann_wnd = pcb->rcv_wnd;
1188     return new_right_edge - pcb->rcv_ann_right_edge;
1189   } else {
1190     if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) {
1191       /* Can happen due to other end sending out of advertised window,
1192        * but within actual available (but not yet advertised) window */
1193       pcb->rcv_ann_wnd = 0;
1194     } else {
1195       /* keep the right edge of window constant */
1196       u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt;
1197 #if !LWIP_WND_SCALE
1198       LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff);
1199 #endif
1200       pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd;
1201     }
1202     return 0;
1203   }
1204 }
1205 
1206 /**
1207  * @ingroup tcp_raw
1208  * This function should be called by the application when it has
1209  * processed the data. The purpose is to advertise a larger window
1210  * when the data has been processed.
1211  *
1212  * @param pcb the tcp_pcb for which data is read
1213  * @param len the amount of bytes that have been read by the application
1214  */
1215 void
1216 tcp_recved(struct tcp_pcb *pcb, u16_t len)
1217 {
1218   u32_t wnd_inflation;
1219   tcpwnd_size_t rcv_wnd;
1220 
1221   LWIP_ASSERT_CORE_LOCKED();
1222 
1223   LWIP_ERROR("tcp_recved: invalid pcb", pcb != NULL, return);
1224 
1225   /* pcb->state LISTEN not allowed here */
1226   LWIP_ASSERT("don't call tcp_recved for listen-pcbs",
1227               pcb->state != LISTEN);
1228 
1229   rcv_wnd = (tcpwnd_size_t)(pcb->rcv_wnd + len);
1230   if ((rcv_wnd > TCP_WND_MAX(pcb)) || (rcv_wnd < pcb->rcv_wnd)) {
1231     /* window got too big or tcpwnd_size_t overflow */
1232     LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: window got too big or tcpwnd_size_t overflow\n"));
1233     pcb->rcv_wnd = TCP_WND_MAX(pcb);
1234   } else  {
1235     pcb->rcv_wnd = rcv_wnd;
1236   }
1237 
1238   wnd_inflation = tcp_update_rcv_ann_wnd(pcb);
1239 
1240   /* If the change in the right edge of window is significant (default
1241    * watermark is TCP_WND/4), then send an explicit update now.
1242    * Otherwise wait for a packet to be sent in the normal course of
1243    * events (or more window to be available later) */
1244   if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) {
1245     tcp_ack_now(pcb);
1246     tcp_output(pcb);
1247   }
1248 
1249   LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n",
1250                           len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd)));
1251 }
1252 
1253 /**
1254  * Allocate a new local TCP port.
1255  *
1256  * @return a new (free) local TCP port number
1257  */
1258 static u16_t
1259 tcp_new_port(void)
1260 {
1261   u8_t i;
1262   u16_t n = 0;
1263   struct tcp_pcb *pcb;
1264 
1265 again:
1266   tcp_generate_port_candidate();
1267   /* Check all PCB lists. */
1268   for (i = 0; i < NUM_TCP_PCB_LISTS; i++) {
1269     for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) {
1270       if (pcb->local_port == tcp_port) {
1271         n++;
1272         if (n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) {
1273           return 0;
1274         }
1275         goto again;
1276       }
1277     }
1278   }
1279   return tcp_port;
1280 }
1281 
1282 /**
1283  * @ingroup tcp_raw
1284  * Connects to another host. The function given as the "connected"
1285  * argument will be called when the connection has been established.
1286  *  Sets up the pcb to connect to the remote host and sends the
1287  * initial SYN segment which opens the connection.
1288  *
1289  * The tcp_connect() function returns immediately; it does not wait for
1290  * the connection to be properly setup. Instead, it will call the
1291  * function specified as the fourth argument (the "connected" argument)
1292  * when the connection is established. If the connection could not be
1293  * properly established, either because the other host refused the
1294  * connection or because the other host didn't answer, the "err"
1295  * callback function of this pcb (registered with tcp_err, see below)
1296  * will be called.
1297  *
1298  * The tcp_connect() function can return ERR_MEM if no memory is
1299  * available for enqueueing the SYN segment. If the SYN indeed was
1300  * enqueued successfully, the tcp_connect() function returns ERR_OK.
1301  *
1302  * @param pcb the tcp_pcb used to establish the connection
1303  * @param ipaddr the remote ip address to connect to
1304  * @param port the remote tcp port to connect to
1305  * @param connected callback function to call when connected (on error,
1306                     the err callback will be called)
1307  * @return ERR_VAL if invalid arguments are given
1308  *         ERR_OK if connect request has been sent
1309  *         other err_t values if connect request couldn't be sent
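 *
 * A minimal client sketch (my_connected and my_open are placeholder names,
 * error handling is mostly omitted):
 * @code{.c}
 * static err_t my_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
 * {
 *   LWIP_UNUSED_ARG(arg);
 *   LWIP_UNUSED_ARG(err);
 *   // the three-way handshake is done; start sending/receiving on tpcb here
 *   return ERR_OK;
 * }
 *
 * static err_t my_open(const ip_addr_t *server_ip)
 * {
 *   struct tcp_pcb *pcb = tcp_new();
 *   if (pcb == NULL) {
 *     return ERR_MEM;
 *   }
 *   return tcp_connect(pcb, server_ip, 80, my_connected);
 * }
 * @endcode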
1310  */
1311 err_t
1312 tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port,
1313             tcp_connected_fn connected)
1314 {
1315   struct netif *netif = NULL;
1316   err_t ret;
1317   u32_t iss;
1318   u16_t old_local_port;
1319 
1320   LWIP_ASSERT_CORE_LOCKED();
1321 
1322   LWIP_ERROR("tcp_connect: invalid pcb", pcb != NULL, return ERR_ARG);
1323   LWIP_ERROR("tcp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG);
1324 
1325   LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN);
1326   LWIP_ERROR("tcp_connect: can not connect the Multicast IP address",
1327              !ip_addr_ismulticast(ipaddr), return ERR_RTE);
1328   if (pcb->netif_idx != NETIF_NO_INDEX) {
1329     netif = netif_get_by_index(pcb->netif_idx);
1330   } else {
1331     /* check if we have a route to the remote host */
1332     netif = ip_route_pcb(ipaddr, (struct ip_pcb*)pcb);
1333   }
1334   if (netif == NULL) {
1335     /* Don't even try to send a SYN packet if we have no route since that will fail. */
1336     return ERR_NETUNREACH;
1337   }
1338   LWIP_ERROR("tcp_connect: can not connect the Broadcast IP address",
1339              !ip_addr_isbroadcast(ipaddr, netif), return ERR_RTE);
1340   LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port));
1341 
1342   ip_addr_set(&pcb->remote_ip, ipaddr);
1343   pcb->remote_port = port;
1344 #if DRIVER_STATUS_CHECK
1345   if (!(netif->flags & NETIF_FLAG_DRIVER_RDY)) {
1346     pcb->drv_status = DRV_NOT_READY;
1347   } else {
1348     pcb->drv_status = DRV_READY;
1349   }
1350 #endif /* DRIVER_STATUS_CHECK */
1351   /* check if local IP has been assigned to pcb, if not, get one */
1352   if (ip_addr_isany(&pcb->local_ip)) {
1353     /* no local IP address set, yet. */
1354     const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip);
1355     if ((local_ip == NULL)) {
1356       /* Don't even try to send a SYN packet if we have no route
1357          since that will fail. */
1358       return ERR_NETUNREACH;
1359     }
1360     /* Use the address as local address of the pcb. */
1361     ip_addr_copy(pcb->local_ip, *local_ip);
1362   }
1363 
1364 #if LWIP_IPV6 && LWIP_IPV6_SCOPES
1365   /* If the given IP address should have a zone but doesn't, assign one now.
1366    * Given that we already have the target netif, this is easy and cheap. */
1367   if (IP_IS_V6(&pcb->remote_ip) &&
1368       ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST)) {
1369     ip6_addr_assign_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST, netif);
1370   }
1371 #endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
1372 
1373   old_local_port = pcb->local_port;
1374   if (pcb->local_port == 0) {
1375     pcb->local_port = tcp_new_port();
1376     if (pcb->local_port == 0) {
1377       return ERR_NOADDR;
1378     }
1379   } else {
1380 #if SO_REUSE
1381     if (ip_get_option(pcb, SOF_REUSEADDR)) {
1382       /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure
1383          now that the 5-tuple is unique. */
1384       struct tcp_pcb *cpcb;
1385       int i;
1386       /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */
1387       for (i = 2; i < NUM_TCP_PCB_LISTS; i++) {
1388         for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) {
1389           if ((cpcb->local_port == pcb->local_port) &&
1390               (cpcb->remote_port == port) &&
1391               ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) &&
1392               ip_addr_cmp(&cpcb->remote_ip, ipaddr)) {
1393             /* linux returns EISCONN here, but ERR_USE should be OK for us */
1394             return ERR_USE;
1395           }
1396         }
1397       }
1398     }
1399 #endif /* SO_REUSE */
1400   }
1401 
1402   iss = tcp_next_iss(pcb);
1403   pcb->rcv_nxt = 0;
1404   pcb->snd_nxt = iss;
1405   pcb->snd_sml = iss;
1406   pcb->lastack = iss - 1;
1407   pcb->snd_wl2 = iss - 1;
1408   pcb->snd_lbb = iss - 1;
1409   pcb->fast_recovery_point = iss;
1410   pcb->rto_end = iss;
1411 
1412   /* Start with a window that does not need scaling. When window scaling is
1413      enabled and used, the window is enlarged when both sides agree on scaling. */
1414   pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND);
1415 #if PBUF_RX_RATIONING
1416   if (pbuf_ram_in_shortage() != lwIP_FALSE) {
1417     pcb->rcv_ann_wnd = LWIP_MIN(pcb->rcv_ann_wnd, INITIAL_MSS);
1418   }
1419 #endif
1420   pcb->rcv_ann_right_edge = pcb->rcv_nxt;
1421   pcb->snd_wnd = TCP_WND;
1422   /* As initial send MSS, we use TCP_MSS but limit it to 536.
1423      The send MSS is updated when an MSS option is received. */
1424 #if LWIP_TCP_MAXSEG
1425   pcb->mss = ((pcb->usr_mss == 0) ? (INITIAL_MSS) : (pcb->usr_mss));
1426 #else
1427   pcb->mss = INITIAL_MSS;
1428 #endif
1429 #if TCP_CALCULATE_EFF_SEND_MSS
1430   pcb->mss = tcp_eff_send_mss_netif(pcb->mss, netif, &pcb->remote_ip);
1431 #endif /* TCP_CALCULATE_EFF_SEND_MSS */
1432 
1433   LWIP_TCP_CALC_INITIAL_CWND(pcb->mss, pcb->iw);
1434   pcb->cwnd = pcb->iw;
1435 
1436 #if LWIP_CALLBACK_API
1437   pcb->connected = connected;
1438 #else /* LWIP_CALLBACK_API */
1439   LWIP_UNUSED_ARG(connected);
1440 #endif /* LWIP_CALLBACK_API */
1441 
1442 #if LWIP_SACK
1443   tcp_connect_update_sack(pcb, iss);
1444 #endif
1445 
1446   /* Send a SYN together with the MSS option. */
1447   ret = tcp_enqueue_flags(pcb, TCP_SYN);
1448   if (ret == ERR_OK) {
1449     /* SYN segment was enqueued, change the pcb's state now */
1450     pcb->state = SYN_SENT;
1451     if (old_local_port != 0) {
1452       TCP_RMV(&tcp_bound_pcbs, pcb);
1453     }
1454     TCP_REG_ACTIVE(pcb);
1455     MIB2_STATS_INC(mib2.tcpactiveopens);
1456 
1457     tcp_output(pcb);
1458   }
1459   return ret;
1460 }
1461 
1462 /**
1463  * Called every 500 ms and implements the retransmission timer and the timer that
1464  * removes PCBs that have been in TIME-WAIT for enough time. It also increments
1465  * various timers such as the inactivity timer in each PCB.
1466  *
1467  * Automatically called from tcp_tmr().
1468  */
1469 void
1470 tcp_slowtmr(void)
1471 {
1472   struct tcp_pcb *pcb, *prev;
1473   tcpwnd_size_t eff_wnd;
1474   u8_t pcb_remove;      /* flag if a PCB should be removed */
1475   u8_t pcb_reset;       /* flag if a RST should be sent when removing */
1476   err_t err;
1477 #if DRIVER_STATUS_CHECK
1478   struct netif *netif = NULL;
1479 #endif /* DRIVER_STATUS_CHECK */
1480   u8_t connect_timeout;
1481 
1482   err = ERR_OK;
1483 
1484   ++tcp_ticks;
1485   ++tcp_timer_ctr;
1486 
1487 tcp_slowtmr_start:
1488   /* Steps through all of the active PCBs. */
1489   prev = NULL;
1490   pcb = tcp_active_pcbs;
1491   if (pcb == NULL) {
1492     LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n"));
1493   }
1494 
1495 #if DRIVER_STATUS_CHECK
1496   for (netif = netif_list; netif != NULL; netif = netif->next) {
1497     /* driver not ready on this netif? */
1498     if (!(netif->flags & NETIF_FLAG_DRIVER_RDY)) {
1499       if (netif->waketime <= DRIVER_WAKEUP_COUNT) {
1500         netif->waketime++;
1501       }
1502     }
1503   }
1504 #endif /* DRIVER_STATUS_CHECK */
1505 
1506   while (pcb != NULL) {
1507     LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n"));
1508     LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED);
1509     LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN);
1510     LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT);
1511     if (pcb->last_timer == tcp_timer_ctr) {
1512       /* skip this pcb, we have already processed it */
1513       prev = pcb;
1514       pcb = pcb->next;
1515       continue;
1516     }
1517     pcb->last_timer = tcp_timer_ctr;
1518 
1519     pcb_remove = 0;
1520     pcb_reset = 0;
1521 
1522     connect_timeout = 0;
1523     if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) {
1524       ++pcb_remove;
1525       ++connect_timeout;
1526       LWIP_DEBUGF(TCP_DEBUG | TCP_ERR_DEBUG, ("tcp_slowtmr: max SYN retries reached\n"));
1527     } else if ((pcb->state == FIN_WAIT_1 || pcb->state == CLOSING) && (pcb->nrtx >= TCP_FW1MAXRTX)) {
1528       ++pcb_remove;
1529       LWIP_DEBUGF(TCP_DEBUG | TCP_ERR_DEBUG,
1530                   ("tcp_slowtmr: max DATA retries reached in FIN_WAIT_1 or CLOSING state\n"));
1531     } else if (pcb->nrtx >= TCP_MAXRTX) {
1532       if (pcb->state == SYN_SENT) {
1533         ++connect_timeout;
1534       }
1535       ++pcb_remove;
1536       LWIP_DEBUGF(TCP_DEBUG | TCP_ERR_DEBUG, ("tcp_slowtmr: max DATA retries reached\n"));
1537     } else {
1538       if (pcb->persist_backoff > 0) {
1539         LWIP_ASSERT("tcp_slowtimr: persist ticking with in-flight data", pcb->unacked == NULL);
1540         LWIP_ASSERT("tcp_slowtimr: persist ticking with empty send buffer", pcb->unsent != NULL);
1541         if (pcb->persist_probe >= TCP_MAXRTX) {
1542           ++pcb_remove; /* max probes reached */
1543         } else {
1544           u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff - 1];
1545           if (pcb->persist_cnt < backoff_cnt) {
1546             pcb->persist_cnt++;
1547           }
1548           if (pcb->persist_cnt >= backoff_cnt) {
1549             int next_slot = 1; /* increment timer to next slot */
1550             /* If snd_wnd is zero, send 1 byte probes */
1551             if (pcb->snd_wnd == 0) {
1552               if (tcp_zero_window_probe(pcb) != ERR_OK) {
1553                 next_slot = 0; /* try probe again with current slot */
1554               }
1555               /* snd_wnd not fully closed, split unsent head and fill window */
1556             } else {
1557               if (tcp_split_unsent_seg(pcb, (u16_t)pcb->snd_wnd) == ERR_OK) {
1558                 if (tcp_output(pcb) == ERR_OK) {
1559                   /* sending will cancel persist timer, else retry with current slot */
1560                   next_slot = 0;
1561                 }
1562               }
1563             }
1564             if (next_slot) {
1565               pcb->persist_cnt = 0;
1566               if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) {
1567                 pcb->persist_backoff++;
1568               }
1569             }
1570           }
1571         }
1572       } else {
1573         /* Increase the retransmission timer if it is running */
1574         if ((pcb->rtime >= 0) && (pcb->rtime < 0x7FFF)) {
1575           ++pcb->rtime;
1576         }
1577 
1578         if (pcb->rtime >= pcb->rto) {
1579           /* Time for a retransmission. */
1580           LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F
1581                                       " pcb->rto %"S16_F"\n",
1582                                       pcb->rtime, pcb->rto));
1583           /* If the prepare phase fails but we have unsent data and no unacked data,
1584              still execute the backoff calculations below, as this means we somehow
1585              failed to send the segment. */
1586           if ((tcp_rexmit_rto_prepare(pcb) == ERR_OK) || ((pcb->unacked == NULL) && (pcb->unsent != NULL))) {
1587             /* Double retransmission time-out unless we are trying to
1588              * connect to somebody (i.e., we are in SYN_SENT). */
1589             if (pcb->state != SYN_SENT) {
1590               u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff) - 1);
1591               if (pcb->sa != -1) {
1592                 pcb->rto = (int16_t)((((pcb->sa >> 3) + pcb->sv) / TCP_SLOW_INTERVAL) * tcp_backoff[backoff_idx]);
1593               } else {
1594                 pcb->rto = (int16_t)((TCP_INITIAL_RTO_DURATION / TCP_SLOW_INTERVAL) * tcp_backoff[backoff_idx]);
1595               }
1596             } else {
1597               /* overflow cannot happen: RTO is capped at a maximum of 64 seconds and a minimum of 1 second */
1598               pcb->rto = (s16_t)((u16_t)pcb->rto << 1);
1599             }
1600 
1601             /*
1602               RFC 6298 section 2.5
1603               A maximum value MAY be placed on RTO provided it is at least 60 seconds.
1604             */
1605             pcb->rto = (s16_t)LWIP_MIN(TCP_MAX_RTO_TICKS, LWIP_MAX(TCP_MIN_RTO_TICKS, pcb->rto));
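            /* Illustrative arithmetic only: with TCP_SLOW_INTERVAL = 500 ms, a smoothed base of
               ((sa >> 3) + sv) = 3000 ms and a backoff factor of 4 yield (3000 / 500) * 4 = 24 ticks
               (12 seconds), which the clamping above limits to [TCP_MIN_RTO_TICKS, TCP_MAX_RTO_TICKS]. */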
1606 
1607             /* Reset the retransmission timer. */
1608             pcb->rtime = 0;
1609             pcb->dupacks = 0;
1610 
1611             /* Reduce congestion window and ssthresh. */
1612             eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd);
1613 
1614             /*
1615               RFC 5681
1616               Does not comply with the calculation
1617               ssthresh = max (FlightSize/2, 2*SMSS)            (4)
1618 
1619               Instead, the minimum value of ssthresh is kept at 8 * MSS to limit the lower threshold;
1620               this may not hold good for low-bandwidth scenarios.
1621 
1622               The threshold reduction factor is 5/8 instead of 1/2: slightly more optimistic and hence more aggressive.
1623             */
1624             pcb->ssthresh = (tcpwnd_size_t)(((u64_t)eff_wnd * 5) >> 3); /* scale by 5/8 */
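            /* Worked example (illustrative only): eff_wnd = 16000 bytes gives
               ssthresh = 16000 * 5 / 8 = 10000 bytes; the lower bound applied below
               may then raise it to 8 * MSS. */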
1625 
1626             /* The initially computed threshold may not reflect actual network conditions,
1627                so it is adjusted here */
1628             /* enforce a lower bound of 8 * MSS (the mss value itself is limited elsewhere) */
1629             pcb->ssthresh = (tcpwnd_size_t)(LWIP_MAX(pcb->ssthresh, (tcpwnd_size_t)(pcb->mss << 3)));
1630 
1631             /**
1632               This violates the following requirement of RFC 5681 Section 3.1:
1633               Furthermore, upon a timeout (as specified in [RFC2988]) cwnd MUST be
1634                set to no more than the loss window, LW, which equals 1 full-sized
1635                segment (regardless of the value of IW).  Therefore, after
1636                retransmitting the dropped segment the TCP sender uses the slow start
1637                algorithm to increase the window from 1 full-sized segment to the new
1638                value of ssthresh, at which point congestion avoidance again takes
1639                over.
1640 
1641               It should be set to a minimum of at least 2 segments to avoid issues due to delayed ACKs
1642             */
1643 #ifndef LWIP_INITIAL_CWND_OVERRIDE
1644             pcb->cwnd = LWIP_MIN(pcb->iw, (tcpwnd_size_t)pcb->mss<<1);
1645 #else
1646             pcb->cwnd = (tcpwnd_size_t)(pcb->mss * LWIP_INITIAL_CWND_OVERRIDE);
1647 #endif
1648 
1649             LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F
1650                                          " ssthresh %"TCPWNDSIZE_F"\n",
1651                                          pcb->cwnd, pcb->ssthresh));
1652             pcb->bytes_acked = 0;
1653             /*
1654              4)  Retransmit timeouts:
1655                  After a retransmit timeout, record the highest sequence number
1656                  transmitted in the variable recover, and exit the fast recovery
1657                  procedure if applicable.
1658             */
1659             tcp_clear_flags(pcb, TF_INFR);
1660             /* The following needs to be called AFTER cwnd is set to one
1661                mss - STJ */
1662             tcp_rexmit_rto_commit(pcb);
1663             /* RFC 6582 Section 4 : Handling Duplicate Acknowledgments after a Timeout.. */
1664             pcb->fast_recovery_point = pcb->snd_nxt;
1665           }
1666         }
1667       }
1668     }
1669 
1670 #if DRIVER_STATUS_CHECK
1671     if (pcb->drv_status == DRV_NOT_READY) {
1672       /* iterate through netifs */
1673       for (netif = netif_list; netif != NULL; netif = netif->next) {
1674         /* is this pcb using an address on this netif? */
1675         if (tcp_is_netif_addr_check_success(pcb, netif)) {
1676           if (netif->waketime > DRIVER_WAKEUP_COUNT) {
1677             /* waketime already incremented past the limit while processing a previous pcb, so remove this pcb */
1678             LWIP_DEBUGF(DRV_STS_DEBUG, ("Driver Wake time max count (%d) reached. Removing PCB\n", netif->waketime));
1679             pcb_remove++;
1680           }
1681           break;
1682         }
1683       }
1684     }
1685 #endif /* DRIVER_STATUS_CHECK */
1686 
1687     /* Check if this PCB has stayed too long in FIN-WAIT-2 */
1688     if (pcb->state == FIN_WAIT_2) {
1689       /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */
1690       if (pcb->flags & TF_RXCLOSED) {
1691         /* PCB was fully closed (either through close() or SHUT_RDWR):
1692            normal FIN-WAIT timeout handling. */
1693         if ((u32_t)(tcp_ticks - pcb->tmr) >
1694             TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) {
1695           ++pcb_remove;
1696           LWIP_DEBUGF(TCP_DEBUG | TCP_ERR_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n"));
1697         }
1698       }
1699     }
1700 
1701     /* Check if KEEPALIVE should be sent */
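    /* Two thresholds are checked below: once the connection has been idle for
       keep_idle plus the total keepalive duration, it is aborted with a reset;
       otherwise, once keep_idle plus keep_cnt_sent probe intervals have elapsed,
       another keepalive probe is sent. */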
1702     if (ip_get_option(pcb, SOF_KEEPALIVE) &&
1703         ((pcb->state == ESTABLISHED) ||
1704          (pcb->state == CLOSE_WAIT))) {
1705       if ((u32_t)(tcp_ticks - pcb->tmr) >
1706           (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL) {
1707         LWIP_DEBUGF(TCP_DEBUG | TCP_ERR_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. Aborting connection to "));
1708         ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip);
1709         LWIP_DEBUGF(TCP_DEBUG | TCP_ERR_DEBUG, ("\n"));
1710 
1711         ++pcb_remove;
1712         ++pcb_reset;
1713       } else if ((u32_t)(tcp_ticks - pcb->tmr) >
1714                  (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb))
1715                  / TCP_SLOW_INTERVAL) {
1716         err = tcp_keepalive(pcb);
1717         if (err == ERR_OK) {
1718           pcb->keep_cnt_sent++;
1719         }
1720       }
1721     }
1722 
1723     /* If this PCB has queued out-of-sequence data but has been
1724        inactive for too long, drop the data (it will eventually
1725        be retransmitted). */
1726 #if TCP_QUEUE_OOSEQ
1727     if (pcb->ooseq != NULL &&
1728         (tcp_ticks - pcb->tmr >= (u32_t)pcb->rto * TCP_OOSEQ_TIMEOUT)) {
1729       LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n"));
1730       tcp_free_ooseq(pcb);
1731     }
1732 #endif /* TCP_QUEUE_OOSEQ */
1733 
1734     /* Check if this PCB has stayed too long in SYN-RCVD */
1735     if (pcb->state == SYN_RCVD) {
1736       if ((u32_t)(tcp_ticks - pcb->tmr) >
1737           TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) {
1738         ++pcb_remove;
1739         LWIP_DEBUGF(TCP_DEBUG | TCP_ERR_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n"));
1740       }
1741     }
1742 
1743     /* Check if this PCB has stayed too long in LAST-ACK */
1744     if (pcb->state == LAST_ACK) {
1745       if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) {
1746         ++pcb_remove;
1747         LWIP_DEBUGF(TCP_DEBUG | TCP_ERR_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n"));
1748       }
1749     }
1750 
1751     /* If the PCB should be removed, do it. */
1752     if (pcb_remove) {
1753       struct tcp_pcb *pcb2;
1754 #if LWIP_CALLBACK_API
1755       tcp_err_fn err_fn = pcb->errf;
1756 #endif /* LWIP_CALLBACK_API */
1757       void *err_arg;
1758       enum tcp_state last_state;
1759       tcp_pcb_purge(pcb);
1760       /* Remove PCB from tcp_active_pcbs list. */
1761       if (prev != NULL) {
1762         LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs);
1763         prev->next = pcb->next;
1764       } else {
1765         /* This PCB was the first. */
1766         LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb);
1767         tcp_active_pcbs = pcb->next;
1768       }
1769 
1770       if (pcb_reset) {
1771         tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
1772                 pcb->local_port, pcb->remote_port);
1773       }
1774 
1775       err_arg = pcb->callback_arg;
1776       last_state = pcb->state;
1777       pcb2 = pcb;
1778       pcb = pcb->next;
1779       tcp_free(pcb2);
1780 
1781       tcp_active_pcbs_changed = 0;
1782       if (!connect_timeout) {
1783         TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT);
1784       } else {
1785         TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_CONNECTIMEOUT);
1786       }
1787       if (tcp_active_pcbs_changed) {
1788         goto tcp_slowtmr_start;
1789       }
1790     } else {
1791       /* get the 'next' element now and work with 'prev' below (in case of abort) */
1792       prev = pcb;
1793       pcb = pcb->next;
1794 
1795       /* We check if we should poll the connection. */
1796       ++prev->polltmr;
1797       if (prev->polltmr >= prev->pollinterval) {
1798         prev->polltmr = 0;
1799         LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n"));
1800         tcp_active_pcbs_changed = 0;
1801         TCP_EVENT_POLL(prev, err);
1802         if (tcp_active_pcbs_changed) {
1803           goto tcp_slowtmr_start;
1804         }
1805         /* if err == ERR_ABRT, 'prev' is already deallocated */
1806         if (err == ERR_OK) {
1807           tcp_output(prev);
1808         }
1809       }
1810     }
1811   }
1812 
1813 
1814   /* Steps through all of the TIME-WAIT PCBs. */
1815   prev = NULL;
1816   pcb = tcp_tw_pcbs;
1817   while (pcb != NULL) {
1818     LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT);
1819     pcb_remove = 0;
1820 
1821     /* Check if this PCB has stayed long enough in TIME-WAIT */
1822     if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) {
1823       ++pcb_remove;
1824     }
1825 
1826     /* If the PCB should be removed, do it. */
1827     if (pcb_remove) {
1828       struct tcp_pcb *pcb2;
1829       tcp_pcb_purge(pcb);
1830       /* Remove PCB from tcp_tw_pcbs list. */
1831       if (prev != NULL) {
1832         LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs);
1833         prev->next = pcb->next;
1834       } else {
1835         /* This PCB was the first. */
1836         LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb);
1837         tcp_tw_pcbs = pcb->next;
1838       }
1839       pcb2 = pcb;
1840       pcb = pcb->next;
1841       tcp_free(pcb2);
1842     } else {
1843       prev = pcb;
1844       pcb = pcb->next;
1845     }
1846   }
1847 }
1848 
1849 /**
1850  * Called every TCP_FAST_INTERVAL (250 ms); processes data previously
1851  * "refused" by the upper layer (application) and sends delayed ACKs or pending FINs.
1852  *
1853  * Automatically called from tcp_tmr().
1854  */
1855 void
1856 tcp_fasttmr(void)
1857 {
1858   err_t err;
1859   struct tcp_pcb *pcb;
1860 #if LWIP_TCP_TLP_SUPPORT
1861   u32_t time_now = sys_now();
1862 #endif
1863   ++tcp_timer_ctr;
1864 
1865 tcp_fasttmr_start:
1866   pcb = tcp_active_pcbs;
1867 
1868   while (pcb != NULL) {
1869 #if LWIP_TCP_TLP_SUPPORT
1870     if (pcb->tlp_time_stamp) {
1871       LWIP_DEBUGF(TCP_TLP_DEBUG, ("tcp_fasttmr: pcb %p, PTO left %"S32_F", sys_now %"U32_F"\n",
1872                   pcb, (s32_t)(pcb->tlp_time_stamp - time_now), time_now));
1873       if ((s32_t)(pcb->tlp_time_stamp - time_now) <= 0) {
1874         pcb->tlp_pto_cnt++;
1875         tcp_pto_fire(pcb);
1876         if (pcb->tlp_pto_cnt >= TCP_TLP_MAX_PROBE_CNT) {
1877           LWIP_TCP_TLP_CLEAR_VARS(pcb);
1878           if ((pcb->unacked != NULL) && (pcb->rtime == -1)) {
1879             pcb->rtime = 0;
1880           }
1881         }
1882       }
1883     }
1884 #endif /* LWIP_TCP_TLP_SUPPORT */
1885 
1886     if (pcb->last_timer != tcp_timer_ctr) {
1887       struct tcp_pcb *next;
1888       pcb->last_timer = tcp_timer_ctr;
1889       /* send delayed ACKs */
1890       if (pcb->flags & TF_ACK_DELAY) {
1891         LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n"));
1892         tcp_ack_now(pcb);
1893         tcp_output(pcb);
1894         tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
1895       }
1896       /* send pending FIN */
1897       if (pcb->flags & TF_CLOSEPEND) {
1898         LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n"));
1899         tcp_clear_flags(pcb, TF_CLOSEPEND);
1900         tcp_close_shutdown_fin(pcb);
1901       }
1902 
1903       next = pcb->next;
1904 
1905       /* If there is data which was previously "refused" by upper layer */
1906       if (pcb->refused_data != NULL) {
1907         tcp_active_pcbs_changed = 0;
1908         tcp_process_refused_data(pcb);
1909         if (tcp_active_pcbs_changed) {
1910           /* application callback has changed the pcb list: restart the loop */
1911           goto tcp_fasttmr_start;
1912         }
1913       }
1914 
1915       if (pcb->tcp_pcb_flag & TCP_PBUF_FLAG_TCP_FIN_RECV_SYSPOST_FAIL) {
1916         TCP_EVENT_CLOSED(pcb, err);
1917         if (err == ERR_OK) {
1918           pcb->tcp_pcb_flag = (u8_t)(pcb->tcp_pcb_flag & (~TCP_PBUF_FLAG_TCP_FIN_RECV_SYSPOST_FAIL));
1919         }
1920       }
1921 
1922       pcb = next;
1923     } else {
1924       pcb = pcb->next;
1925     }
1926   }
1927 }
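
/* Usage sketch (illustrative only, not part of this file): in a NO_SYS-style setup
   without the lwIP timeout framework, an application main loop might drive
   tcp_fasttmr()/tcp_slowtmr() through tcp_tmr() roughly every TCP_TMR_INTERVAL
   milliseconds. The loop structure and poll_driver() are hypothetical. */
#if 0 /* example only */
void example_tcp_timer_loop(void)
{
  u32_t last_tcp_tmr = sys_now();
  while (1) {
    poll_driver(); /* hypothetical: fetch received frames and feed them to the stack */
    if ((u32_t)(sys_now() - last_tcp_tmr) >= TCP_TMR_INTERVAL) {
      last_tcp_tmr = sys_now();
      tcp_tmr(); /* calls tcp_fasttmr() each time and tcp_slowtmr() every second call */
    }
  }
}
#endif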
1928 
1929 /** Call tcp_output for all active pcbs that have TF_NAGLEMEMERR set */
1930 void
1931 tcp_txnow(void)
1932 {
1933   struct tcp_pcb *pcb;
1934 
1935   for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
1936     if (pcb->flags & TF_NAGLEMEMERR) {
1937       tcp_output(pcb);
1938     }
1939   }
1940 }
1941 
1942 /** Pass pcb->refused_data to the recv callback */
1943 err_t
1944 tcp_process_refused_data(struct tcp_pcb *pcb)
1945 {
1946 #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1947   struct pbuf *rest;
1948 #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1949 
1950   LWIP_ERROR("tcp_process_refused_data: invalid pcb", pcb != NULL, return ERR_ARG);
1951 
1952 #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1953   while (pcb->refused_data != NULL)
1954 #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1955   {
1956     err_t err;
1957     u8_t refused_flags = pcb->refused_data->flags;
1958     /* set pcb->refused_data to NULL in case the callback frees it and then
1959        closes the pcb */
1960     struct pbuf *refused_data = pcb->refused_data;
1961 #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1962     pbuf_split_64k(refused_data, &rest);
1963     pcb->refused_data = rest;
1964 #else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1965     pcb->refused_data = NULL;
1966 #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1967     /* Notify again application with data previously received. */
1968     LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n"));
1969     TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err);
1970     if (err == ERR_OK) {
1971       /* did refused_data include a FIN? */
1972       if ((refused_flags & PBUF_FLAG_TCP_FIN)
1973 #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1974           && (rest == NULL)
1975 #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1976          ) {
1977         /* correct rcv_wnd as the application won't call tcp_recved()
1978            for the FIN's seqno */
1979         if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) {
1980           pcb->rcv_wnd++;
1981         }
1982         TCP_EVENT_CLOSED(pcb, err);
1983         if (err == ERR_ABRT) {
1984           return ERR_ABRT;
1985         }
1986         else if (err == ERR_OK) {
1987           pcb->tcp_pcb_flag = (u8_t)(pcb->tcp_pcb_flag & (~TCP_PBUF_FLAG_TCP_FIN_RECV_SYSPOST_FAIL));
1988         }
1989         else {
1990           pcb->tcp_pcb_flag |= TCP_PBUF_FLAG_TCP_FIN_RECV_SYSPOST_FAIL;
1991         }
1992       }
1993     } else if (err == ERR_ABRT) {
1994       /* if err == ERR_ABRT, 'pcb' is already deallocated */
1995       /* Drop incoming packets because pcb is "full" (only if the incoming
1996          segment contains data). */
1997       LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n"));
1998       return ERR_ABRT;
1999     } else {
2000       /* data is still refused, pbuf is still valid (go on for ACK-only packets) */
2001 #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
2002       if (rest != NULL) {
2003         pbuf_cat(refused_data, rest);
2004       }
2005 #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
2006       pcb->refused_data = refused_data;
2007       return ERR_INPROGRESS;
2008     }
2009   }
2010   return ERR_OK;
2011 }
2012 
2013 #if LWIP_SACK_PERF_OPT
2014 /*
2015  * Deallocates a list of fast-retransmit tracking entries (tcp_sack_fast_rxmited structures).
2016  *
2017  * @param seg head of the fr_segs list to free
2018  */
2019 void
2020 tcp_fr_segs_free(struct tcp_sack_fast_rxmited *seg)
2021 {
2022   while (seg != NULL) {
2023     struct tcp_sack_fast_rxmited *next = seg->next;
2024     mem_free(seg);
2025     seg = next;
2026   }
2027 }
2028 #endif
2029 
2030 /**
2031  * Deallocates a list of TCP segments (tcp_seg structures).
2032  *
2033  * @param seg tcp_seg list of TCP segments to free
2034  */
2035 void
2036 tcp_segs_free(struct tcp_seg *seg)
2037 {
2038   while (seg != NULL) {
2039     struct tcp_seg *next = seg->next;
2040     tcp_seg_free(seg);
2041     seg = next;
2042   }
2043 }
2044 
2045 /**
2046  * Frees a TCP segment (tcp_seg structure).
2047  *
2048  * @param seg single tcp_seg to free
2049  */
2050 void
2051 tcp_seg_free(struct tcp_seg *seg)
2052 {
2053   if (seg != NULL) {
2054     if (seg->p != NULL) {
2055       pbuf_free(seg->p);
2056 #if TCP_DEBUG
2057       seg->p = NULL;
2058 #endif /* TCP_DEBUG */
2059     }
2060     memp_free(MEMP_TCP_SEG, seg);
2061   }
2062 }
2063 
2064 /**
2065  * @ingroup tcp
2066  * Sets the priority of a connection.
2067  *
2068  * @param pcb the tcp_pcb to manipulate
2069  * @param prio new priority
2070  */
2071 void
2072 tcp_setprio(struct tcp_pcb *pcb, u8_t prio)
2073 {
2074   LWIP_ASSERT_CORE_LOCKED();
2075 
2076   LWIP_ERROR("tcp_setprio: invalid pcb", pcb != NULL, return);
2077 
2078   pcb->prio = prio;
2079 }
2080 
2081 #if TCP_QUEUE_OOSEQ
2082 /**
2083  * Returns a copy of the given TCP segment.
2084  * The pbuf and data are not copied, only the pointers are.
2085  *
2086  * @param seg the old tcp_seg
2087  * @return a copy of seg
2088  */
2089 struct tcp_seg *
2090 tcp_seg_copy(struct tcp_seg *seg)
2091 {
2092   struct tcp_seg *cseg;
2093 
2094   LWIP_ASSERT("tcp_seg_copy: invalid seg", seg != NULL);
2095 
2096   cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG);
2097   if (cseg == NULL) {
2098     return NULL;
2099   }
2100   SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg));
2101   pbuf_ref(cseg->p);
2102   return cseg;
2103 }
2104 #endif /* TCP_QUEUE_OOSEQ */
2105 
2106 #if LWIP_CALLBACK_API
2107 /**
2108  * Default receive callback that is called if the user didn't register
2109  * a recv callback for the pcb.
2110  */
2111 err_t
2112 tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err)
2113 {
2114   LWIP_UNUSED_ARG(arg);
2115 
2116   LWIP_ERROR("tcp_recv_null: invalid pcb", pcb != NULL, return ERR_ARG);
2117 
2118   if (p != NULL) {
2119     tcp_recved(pcb, p->tot_len);
2120     pbuf_free(p);
2121   } else if (err == ERR_OK) {
2122     return tcp_close(pcb);
2123   }
2124   return ERR_OK;
2125 }
2126 #endif /* LWIP_CALLBACK_API */
2127 
2128 /**
2129  * Kills the oldest active connection that has a lower priority than 'prio'.
2130  *
2131  * @param prio minimum priority
2132  */
2133 static void
2134 tcp_kill_prio(u8_t prio)
2135 {
2136   struct tcp_pcb *pcb, *inactive;
2137   u32_t inactivity;
2138   u8_t mprio;
2139 
2140   mprio = LWIP_MIN(TCP_PRIO_MAX, prio);
2141 
2142   /* We want to kill connections with a lower prio, so bail out if
2143    * supplied prio is 0 - there can never be a lower prio
2144    */
2145   if (mprio == 0) {
2146     return;
2147   }
2148 
2149   /* We only want to kill connections with a lower prio, so decrement prio by one
2150    * and start searching for the oldest connection with the same or lower priority than mprio.
2151    * We want to find the connections with the lowest possible prio, and among
2152    * these the one with the longest inactivity time.
2153    */
2154   mprio--;
2155 
2156   inactivity = 0;
2157   inactive = NULL;
2158   for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
2159     /* lower prio is always a kill candidate */
2160     if ((pcb->prio < mprio) ||
2161         /* longer inactivity is also a kill candidate */
2162         ((pcb->prio == mprio) && ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity))) {
2163       inactivity = tcp_ticks - pcb->tmr;
2164       inactive   = pcb;
2165       mprio      = pcb->prio;
2166     }
2167   }
2168   if (inactive != NULL) {
2169     LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n",
2170                             (void *)inactive, inactivity));
2171     tcp_abort(inactive);
2172   }
2173 }
2174 
2175 /**
2176  * Kills the oldest connection that is in a specific state.
2177  * Called from tcp_alloc() for LAST_ACK and CLOSING if no more connections are available.
2178  */
2179 static void
2180 tcp_kill_state(enum tcp_state state)
2181 {
2182   struct tcp_pcb *pcb, *inactive;
2183   u32_t inactivity;
2184 
2185   LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK));
2186 
2187   inactivity = 0;
2188   inactive = NULL;
2189   /* Go through the list of active pcbs and get the oldest pcb that is in state
2190      CLOSING/LAST_ACK. */
2191   for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
2192     if (pcb->state == state) {
2193       if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) {
2194         inactivity = tcp_ticks - pcb->tmr;
2195         inactive = pcb;
2196       }
2197     }
2198   }
2199   if (inactive != NULL) {
2200     LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n",
2201                             tcp_state_str[state], (void *)inactive, inactivity));
2202     /* Don't send a RST, since no data is lost. */
2203     tcp_abandon(inactive, 0);
2204   }
2205 }
2206 
2207 /**
2208  * Kills the oldest connection that is in TIME_WAIT state.
2209  * Called from tcp_alloc() if no more connections are available.
2210  */
2211 static void
2212 tcp_kill_timewait(void)
2213 {
2214   struct tcp_pcb *pcb, *inactive;
2215   u32_t inactivity;
2216 
2217   inactivity = 0;
2218   inactive = NULL;
2219   /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */
2220   for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
2221     if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) {
2222       inactivity = tcp_ticks - pcb->tmr;
2223       inactive = pcb;
2224     }
2225   }
2226   if (inactive != NULL) {
2227     LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n",
2228                             (void *)inactive, inactivity));
2229     tcp_abort(inactive);
2230   }
2231 }
2232 
2233 /* Called when allocating a pcb fails.
2234  * In this case, we want to handle all pcbs that want to close first: if we can
2235  * now send the FIN (which failed before), the pcb might be in a state that is
2236  * OK for us to now free it.
2237  */
2238 static void
2239 tcp_handle_closepend(void)
2240 {
2241   struct tcp_pcb *pcb = tcp_active_pcbs;
2242 
2243   while (pcb != NULL) {
2244     struct tcp_pcb *next = pcb->next;
2245     /* send pending FIN */
2246     if (pcb->flags & TF_CLOSEPEND) {
2247       LWIP_DEBUGF(TCP_DEBUG, ("tcp_handle_closepend: pending FIN\n"));
2248       tcp_clear_flags(pcb, TF_CLOSEPEND);
2249       tcp_close_shutdown_fin(pcb);
2250     }
2251     pcb = next;
2252   }
2253 }
2254 
2255 /**
2256  * Allocate a new tcp_pcb structure.
2257  *
2258  * @param prio priority for the new pcb
2259  * @return a new tcp_pcb that initially is in state CLOSED
2260  */
2261 struct tcp_pcb *
2262 tcp_alloc(u8_t prio)
2263 {
2264   struct tcp_pcb *pcb;
2265 
2266   LWIP_ASSERT_CORE_LOCKED();
2267 
2268   pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
2269   if (pcb == NULL) {
2270     /* Try to send FIN for all pcbs stuck in TF_CLOSEPEND first */
2271     tcp_handle_closepend();
2272 
2273     /* Try killing oldest connection in TIME-WAIT. */
2274     LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n"));
2275     tcp_kill_timewait();
2276     /* Try to allocate a tcp_pcb again. */
2277     pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
2278     if (pcb == NULL) {
2279       /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */
2280       LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n"));
2281       tcp_kill_state(LAST_ACK);
2282       /* Try to allocate a tcp_pcb again. */
2283       pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
2284       if (pcb == NULL) {
2285         /* Try killing oldest connection in CLOSING. */
2286         LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n"));
2287         tcp_kill_state(CLOSING);
2288         /* Try to allocate a tcp_pcb again. */
2289         pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
2290         if (pcb == NULL) {
2291           /* Try killing oldest active connection with lower priority than the new one. */
2292           LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing oldest connection with prio lower than %d\n", prio));
2293           tcp_kill_prio(prio);
2294           /* Try to allocate a tcp_pcb again. */
2295           pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
2296           if (pcb != NULL) {
2297             /* adjust err stats: memp_malloc failed multiple times before */
2298             MEMP_STATS_DEC(err, MEMP_TCP_PCB);
2299           }
2300         }
2301         if (pcb != NULL) {
2302           /* adjust err stats: memp_malloc failed multiple times before */
2303           MEMP_STATS_DEC(err, MEMP_TCP_PCB);
2304         }
2305       }
2306       if (pcb != NULL) {
2307         /* adjust err stats: memp_malloc failed multiple times before */
2308         MEMP_STATS_DEC(err, MEMP_TCP_PCB);
2309       }
2310     }
2311     if (pcb != NULL) {
2312       /* adjust err stats: memp_malloc failed above */
2313       MEMP_STATS_DEC(err, MEMP_TCP_PCB);
2314     }
2315   }
2316   if (pcb != NULL) {
2317     /* zero out the whole pcb, so there is no need to initialize members to zero */
2318     (void)memset_s(pcb, sizeof(struct tcp_pcb), 0, sizeof(struct tcp_pcb));
2319     pcb->prio = prio;
2320     pcb->snd_buf = TCP_SND_BUF;
2321 #if LWIP_SO_SNDBUF
2322     pcb->snd_buf_static = TCP_SND_BUF;
2323 #endif
2324 
2325     pcb->snd_queuelen_max = TCP_SND_QUEUELEN;
2326     pcb->snd_queuelen_lowat = pcb->snd_queuelen_max >> 1;
2327 #if LWIP_SO_SNDBUF
2328     pcb->snd_buf_lowat = pcb->snd_buf_static >> 1;
2329 #else
2330     pcb->snd_buf_lowat = pcb->snd_buf >> 1;
2331 #endif
2332     /* Start with a window that does not need scaling. When window scaling is
2333        enabled and used, the window is enlarged when both sides agree on scaling. */
2334     pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND);
2335 #if PBUF_RX_RATIONING
2336     if (pbuf_ram_in_shortage() != lwIP_FALSE) {
2337       pcb->rcv_ann_wnd = LWIP_MIN(pcb->rcv_ann_wnd, INITIAL_MSS);
2338     }
2339 #endif
2340     pcb->ttl = TCP_TTL;
2341     /* As initial send MSS, we use TCP_MSS but limit it to 536.
2342        The send MSS is updated when an MSS option is received. */
2343     pcb->mss = INITIAL_MSS;
2344     pcb->rcv_mss = INITIAL_MSS; /* Default value if MSS option is not received from peer. */
2345 #if LWIP_TCP_MAXSEG
2346     pcb->usr_mss = 0; /* zero means usr_mss is not set by user */
2347 #endif /* LWIP_TCP_MAXSEG */
2348     /*
2349       RFC 6298 section 2.1
2350       Until a round-trip time (RTT) measurement has been made for a
2351       segment sent between the sender and receiver, the sender SHOULD
2352       set RTO <- 1 second, though the "backing off" on repeated
2353       retransmission discussed in (5.5) still applies.
2354     */
2355     pcb->rto = (TCP_INITIAL_RTO_DURATION / TCP_SLOW_INTERVAL);
2356     pcb->sv = 0;
2357     pcb->sa = -1; /* -1 means no valid RTT sample */
2358     pcb->rtime = -1;
2359     pcb->cwnd = 1;
2360     pcb->tmr = tcp_ticks;
2361     pcb->last_timer = tcp_timer_ctr;
2362     pcb->persist_probe = 0;
2363 
2364 #if LWIP_TCP_TLP_SUPPORT
2365     LWIP_TCP_TLP_CLEAR_VARS(pcb);
2366 #endif /* LWIP_TCP_TLP_SUPPORT */
2367 
2368     LWIP_TCP_CALC_INITIAL_CWND(pcb->mss, pcb->iw);
2369     pcb->cwnd = pcb->iw;
2370     pcb->bytes_acked = 0;
2371 
2372     /* RFC 5681 recommends setting ssthresh arbitrarily high and gives an example
2373     of using the largest advertised receive window.  We've seen complications with
2374     receiving TCPs that use window scaling and/or window auto-tuning where the
2375     initial advertised window is very small and then grows rapidly once the
2376     connection is established. To avoid these complications, we set ssthresh to the
2377     largest effective cwnd (amount of in-flight data) that the sender can have. */
2378     pcb->ssthresh = TCP_SND_BUF;
2379 
2380 #if LWIP_CALLBACK_API
2381     pcb->recv = tcp_recv_null;
2382 #endif /* LWIP_CALLBACK_API */
2383 
2384     /* Init KEEPALIVE timer */
2385     pcb->keep_idle  = TCP_KEEPIDLE_DEFAULT;
2386 
2387 #if LWIP_TCP_KEEPALIVE
2388     pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT;
2389     pcb->keep_cnt   = TCP_KEEPCNT_DEFAULT;
2390 #endif /* LWIP_TCP_KEEPALIVE */
2391     pcb->keep_cnt_sent = 0;
2392     pcb->tcp_pcb_flag = 0;
2393 #if LWIP_SACK
2394     pcb->sacked = 0;
2395 #endif
2396   }
2397   return pcb;
2398 }
2399 
2400 /**
2401  * @ingroup tcp_raw
2402  * Creates a new TCP protocol control block but doesn't place it on
2403  * any of the TCP PCB lists.
2404  * The pcb is not put on any list until binding using tcp_bind().
2405  * If memory is not available for creating the new pcb, NULL is returned.
2406  * @see MEMP_NUM_TCP_PCB_LISTEN and MEMP_NUM_TCP_PCB
2407  *
2408  * @internal: Maybe there should be an idle TCP PCB list where these
2409  * PCBs are put. Port reservation using tcp_bind() is implemented, but
2410  * allocated pcbs that are not bound can't be killed automatically when trying
2411  * to allocate a pcb with a higher prio (@see tcp_kill_prio())
2412  *
2413  * @return a new tcp_pcb that initially is in state CLOSED
2414  */
2415 struct tcp_pcb *
2416 tcp_new(void)
2417 {
2418   return tcp_alloc(TCP_PRIO_NORMAL);
2419 }
2420 
2421 /**
2422  * @ingroup tcp_raw
2423  * Creates a new TCP protocol control block but doesn't
2424  * place it on any of the TCP PCB lists.
2425  * The pcb is not put on any list until binding using tcp_bind().
2426  * @see MEMP_NUM_TCP_PCB_LISTEN and MEMP_NUM_TCP_PCB
2427  *
2428  * @param type IP address type, see @ref lwip_ip_addr_type definitions.
2429  * If you want to listen to IPv4 and IPv6 (dual-stack) connections,
2430  * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE.
2431  * @return a new tcp_pcb that initially is in state CLOSED
2432  */
2433 struct tcp_pcb *
2434 tcp_new_ip_type(u8_t type)
2435 {
2436   struct tcp_pcb *pcb;
2437   pcb = tcp_alloc(TCP_PRIO_NORMAL);
2438 #if LWIP_IPV4 && LWIP_IPV6
2439   if (pcb != NULL) {
2440     IP_SET_TYPE_VAL(pcb->local_ip, type);
2441     IP_SET_TYPE_VAL(pcb->remote_ip, type);
2442   }
2443 #else
2444   LWIP_UNUSED_ARG(type);
2445 #endif /* LWIP_IPV4 && LWIP_IPV6 */
2446   return pcb;
2447 }
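
/* Usage sketch (illustrative only): creating a dual-stack pcb as described above
   and binding it so both IPv4 and IPv6 peers can connect. Error handling omitted. */
#if 0 /* example only */
static void example_new_dualstack(void)
{
  struct tcp_pcb *pcb = tcp_new_ip_type(IPADDR_TYPE_ANY);
  if (pcb != NULL) {
    (void)tcp_bind(pcb, IP_ANY_TYPE, 80); /* port 80 is an arbitrary example */
  }
}
#endif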
2448 
2449 /**
2450  * @ingroup tcp_raw
2451  * Specifies the program specific state that should be passed to all
2452  * other callback functions. The "pcb" argument is the current TCP
2453  * connection control block, and the "arg" argument is the argument
2454  * that will be passed to the callbacks.
2455  *
2456  * @param pcb tcp_pcb to set the callback argument
2457  * @param arg void pointer argument to pass to callback functions
2458  */
2459 void
2460 tcp_arg(struct tcp_pcb *pcb, void *arg)
2461 {
2462   LWIP_ASSERT_CORE_LOCKED();
2463   /* This function is allowed to be called for both listen pcbs and
2464      connection pcbs. */
2465   if (pcb != NULL) {
2466     pcb->callback_arg = arg;
2467   }
2468 }
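
/* Usage sketch (illustrative only): per-connection application state handed to
   every callback via tcp_arg(). The struct and its field are hypothetical. */
#if 0 /* example only */
struct example_conn_state {
  u32_t bytes_received;
};

static void example_attach_state(struct tcp_pcb *pcb, struct example_conn_state *state)
{
  tcp_arg(pcb, state); /* 'state' is passed back as the 'arg' parameter of each callback */
}
#endif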
2469 #if LWIP_CALLBACK_API
2470 
2471 /**
2472  * @ingroup tcp_raw
2473  * Sets the callback function that will be called when new data
2474  * arrives. The callback function will be passed a NULL pbuf to
2475  * indicate that the remote host has closed the connection. If the
2476  * callback function returns ERR_OK or ERR_ABRT it must have
2477  * freed the pbuf, otherwise it must not have freed it.
2478  *
2479  * @param pcb tcp_pcb to set the recv callback
2480  * @param recv callback function to call for this pcb when data is received
2481  */
2482 void
2483 tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv)
2484 {
2485   LWIP_ASSERT_CORE_LOCKED();
2486   if (pcb != NULL) {
2487     LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN);
2488     pcb->recv = recv;
2489   }
2490 }
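
/* Usage sketch (illustrative only) of a recv callback honoring the contract
   described above: a NULL pbuf means the remote host closed the connection, and
   returning ERR_OK means the pbuf has been freed here. Modeled on tcp_recv_null(). */
#if 0 /* example only */
static err_t example_recv_cb(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if (p != NULL) {
    /* application-specific processing of the payload would go here */
    tcp_recved(tpcb, p->tot_len); /* re-open the receive window */
    pbuf_free(p);                 /* we return ERR_OK, so the pbuf must be freed */
  } else if (err == ERR_OK) {
    return tcp_close(tpcb);       /* NULL pbuf: remote host closed the connection */
  }
  return ERR_OK;
}
#endif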
2491 
2492 /**
2493  * @ingroup tcp_raw
2494  * Specifies the callback function that should be called when data has
2495  * successfully been received (i.e., acknowledged) by the remote
2496  * host. The len argument passed to the callback function gives the
2497  * number of bytes that were acknowledged by the last acknowledgment.
2498  *
2499  * @param pcb tcp_pcb to set the sent callback
2500  * @param sent callback function to call for this pcb when data is successfully sent
2501  */
2502 void
2503 tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent)
2504 {
2505   LWIP_ASSERT_CORE_LOCKED();
2506   if (pcb != NULL) {
2507     LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN);
2508     pcb->sent = sent;
2509   }
2510 }
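
/* Usage sketch (illustrative only): a sent callback that uses the acknowledged
   byte count to resume queueing application data. example_send_more() is a
   hypothetical application function. */
#if 0 /* example only */
static err_t example_sent_cb(void *arg, struct tcp_pcb *tpcb, u16_t len)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(len);
  example_send_more(tpcb); /* hypothetical: more send-buffer space is available now */
  return ERR_OK;
}
#endif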
2511 
2512 /**
2513  * @ingroup tcp_raw
2514  * Used to specify the function that should be called when a fatal error
2515  * has occurred on the connection.
2516  *
2517  * If a connection is aborted because of an error, the application is
2518  * alerted of this event by the err callback. One error that might abort a
2519  * connection is a shortage of memory. The callback
2520  * function to be called is set using the tcp_err() function.
2521  *
2522  * @note The corresponding pcb is already freed when this callback is called!
2523  *
2524  * @param pcb tcp_pcb to set the err callback
2525  * @param err callback function to call for this pcb when a fatal error
2526  *        has occurred on the connection
2527  */
2528 void
2529 tcp_err(struct tcp_pcb *pcb, tcp_err_fn err)
2530 {
2531   LWIP_ASSERT_CORE_LOCKED();
2532   if (pcb != NULL) {
2533     LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN);
2534     pcb->errf = err;
2535   }
2536 }
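
/* Usage sketch (illustrative only): an err callback. As noted above, the pcb is
   already freed when this runs, so only the application state registered via
   tcp_arg() may be released here (assumed allocated with mem_malloc()). */
#if 0 /* example only */
static void example_err_cb(void *arg, err_t err)
{
  LWIP_UNUSED_ARG(err);
  if (arg != NULL) {
    mem_free(arg); /* release per-connection state; never touch the pcb here */
  }
}
#endif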
2537 
2538 /**
2539  * @ingroup tcp_raw
2540  * Used for specifying the function that should be called when a
2541  * LISTENing connection has been connected to another host.
2542  * @see MEMP_NUM_TCP_PCB_LISTEN and MEMP_NUM_TCP_PCB
2543  *
2544  * @param pcb tcp_pcb to set the accept callback
2545  * @param accept callback function to call for this pcb when LISTENing
2546  *        connection has been connected to another host
2547  */
2548 void
2549 tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept)
2550 {
2551   LWIP_ASSERT_CORE_LOCKED();
2552   if ((pcb != NULL) && (pcb->state == LISTEN)) {
2553     struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen *)pcb;
2554     lpcb->accept = accept;
2555   }
2556 }
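
/* Usage sketch (illustrative only): an accept callback wiring up per-connection
   callbacks on the newly connected pcb, assuming callbacks like those sketched
   above (example_recv_cb, example_sent_cb, example_err_cb). */
#if 0 /* example only */
static err_t example_accept_cb(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if ((err != ERR_OK) || (newpcb == NULL)) {
    return ERR_VAL;
  }
  tcp_arg(newpcb, NULL);
  tcp_recv(newpcb, example_recv_cb);
  tcp_sent(newpcb, example_sent_cb);
  tcp_err(newpcb, example_err_cb);
  return ERR_OK;
}
#endif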
2557 #endif /* LWIP_CALLBACK_API */
2558 
2559 
2560 /**
2561  * @ingroup tcp_raw
2562  * Specifies the polling interval and the callback function that should
2563  * be called to poll the application. The interval is specified in
2564  * number of TCP coarse grained timer shots, which typically occurs
2565  * twice a second. An interval of 10 means that the application would
2566  * be polled every 5 seconds.
2567  *
2568  * When a connection is idle (i.e., no data is either transmitted or
2569  * received), lwIP will repeatedly poll the application by calling a
2570  * specified callback function. This can be used either as a watchdog
2571  * timer for killing connections that have stayed idle for too long, or
2572  * as a method of waiting for memory to become available. For instance,
2573  * if a call to tcp_write() has failed because memory wasn't available,
2574  * the application may use the polling functionality to call tcp_write()
2575  * again when the connection has been idle for a while.
2576  */
2577 void
2578 tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval)
2579 {
2580   LWIP_ASSERT_CORE_LOCKED();
2581 
2582   LWIP_ERROR("tcp_poll: invalid pcb", pcb != NULL, return);
2583   LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN);
2584 
2585 #if LWIP_CALLBACK_API
2586   pcb->poll = poll;
2587 #else /* LWIP_CALLBACK_API */
2588   LWIP_UNUSED_ARG(poll);
2589 #endif /* LWIP_CALLBACK_API */
2590   pcb->pollinterval = interval;
2591 }
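
/* Usage sketch (illustrative only): a poll callback used, as described above, to
   retry a tcp_write() that previously failed with ERR_MEM. The pending-data
   bookkeeping (struct example_pending) is hypothetical. */
#if 0 /* example only */
struct example_pending {
  const void *data;
  u16_t len;
};

static err_t example_poll_cb(void *arg, struct tcp_pcb *tpcb)
{
  struct example_pending *pending = (struct example_pending *)arg;
  if ((pending != NULL) && (pending->len > 0)) {
    if (tcp_write(tpcb, pending->data, pending->len, TCP_WRITE_FLAG_COPY) == ERR_OK) {
      pending->len = 0;
      (void)tcp_output(tpcb);
    }
  }
  return ERR_OK;
}
/* registered e.g. with tcp_poll(pcb, example_poll_cb, 4), i.e. roughly every 2 seconds */
#endif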
2592 
2593 /**
2594  * Purges a TCP PCB. Removes any buffered data and frees the buffer memory
2595  * (pcb->ooseq, pcb->unsent and pcb->unacked are freed).
2596  *
2597  * @param pcb tcp_pcb to purge. The pcb itself is not deallocated!
2598  */
2599 void
2600 tcp_pcb_purge(struct tcp_pcb *pcb)
2601 {
2602 #if LWIP_SACK
2603   struct _sack_seq *ptr = NULL;
2604 #endif
2605   LWIP_ERROR("tcp_pcb_purge: invalid pcb", pcb != NULL, return);
2606 
2607   if (pcb->state != CLOSED &&
2608       pcb->state != TIME_WAIT &&
2609       pcb->state != LISTEN) {
2610 
2611     LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n"));
2612 
2613     tcp_backlog_accepted(pcb);
2614 
2615     if (pcb->refused_data != NULL) {
2616       LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n"));
2617       pbuf_free(pcb->refused_data);
2618       pcb->refused_data = NULL;
2619     }
2620     if (pcb->unsent != NULL) {
2621       LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n"));
2622     }
2623     if (pcb->unacked != NULL) {
2624       LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n"));
2625     }
2626 #if TCP_QUEUE_OOSEQ
2627     if (pcb->ooseq != NULL) {
2628       LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n"));
2629       tcp_free_ooseq(pcb);
2630     }
2631 #endif /* TCP_QUEUE_OOSEQ */
2632 
2633     /* Stop the retransmission timer as it will expect data on unacked
2634        queue if it fires */
2635     pcb->rtime = -1;
2636 
2637     tcp_segs_free(pcb->unsent);
2638     tcp_segs_free(pcb->unacked);
2639     pcb->unacked = pcb->unsent = NULL;
2640 #if LWIP_SACK_PERF_OPT
2641     tcp_fr_segs_free(pcb->fr_segs);
2642     pcb->fr_segs = NULL;
2643     pcb->last_frseg = NULL;
2644 #endif
2645 
2646 #if LWIP_SACK
2647     if (pcb->sack_seq != NULL) {
2648       do {
2649         LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: Freeing sack options data\n"));
2650         ptr = pcb->sack_seq->next;
2651         mem_free(pcb->sack_seq);
2652         pcb->sack_seq = ptr;
2653       } while (pcb->sack_seq != NULL);
2654       pcb->sack_seq = NULL;
2655     }
2656 #endif
2657 #if TCP_OVERSIZE
2658     pcb->unsent_oversize = 0;
2659 #endif /* TCP_OVERSIZE */
2660   }
2661 }
2662 
2663 /*
2664  * Removes a listen pcb from a PCB list.
2665  *
2666  * @param pcblist PCB list from which the listen pcb is to be removed
2667  * @param lpcb tcp_pcb_listen to be removed. The pcb itself is NOT deallocated!
2668  * Note: 'lpcb' in 'pcblist' comes from 'tcp_listen_pcbs'
2669  */
2670 static void
2671 tcp_listen_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb_listen *lpcb)
2672 {
2673   TCP_RMV(pcblist, (struct tcp_pcb *)lpcb);
2674 
2675   lpcb->state = CLOSED;
2676   /* reset the local port to prevent the pcb from being 'bound' */
2677   lpcb->local_port = 0;
2678 
2679   LWIP_ASSERT("tcp_listen_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane());
2680 }
2681 
2682 #if LWIP_SMALL_SIZE
2683 void tcp_remove(struct tcp_pcb **pcbs, struct tcp_pcb *npcb)
2684 {
2685   if (*(pcbs) == (npcb)) {
2686     (*(pcbs)) = (*pcbs)->next;
2687   } else {
2688     struct tcp_pcb *tcp_tmp_pcb = NULL;
2689     for (tcp_tmp_pcb = *pcbs;
2690         tcp_tmp_pcb != NULL;
2691         tcp_tmp_pcb = tcp_tmp_pcb->next) {
2692       if (tcp_tmp_pcb->next == (npcb)) {
2693         tcp_tmp_pcb->next = (npcb)->next;
2694         break;
2695       }
2696     }
2697   }
2698   (npcb)->next = NULL;
2699 }
2700 #endif
2701 /**
2702  * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first.
2703  *
2704  * @param pcblist PCB list to purge.
2705  * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated!
2706  */
2707 void
2708 tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb)
2709 {
2710   LWIP_ASSERT("tcp_pcb_remove: invalid pcb", pcb != NULL);
2711   LWIP_ASSERT("tcp_pcb_remove: invalid pcblist", pcblist != NULL);
2712 
2713   TCP_RMV(pcblist, pcb);
2714 
2715   tcp_pcb_purge(pcb);
2716 
2717   /* if there is an outstanding delayed ACK, send it */
2718   if ((pcb->state != TIME_WAIT) &&
2719       (pcb->state != LISTEN) &&
2720       (pcb->flags & TF_ACK_DELAY)) {
2721     tcp_ack_now(pcb);
2722     tcp_output(pcb);
2723   }
2724 
2725   if (pcb->state != LISTEN) {
2726     LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL);
2727     LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL);
2728 #if TCP_QUEUE_OOSEQ
2729     LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL);
2730 #endif /* TCP_QUEUE_OOSEQ */
2731   }
2732 
2733 #if LWIP_SACK
2734   pcb->sacked = 0;
2735 #if LWIP_TCP_TLP_SUPPORT
2736   LWIP_TCP_TLP_CLEAR_VARS(pcb);
2737 #endif /* LWIP_TCP_TLP_SUPPORT */
2738 #endif /* LWIP_SACK */
2739   pcb->state = CLOSED;
2740   /* reset the local port to prevent the pcb from being 'bound' */
2741   pcb->local_port = 0;
2742 
2743   pcb->tcp_pcb_flag = 0;
2744   LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane());
2745 }
2746 
2747 /**
2748  * Calculates a new initial sequence number for new connections.
2749  *
2750  * @return u32_t pseudo random sequence number
2751  */
2752 u32_t
2753 tcp_next_iss(struct tcp_pcb *pcb)
2754 {
2755 #ifdef LWIP_HOOK_TCP_ISN
2756   LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL);
2757   return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port);
2758 #else /* LWIP_HOOK_TCP_ISN */
2759   static u32_t iss = 6510;
2760 
2761   LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL);
2762   LWIP_UNUSED_ARG(pcb);
2763   iss = iss + (u32_t)(LWIP_RAND());
2764 
2765   return iss;
2766 #endif /* LWIP_HOOK_TCP_ISN */
2767 }
2768 
2769 #if TCP_CALCULATE_EFF_SEND_MSS
2770 /**
2771  * Calculates the effective send mss that can be used for a specific IP address
2772  * by calculating the minimum of TCP_MSS and the mtu (if set) of the target
2773  * netif (if not NULL).
2774  */
2775 u16_t
2776 tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, const ip_addr_t *dest)
2777 {
2778   u16_t mss_s;
2779   u16_t mtu;
2780 
2781   LWIP_UNUSED_ARG(dest); /* in case IPv6 is disabled */
2782 
2783   LWIP_ASSERT("tcp_eff_send_mss_netif: invalid dst_ip", dest != NULL);
2784 
2785 #if LWIP_IPV6
2786 #if LWIP_IPV4
2787   if (IP_IS_V6(dest))
2788 #endif /* LWIP_IPV4 */
2789   {
2790     /* First look in destination cache, to see if there is a Path MTU. */
2791     mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif);
2792   }
2793 #if LWIP_IPV4
2794   else
2795 #endif /* LWIP_IPV4 */
2796 #endif /* LWIP_IPV6 */
2797 #if LWIP_IPV4
2798   {
2799     if (outif == NULL) {
2800       /* limit max value which can be supported on the stack */
2801       mss_s = (u16_t)LWIP_MIN(sendmss, (u16_t)(IP_FRAG_MAX_MTU - IP_HLEN - TCP_HLEN));
2802       return mss_s;
2803     }
2804     mtu = outif->mtu;
2805   }
2806 #endif /* LWIP_IPV4 */
2807 
2808   if (mtu != 0) {
2809     u16_t offset;
2810 #if LWIP_IPV6
2811 #if LWIP_IPV4
2812     if (IP_IS_V6(dest))
2813 #endif /* LWIP_IPV4 */
2814     {
2815       offset = IP6_HLEN + TCP_HLEN;
2816     }
2817 #if LWIP_IPV4
2818     else
2819 #endif /* LWIP_IPV4 */
2820 #endif /* LWIP_IPV6 */
2821 #if LWIP_IPV4
2822     {
2823       offset = IP_HLEN + TCP_HLEN;
2824     }
2825 #endif /* LWIP_IPV4 */
2826 
2827 #if LWIP_RIPPLE && LWIP_IPV6
2828     if (IP_IS_V6(dest) && !ip6_addr_islinklocal(ip_2_ip6(dest)) &&
2829         (lwip_rpl_is_router() == lwIP_TRUE) && (lwip_rpl_is_rpl_netif(outif) != lwIP_FALSE)) {
2830       offset += lwip_hbh_len(NULL);
2831     }
2832 #endif
2833     /* MMS_S is the maximum size of a transport-layer message that TCP may send; do not allow an mss of 0 */
2834     mss_s = (u16_t)((mtu > offset) ? (u16_t)(mtu - offset) : (u16_t)TCP_MSS);
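    /* Example (illustrative only): a plain IPv4 netif with mtu = 1500 and
       offset = IP_HLEN + TCP_HLEN = 40 gives mss_s = 1500 - 40 = 1460. */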
2835 
2836     /* RFC 1122, chap 4.2.2.6:
2837      * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize
2838      * We correct for TCP options in tcp_write(), and don't support IP options.
2839      */
2840     sendmss = LWIP_MIN(sendmss, mss_s);
2841   }
2842   return sendmss;
2843 }
2844 #endif /* TCP_CALCULATE_EFF_SEND_MSS */
2845 
2846 /** Helper function for tcp_netif_ip_addr_changed() that iterates a pcb list */
2847 static void
2848 tcp_netif_ip_addr_changed_pcblist(const ip_addr_t *old_addr, struct tcp_pcb *pcb_list)
2849 {
2850   struct tcp_pcb *pcb;
2851   pcb = pcb_list;
2852 
2853   LWIP_ASSERT("tcp_netif_ip_addr_changed_pcblist: invalid old_addr", old_addr != NULL);
2854 
2855   while (pcb != NULL) {
2856     /* PCB bound to current local interface address? */
2857     if (ip_addr_cmp(&pcb->local_ip, old_addr)
2858 #if LWIP_AUTOIP
2859         /* connections to link-local addresses must persist (RFC3927 ch. 1.9) */
2860         && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip)))
2861 #endif /* LWIP_AUTOIP */
2862        ) {
2863       /* this connection must be aborted */
2864       struct tcp_pcb *next = pcb->next;
2865       LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb));
2866       tcp_abort(pcb);
2867       pcb = next;
2868     } else {
2869       pcb = pcb->next;
2870     }
2871   }
2872 }
2873 
2874 /** This function is called from netif.c when the address is changed or the netif is removed
2875  *
2876  * @param old_addr IP address of the netif before change
2877  * @param new_addr IP address of the netif after change or NULL if netif has been removed
2878  */
2879 void
2880 tcp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr)
2881 {
2882   struct tcp_pcb_listen *lpcb;
2883 
2884   if ((old_addr != NULL) && !ip_addr_isany(old_addr)) {
2885     tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs);
2886     tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs);
2887 
2888     if (!ip_addr_isany(new_addr)) {
2889       /* PCB bound to current local interface address? */
2890       for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) {
2891         /* PCB bound to current local interface address? */
2892         if (ip_addr_cmp(&lpcb->local_ip, old_addr)) {
2893           /* The PCB is listening to the old ipaddr and
2894             * is set to listen to the new one instead */
2895           ip_addr_copy(lpcb->local_ip, *new_addr);
2896         }
2897       }
2898     }
2899   }
2900 }
2901 
2902 const char *
2903 tcp_debug_state_str(enum tcp_state s)
2904 {
2905   return tcp_state_str[s];
2906 }
2907 
2908 err_t
2909 tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port)
2910 {
2911   if (pcb) {
2912     if (local) {
2913       if (addr) {
2914         *addr = pcb->local_ip;
2915       }
2916       if (port) {
2917         *port = pcb->local_port;
2918       }
2919     } else {
2920       if (addr) {
2921         *addr = pcb->remote_ip;
2922       }
2923       if (port) {
2924         *port = pcb->remote_port;
2925       }
2926     }
2927     return ERR_OK;
2928   }
2929   return ERR_VAL;
2930 }
2931 
2932 #if TCP_QUEUE_OOSEQ
2933 /* Free all ooseq pbufs (and possibly reset SACK state) */
2934 void
2935 tcp_free_ooseq(struct tcp_pcb *pcb)
2936 {
2937   if (pcb->ooseq) {
2938     tcp_segs_free(pcb->ooseq);
2939     pcb->ooseq = NULL;
2940   }
2941 }
2942 #endif /* TCP_QUEUE_OOSEQ */
2943 
2944 #if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG
2945 /**
2946  * Print a tcp header for debugging purposes.
2947  *
2948  * @param tcphdr pointer to a struct tcp_hdr
2949  */
2950 void
2951 tcp_debug_print(struct tcp_hdr *tcphdr)
2952 {
2953   LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n"));
2954   LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2955   LWIP_DEBUGF(TCP_DEBUG, ("|    %5"U16_F"      |    %5"U16_F"      | (src port, dest port)\n",
2956                           lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest)));
2957   LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2958   LWIP_DEBUGF(TCP_DEBUG, ("|           %010"U32_F"          | (seq no)\n",
2959                           lwip_ntohl(tcphdr->seqno)));
2960   LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2961   LWIP_DEBUGF(TCP_DEBUG, ("|           %010"U32_F"          | (ack no)\n",
2962                           lwip_ntohl(tcphdr->ackno)));
2963   LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2964   LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" |   |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"|     %5"U16_F"     | (hdrlen, flags (",
2965                           TCPH_HDRLEN(tcphdr),
2966                           (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1),
2967                           (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1),
2968                           (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1),
2969                           (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1),
2970                           (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1),
2971                           (u16_t)(TCPH_FLAGS(tcphdr)      & 1),
2972                           lwip_ntohs(tcphdr->wnd)));
2973   tcp_debug_print_flags(TCPH_FLAGS(tcphdr));
2974   LWIP_DEBUGF(TCP_DEBUG, ("), win)\n"));
2975   LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2976   LWIP_DEBUGF(TCP_DEBUG, ("|    0x%04"X16_F"     |     %5"U16_F"     | (chksum, urgp)\n",
2977                           lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp)));
2978   LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2979 }
2980 
2981 /**
2982  * Print a tcp state for debugging purposes.
2983  *
2984  * @param s enum tcp_state to print
2985  */
2986 void
2987 tcp_debug_print_state(enum tcp_state s)
2988 {
2989   LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s]));
2990 }
2991 
2992 /**
2993  * Print tcp flags for debugging purposes.
2994  *
2995  * @param flags tcp flags, all active flags are printed
2996  */
2997 void
2998 tcp_debug_print_flags(u8_t flags)
2999 {
3000   if (flags & TCP_FIN) {
3001     LWIP_DEBUGF(TCP_DEBUG, ("FIN "));
3002   }
3003   if (flags & TCP_SYN) {
3004     LWIP_DEBUGF(TCP_DEBUG, ("SYN "));
3005   }
3006   if (flags & TCP_RST) {
3007     LWIP_DEBUGF(TCP_DEBUG, ("RST "));
3008   }
3009   if (flags & TCP_PSH) {
3010     LWIP_DEBUGF(TCP_DEBUG, ("PSH "));
3011   }
3012   if (flags & TCP_ACK) {
3013     LWIP_DEBUGF(TCP_DEBUG, ("ACK "));
3014   }
3015   if (flags & TCP_URG) {
3016     LWIP_DEBUGF(TCP_DEBUG, ("URG "));
3017   }
3018   if (flags & TCP_ECE) {
3019     LWIP_DEBUGF(TCP_DEBUG, ("ECE "));
3020   }
3021   if (flags & TCP_CWR) {
3022     LWIP_DEBUGF(TCP_DEBUG, ("CWR "));
3023   }
3024   LWIP_DEBUGF(TCP_DEBUG, ("\n"));
3025 }
3026 
3027 /**
3028  * Print all tcp_pcbs in every list for debugging purposes.
3029  */
3030 void
3031 tcp_debug_print_pcbs(void)
3032 {
3033   struct tcp_pcb *pcb;
3034   struct tcp_pcb_listen *pcbl;
3035 
3036   LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n"));
3037   for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
3038     LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ",
3039                             pcb->local_port, pcb->remote_port,
3040                             pcb->snd_nxt, pcb->rcv_nxt));
3041     tcp_debug_print_state(pcb->state);
3042   }
3043 
3044   LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n"));
3045   for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) {
3046     LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port));
3047     tcp_debug_print_state(pcbl->state);
3048   }
3049 
3050   LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n"));
3051   for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
3052     LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ",
3053                             pcb->local_port, pcb->remote_port,
3054                             pcb->snd_nxt, pcb->rcv_nxt));
3055     tcp_debug_print_state(pcb->state);
3056   }
3057 }
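
/* Usage sketch: the debug printers above produce output only when debugging is enabled in
 * lwipopts.h, e.g. (assuming the standard lwIP debug options):
 *
 *   #define LWIP_DEBUG
 *   #define TCP_DEBUG    LWIP_DBG_ON
 *
 * An application or debugger hook can then dump all PCB lists with:
 *
 *   tcp_debug_print_pcbs();
 */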
3058 
3059 /**
3060  * Check state consistency of the tcp_pcb lists.
3061  */
3062 s16_t
3063 tcp_pcbs_sane(void)
3064 {
3065   struct tcp_pcb *pcb;
3066   for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
3067     LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED);
3068     LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN);
3069     LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT);
3070   }
3071   for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
3072     LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT);
3073   }
3074   return 1;
3075 }
3076 #endif /* TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG */
3077 
3078 #if LWIP_TCP_INFO
3079 /*
3080   Map an lwIP TCP state to the corresponding Linux TCP state.
3081   This function can be removed once the lwIP TCP states are made consistent with the Linux ones.
3082 */
3083 static u8_t lwip_tcp_state_mapping(enum tcp_state state)
3084 {
3085   u8_t ret;
3086   switch (state) {
3087     case CLOSED:
3088       ret = TCP_CLOSE;
3089       break;
3090     case LISTEN:
3091       ret = TCP_LISTEN;
3092       break;
3093     case SYN_SENT:
3094       ret = TCP_SYN_SENT;
3095       break;
3096     case SYN_RCVD:
3097       ret = TCP_SYN_RECV;
3098       break;
3099     case ESTABLISHED:
3100       ret = TCP_ESTABLISHED;
3101       break;
3102     case FIN_WAIT_1:
3103       ret = TCP_FIN_WAIT1;
3104       break;
3105     case FIN_WAIT_2:
3106       ret = TCP_FIN_WAIT2;
3107       break;
3108     case CLOSE_WAIT:
3109       ret = TCP_CLOSE_WAIT;
3110       break;
3111     case CLOSING:
3112       ret = TCP_CLOSING;
3113       break;
3114     case LAST_ACK:
3115       ret = TCP_LAST_ACK;
3116       break;
3117     case TIME_WAIT:
3118       ret = TCP_TIME_WAIT;
3119       break;
3120     default:
3121       ret = TCP_CLOSE;
3122   }
3123 
3124   return ret;
3125 }
3126 
3127 /*
3128   Fill a struct tcp_info with connection information from the lwIP stack.
3129 */
3130 void tcp_get_info(const struct tcp_pcb *pcb, struct tcp_info *tcpinfo)
3131 {
3132   u32_t unacked = 0;
3133   struct tcp_seg *useg = NULL;
3134   (void)memset_s(tcpinfo, sizeof(struct tcp_info), 0, sizeof(struct tcp_info));
3135 
3136   tcpinfo->tcpi_state = lwip_tcp_state_mapping(pcb->state);
3137 
3138   /* No data for listening socket */
3139   if (pcb->state == LISTEN) {
3140     return;
3141   }
3142 
3143   /* RTO retransmission backoff; tcpi_retransmits equals tcpi_backoff for non-thin streams */
3144   tcpinfo->tcpi_retransmits = pcb->nrtx;
3145   tcpinfo->tcpi_backoff = pcb->nrtx;
3146 
3147   /* Number of keep alive probes or zero window probes */
3148   tcpinfo->tcpi_probes = pcb->keep_cnt_sent ? pcb->keep_cnt_sent : pcb->persist_backoff;
3149 
3150 #if LWIP_TCP_TIMESTAMPS
3151   if (pcb->flags & TF_TIMESTAMP) {
3152     tcpinfo->tcpi_options = tcpinfo->tcpi_options | TCPI_OPT_TIMESTAMPS;
3153   }
3154 #endif
3155 
3156 #if LWIP_SACK
3157   if (pcb->flags & TF_SACK) {
3158     tcpinfo->tcpi_options = tcpinfo->tcpi_options | TCPI_OPT_SACK;
3159   }
3160 #endif
3161 
3162 #if LWIP_WND_SCALE
3163   if (pcb->flags & TF_WND_SCALE) {
3164     tcpinfo->tcpi_options = tcpinfo->tcpi_options | TCPI_OPT_WSCALE;
3165   }
3166 #endif
3167 
3168   /* RTO duration in usec */
3169   tcpinfo->tcpi_rto = ((u32_t)(s32_t)pcb->rto * TCP_SLOW_INTERVAL * 1000U);
3170 
3171   /* send MSS; receive MSS is taken from the received MSS option, otherwise the same value as the send MSS */
3172   tcpinfo->tcpi_snd_mss = pcb->mss;
3173   tcpinfo->tcpi_rcv_mss = pcb->rcv_mss;
3174 
3175   useg = pcb->unacked;
3176 
3177   /* only unacked segments; SACKed or fast-retransmitted segments are not counted */
3178   for (; useg != NULL; useg = useg->next, unacked++);
3179   tcpinfo->tcpi_unacked = unacked;
3180 
3181   unacked = 0;
3182   for (useg = pcb->unsent; useg != NULL; useg = useg->next) {
3183     if ((ntohl(useg->tcphdr->seqno) + (u32_t)(TCP_TCPLEN(useg)) - 1) < pcb->snd_nxt) {
3184       unacked++;
3185     } else {
3186       break;
3187     }
3188   }
3189   tcpinfo->tcpi_unacked += unacked;
3190 
3191   /* smoothed RTT and its variance, converted to microseconds */
3192   if (pcb->sa == -1) {
3193     tcpinfo->tcpi_rtt = 0;
3194     tcpinfo->tcpi_rttvar = 0;
3195   } else {
3196     tcpinfo->tcpi_rtt = (((u32_t)pcb->sa) >> 3) * TCP_SLOW_INTERVAL * US_PER_MSECOND;
3197     tcpinfo->tcpi_rttvar = (((u32_t)pcb->sv) >> 2) * TCP_SLOW_INTERVAL * US_PER_MSECOND;
3198   }
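  /* Worked example (assuming the default TCP_SLOW_INTERVAL of 500 ms and US_PER_MSECOND == 1000):
   * a pcb with sa == 16 has a smoothed RTT of 16 >> 3 = 2 slow-timer ticks, so
   * tcpi_rtt = 2 * 500 * 1000 = 1000000 us, i.e. one second. */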
3199   /* congestion wnd and slow start threshold */
3200   tcpinfo->tcpi_snd_ssthresh = pcb->ssthresh;
3201   tcpinfo->tcpi_snd_cwnd = pcb->cwnd;
3202 
3203   /* reordering metric is reported as the constant DupACK threshold */
3204   tcpinfo->tcpi_reordering = DUPACK_THRESH;
3205 }
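
/* Usage sketch (illustrative; assumes "pcb" is a connected tcp_pcb and that the caller runs in
 * the lwIP core context, as for any raw-API call):
 *
 *   struct tcp_info info;
 *   tcp_get_info(pcb, &info);
 *   LWIP_DEBUGF(TCP_DEBUG, ("state=%u rto=%"U32_F"us rtt=%"U32_F"us cwnd=%"U32_F"\n",
 *                           (unsigned int)info.tcpi_state, (u32_t)info.tcpi_rto,
 *                           (u32_t)info.tcpi_rtt, (u32_t)info.tcpi_snd_cwnd));
 */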
3206 #endif /* LWIP_TCP_INFO */
3207 
3208 #if DRIVER_STATUS_CHECK
3209 unsigned char tcp_is_netif_addr_check_success(const struct tcp_pcb *pcb, const struct netif *netif)
3210 {
3211 #if LWIP_IPV6
3212   if (IP_IS_V6_VAL(pcb->remote_ip)) {
3213     int i = 0;
3214     for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
3215       if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))
3216         && ip6_addr_netcmp(ip_2_ip6(&(pcb->remote_ip)), netif_ip6_addr(netif, i))) {
3217         return 1;
3218       }
3219     }
3220     return 0;
3221   }
3222 #endif /* LWIP_IPV6 */
3223 
3224 #if LWIP_IPV4
3225   if (!ip4_addr_isany_val(*netif_ip4_addr(netif))
3226     && ip4_addr_netcmp(ip_2_ip4(&pcb->remote_ip), ip_2_ip4(&netif->ip_addr), ip_2_ip4(&netif->netmask))) {
3227     return 1;
3228   }
3229 #endif
3230   return 0;
3231 }
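
/* Illustrative example (addresses are hypothetical): with a netif configured as 192.168.1.10
 * netmask 255.255.255.0, a pcb whose remote address is 192.168.1.20 passes the IPv4 check above,
 * while a pcb connected to 10.0.0.5 does not. */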
3232 
3233 void tcp_update_drv_status_to_pcbs(struct tcp_pcb *pcb_list, const struct netif *netif, u8_t status)
3234 {
3235   struct tcp_pcb *pcb = NULL;
3236   for (pcb = pcb_list; pcb != NULL; pcb = pcb->next) {
3237     /* network mask matches? */
3238     if (netif_is_up(netif)) {
3239       if (tcp_is_netif_addr_check_success(pcb, netif)) {
3240         pcb->drv_status = status;
3241       }
3242     }
3243   }
3244 }
3245 
3246 void tcpip_upd_status_to_tcp_pcbs(const struct netif *netif, u8_t status)
3247 {
3248   LWIP_ERROR("netif_set_driver_ready: invalid arguments", (netif != NULL), return);
3249 
3250   tcp_update_drv_status_to_pcbs(tcp_active_pcbs, netif, status);
3251   tcp_update_drv_status_to_pcbs(tcp_tw_pcbs, netif, status);
3252 
3253   return;
3254 }
3255 
3256 void tcp_ip_flush_pcblist_on_wake_queue(const struct netif *netif, struct tcp_pcb *pcb_list, u8_t status)
3257 {
3258   struct tcp_pcb *pcb = NULL;
3259   /* iterate over all PCBs in the list and, for each one on this netif, flush its wake queue with the given status */
3260   for (pcb = pcb_list; pcb != NULL; pcb = pcb->next) {
3261     if (tcp_is_netif_addr_check_success(pcb, netif)) {
3262       tcp_flush_pcb_on_wake_queue(pcb, status);
3263       /* Reset the RTO timer if it is running */
3264       if (pcb->rtime > 0) {
3265         pcb->rtime = 0;
3266       }
3267     }
3268   }
3269 }
3270 
3271 void tcp_ip_event_sendplus_on_wake_queue(const struct netif *netif)
3272 {
3273   struct tcp_pcb *pcb = NULL;
3274   for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
3275     if (tcp_is_netif_addr_check_success(pcb, netif)) {
3276       if (pcb->sndplus && pcb->callback_arg) {
3277         pcb->sndplus(pcb->callback_arg, pcb);
3278       }
3279     }
3280   }
3281 }
3282 
3283 void tcpip_flush_on_wake_queue(const struct netif *netif, u8_t status)
3284 {
3285   tcp_ip_flush_pcblist_on_wake_queue(netif, tcp_active_pcbs, status);
3286   tcp_ip_flush_pcblist_on_wake_queue(netif, tcp_tw_pcbs, status);
3287 }
3288 #endif
3289 
3290 #if LWIP_IPV6
3291 /* This function makes sure that accept() does not block indefinitely
3292   *  when the IPv6 address used for accept() is removed via the API netifapi_netif_rmv_ip6_address().
3293   */
3294 void tcp_unlock_accept(const ip6_addr_t *ipaddr)
3295 {
3296   struct tcp_pcb_listen *pcb = NULL;
3297   for (pcb = tcp_listen_pcbs.listen_pcbs; pcb != NULL; pcb = pcb->next) {
3298     if (ip6_addr_cmp(ipaddr, ip_2_ip6(&(pcb->local_ip)))) {
3299       err_t err;
3300       TCP_EVENT_ACCEPT(pcb, NULL, pcb->callback_arg, ERR_ABRT, err);
3301       LWIP_UNUSED_ARG(err); /* err not useful here */
3302     }
3303   }
3304 }
3305 #endif
3306 
3307 #if LWIP_TCP_TLP_SUPPORT
3308 
3309 /* draft-dukkipati-tcpm-tcp-loss-probe-01
3310 
3311     Conditions for scheduling PTO:
3312     (a) Connection is in Open state.
3313             Open state:  the sender has so far received in-sequence ACKs with no SACK blocks,
3314             and no other indications (such as retransmission timeout) that a loss may have occurred.
3315     (b) Connection is either cwnd limited or application limited.
3316     (c) Number of consecutive PTOs <= 2.
3317     (d) Connection is SACK enabled.
3318 
3319     When the above conditions are met, we need to decide after what duration the PTO should be
3320     fired, as explained below:
3321     a) FlightSize > 1: schedule PTO in max(2*SRTT, 10ms).
3322     b) FlightSize == 1: schedule PTO in max(2*SRTT, 1.5*SRTT+WCDelAckT).
3323     c) If RTO is earlier, schedule PTO in its place: PTO = min(RTO, PTO).
3324 
3325     Deviations in lwIP:
3326     1. PTO is fired from tcp_fasttmr, so the actual PTO trigger time is not very precise. This could be
3327        fixed by installing one PTO timer per TCP PCB.
3328     2. lwIP does not comply with condition (a): PTO is scheduled in both the Open and Disorder states,
3329        not just the Open state. Consider this scenario: 4 segments are sent out, but only segment 2 is
3330        SACKed. Here TCP Early Retransmit cannot be triggered, because the total unacked count of 4 is
3331        greater than the DupACK threshold. If PTO were stopped because the connection is in the Disorder
3332        state, TLP could not trigger fast retransmit either, and the only recovery would be an RTO
3333        retransmission. RACK (Recent ACK) could provide fast recovery here, but lwIP does not support it yet.
3334 */
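/* Worked example of the PTO durations above (illustrative; LWIP_TCP_TLP_WCDELACKT is assumed to be
   the draft's worst-case delayed-ACK timer of 200 ms, and the smoothed RTT srtt = pcb->sa >> 3 is
   taken as 40 ms):

     FlightSize > 1 : PTO = max(2 * 40, 20)             =  80 ms
     FlightSize == 1: PTO = max(2 * 40, 1.5 * 40 + 200) = 260 ms

   In both cases the result is then clamped by the RTO:
     PTO = min(PTO, pcb->rto * TCP_SLOW_INTERVAL)
*/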
3335 void tcp_tlp_schedule_probe(struct tcp_pcb *pcb, u32_t wnd)
3336 {
3337   u32_t pto_duration;
3338   u32_t time_now;
3339   u32_t srtt;
3340 
3341   if ((pcb->unacked != NULL) && (pcb->flags & TF_SACK) && (pcb->state == ESTABLISHED)) {    /* (d) */
3342     if ((!(pcb->flags & TF_IN_SACK_RTO)) && (!(pcb->flags & TF_IN_SACK_FRLR))) { /* (a) */
3343       if ((pcb->unsent == NULL) || (((ntohl(pcb->unsent->tcphdr->seqno) - pcb->lastack) + pcb->unsent->len) > wnd)) { /* (b) */
3344         if (pcb->tlp_pto_cnt < TCP_TLP_MAX_PROBE_CNT) { /* (c) */
3345           if (pcb->sa != -1) {
3346             srtt = (((u32_t)pcb->sa) >> 3); /* time duration in ms */
3347           } else {
3348             srtt = 100; /* use 100ms as the default SRTT if no valid RTT sample */
3349           }
3350 
3351           time_now = sys_now();
3352           /* FlightSize > 1: schedule PTO in max(2*SRTT, 10ms) */
3353           if (pcb->unacked->next != NULL) {
3354             pto_duration = LWIP_MAX(srtt << 1, 20); /* it should be 20ms here */
3355             LWIP_DEBUGF(TCP_TLP_DEBUG,
3356                         ("tcp_tlp_schedule_probe: FlightSize > 1: pto duration %"S32_F", sys_now %"U32_F"\n",
3357                          pto_duration, time_now));
3358           }
3359           /* FlightSize == 1: schedule PTO in max(2*SRTT, 1.5*SRTT+WCDelAckT) */
3360           else {
3361             pto_duration = LWIP_MAX((srtt << 1), ((((srtt << 1) + srtt) >> 1) + LWIP_TCP_TLP_WCDELACKT));
3362             LWIP_DEBUGF(TCP_TLP_DEBUG,
3363                         ("tcp_tlp_schedule_probe: FlightSize == 1: pto duration %"S32_F", sys_now %"U32_F"\n",
3364                          pto_duration, time_now));
3365           }
3366 
3367           pto_duration = LWIP_MIN(pto_duration, (u32_t)(pcb->rto * TCP_SLOW_INTERVAL));
3368           pcb->tlp_time_stamp = time_now + pto_duration;
3369           if (pcb->tlp_time_stamp == 0) { /* tlp_time_stamp 0 indicates PTO not scheduled */
3370             pcb->tlp_time_stamp = 1;
3371           }
3372           pcb->rtime = -1; /* stop RTO as PTO would be triggered before RTO */
3373         }
3374       }
3375     }
3376   }
3377 
3378   return;
3379 }
3380 #endif /* LWIP_TCP_TLP_SUPPORT */
3381 
3382 #if LWIP_TCP_PCB_NUM_EXT_ARGS
3383 /**
3384  * @defgroup tcp_raw_extargs ext arguments
3385  * @ingroup tcp_raw
3386  * Additional data storage per tcp pcb\n
3387  * @see @ref tcp_raw
3388  *
3389  * When LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb)
3390  * includes a number of additional argument entries in an array.
3391  *
3392  * To support memory management, in addition to a 'void *', callbacks can be
3393  * provided to manage transition from listening pcbs to connections and to
3394  * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks).
3395  *
3396  * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get
3397  * to store and load arguments from this index for a given pcb.
3398  */
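
/* Usage sketch (illustrative; my_ext_id, my_state, and the my_* functions are hypothetical):
 *
 *   static u8_t my_ext_id;
 *
 *   static void my_destroy(u8_t id, void *data)
 *   {
 *     LWIP_UNUSED_ARG(id);
 *     mem_free(data);                          // free per-connection state together with the pcb
 *   }
 *
 *   static const struct tcp_ext_arg_callbacks my_callbacks = {
 *     my_destroy,                              // destroy
 *     NULL                                     // passive_open not used in this sketch
 *   };
 *
 *   void my_init(void)
 *   {
 *     my_ext_id = tcp_ext_arg_alloc_id();      // allocate the index once, at startup
 *   }
 *
 *   void my_attach(struct tcp_pcb *pcb, struct my_state *state)
 *   {
 *     tcp_ext_arg_set_callbacks(pcb, my_ext_id, &my_callbacks);
 *     tcp_ext_arg_set(pcb, my_ext_id, state);  // read back later with tcp_ext_arg_get(pcb, my_ext_id)
 *   }
 */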
3399 
3400 static u8_t tcp_ext_arg_id;
3401 
3402 /**
3403  * @ingroup tcp_raw_extargs
3404  * Allocate an index to store data in the ext_args member of struct tcp_pcb.
3405  * The returned value is an index into that array.
3406  * The index is *global* over all pcbs!
3407  *
3408  * When @ref LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb)
3409  * includes a number of additional argument entries in an array.
3410  *
3411  * To support memory management, in addition to a 'void *', callbacks can be
3412  * provided to manage transition from listening pcbs to connections and to
3413  * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks).
3414  *
3415  * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get
3416  * to store and load arguments from this index for a given pcb.
3417  *
3418  * @return a unique index into struct tcp_pcb.ext_args
3419  */
3420 u8_t
3421 tcp_ext_arg_alloc_id(void)
3422 {
3423   u8_t result = tcp_ext_arg_id;
3424   tcp_ext_arg_id++;
3425 
3426   LWIP_ASSERT_CORE_LOCKED();
3427 
3428 #if LWIP_TCP_PCB_NUM_EXT_ARGS >= 255
3429 #error LWIP_TCP_PCB_NUM_EXT_ARGS
3430 #endif
3431   LWIP_ASSERT("Increase LWIP_TCP_PCB_NUM_EXT_ARGS in lwipopts.h", result < LWIP_TCP_PCB_NUM_EXT_ARGS);
3432   return result;
3433 }
3434 
3435 /**
3436  * @ingroup tcp_raw_extargs
3437  * Set callbacks for a given index of ext_args on the specified pcb.
3438  *
3439  * @param pcb tcp_pcb for which to set the callback
3440  * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id)
3441  * @param callbacks callback table (const since it is referenced, not copied!)
3442  */
3443 void
3444 tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks)
3445 {
3446   LWIP_ASSERT("pcb != NULL", pcb != NULL);
3447   LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS);
3448   LWIP_ASSERT("callbacks != NULL", callbacks != NULL);
3449 
3450   LWIP_ASSERT_CORE_LOCKED();
3451 
3452   pcb->ext_args[id].callbacks = callbacks;
3453 }
3454 
3455 /**
3456  * @ingroup tcp_raw_extargs
3457  * Set data for a given index of ext_args on the specified pcb.
3458  *
3459  * @param pcb tcp_pcb for which to set the data
3460  * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id)
3461  * @param arg data pointer to set
3462  */
3463 void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg)
3464 {
3465   LWIP_ASSERT("pcb != NULL", pcb != NULL);
3466   LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS);
3467 
3468   LWIP_ASSERT_CORE_LOCKED();
3469 
3470   pcb->ext_args[id].data = arg;
3471 }
3472 
3473 /**
3474  * @ingroup tcp_raw_extargs
3475  * Get data for a given index of ext_args on the specified pcb.
3476  *
3477  * @param pcb tcp_pcb for which to get the data
3478  * @param id ext_args index to get (allocated via @ref tcp_ext_arg_alloc_id)
3479  * @return data pointer at the given index
3480  */
3481 void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id)
3482 {
3483   LWIP_ASSERT("pcb != NULL", pcb != NULL);
3484   LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS);
3485 
3486   LWIP_ASSERT_CORE_LOCKED();
3487 
3488   return pcb->ext_args[id].data;
3489 }
3490 
3491 /** This function calls the "destroy" callback for all ext_args once a pcb is
3492  * freed.
3493  */
3494 static void
3495 tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args)
3496 {
3497   int i;
3498   LWIP_ASSERT("ext_args != NULL", ext_args != NULL);
3499 
3500   for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) {
3501     if (ext_args[i].callbacks != NULL) {
3502       if (ext_args[i].callbacks->destroy != NULL) {
3503         ext_args[i].callbacks->destroy((u8_t)i, ext_args[i].data);
3504       }
3505     }
3506   }
3507 }
3508 
3509 /** This function calls the "passive_open" callback for all ext_args if a connection
3510  * is in the process of being accepted. This is called just after the SYN is
3511  * received and before a SYN/ACK is sent, to allow modifying the very first
3512  * segment sent even on passive open. Naturally, the "accepted" callback of the
3513  * pcb has not been called yet!
3514  */
3515 err_t
3516 tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb)
3517 {
3518   int i;
3519   LWIP_ASSERT("lpcb != NULL", lpcb != NULL);
3520   LWIP_ASSERT("cpcb != NULL", cpcb != NULL);
3521 
3522   for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) {
3523     if (lpcb->ext_args[i].callbacks != NULL) {
3524       if (lpcb->ext_args[i].callbacks->passive_open != NULL) {
3525         err_t err = lpcb->ext_args[i].callbacks->passive_open((u8_t)i, lpcb, cpcb);
3526         if (err != ERR_OK) {
3527           return err;
3528         }
3529       }
3530     }
3531   }
3532   return ERR_OK;
3533 }
3534 #endif /* LWIP_TCP_PCB_NUM_EXT_ARGS */
3535 
3536 #endif /* LWIP_TCP */
3537