Lines Matching +full:tp +full:- +full:sensitive +full:- +full:adjust
11 * Corey Minyard <wf-rch!minyard@relay.EU.net>
12 * Florian La Roche, <flla@stud.uni-sb.de>
32 * Cacophonix Gaul : draft-minshall-nagle-01
55 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent() local
56 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
58 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_event_new_data_sent()
60 __skb_unlink(skb, &sk->sk_write_queue); in tcp_event_new_data_sent()
61 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); in tcp_event_new_data_sent()
63 if (tp->highest_sack == NULL) in tcp_event_new_data_sent()
64 tp->highest_sack = skb; in tcp_event_new_data_sent()
66 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
67 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) in tcp_event_new_data_sent()
77 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
83 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq() local
85 if (!before(tcp_wnd_end(tp), tp->snd_nxt) || in tcp_acceptable_seq()
86 (tp->rx_opt.wscale_ok && in tcp_acceptable_seq()
87 ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) in tcp_acceptable_seq()
88 return tp->snd_nxt; in tcp_acceptable_seq()
90 return tcp_wnd_end(tp); in tcp_acceptable_seq()
94 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
97 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
109 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss() local
111 int mss = tp->advmss; in tcp_advertise_mss()
118 tp->advmss = mss; in tcp_advertise_mss()
130 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart() local
131 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_restart()
132 u32 cwnd = tp->snd_cwnd; in tcp_cwnd_restart()
136 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
139 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
141 tp->snd_cwnd = max(cwnd, restart_cwnd); in tcp_cwnd_restart()
142 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_restart()
143 tp->snd_cwnd_used = 0; in tcp_cwnd_restart()
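As a minimal userspace sketch of the restart logic grepped above (window sizes, idle time and RTO below are invented): after an idle period tcp_cwnd_restart() halves the congestion window once per elapsed RTO, but never lets it drop below the restart window.

        #include <stdio.h>

        /* Illustrative only: mirrors the halving loop in tcp_cwnd_restart(). */
        static unsigned int cwnd_after_idle(unsigned int cwnd, unsigned int restart_cwnd,
                                            long idle_ms, long rto_ms)
        {
                long delta = idle_ms;

                while ((delta -= rto_ms) > 0 && cwnd > restart_cwnd)
                        cwnd >>= 1;             /* halve once per full RTO of idle */
                return cwnd > restart_cwnd ? cwnd : restart_cwnd;
        }

        int main(void)
        {
                /* e.g. cwnd 40, restart window 10, 1 s idle with a 200 ms RTO */
                printf("%u\n", cwnd_after_idle(40, 10, 1000, 200));    /* -> 10 */
                return 0;
        }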
147 static void tcp_event_data_sent(struct tcp_sock *tp, in tcp_event_data_sent() argument
153 if (tcp_packets_in_flight(tp) == 0) in tcp_event_data_sent()
156 tp->lsndtime = now; in tcp_event_data_sent()
161 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) in tcp_event_data_sent()
162 icsk->icsk_ack.pingpong = 1; in tcp_event_data_sent()
169 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_ack_sent() local
171 if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) { in tcp_event_ack_sent()
173 tp->compressed_ack - TCP_FASTRETRANS_THRESH); in tcp_event_ack_sent()
174 tp->compressed_ack = TCP_FASTRETRANS_THRESH; in tcp_event_ack_sent()
175 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_event_ack_sent()
179 if (unlikely(rcv_nxt != tp->rcv_nxt)) in tcp_event_ack_sent()
187 * will be offered. Store the results in the tp structure.
211 * we will truncate our initial window offering to 32K-1 in tcp_select_initial_window()
216 if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows) in tcp_select_initial_window()
227 space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); in tcp_select_initial_window()
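A small illustration of the truncation the comment above describes, with made-up numbers: when the signed-window workaround sysctl is set, the unscaled window offered in the SYN is clamped to 32767 so peers that read th->window as a signed value are not confused.

        #include <stdio.h>

        /* Illustrative clamp, in the spirit of tcp_select_initial_window(). */
        static unsigned int initial_rcv_wnd(unsigned int space, int workaround_signed)
        {
                /* "truncate our initial window offering to 32K-1" for buggy peers */
                if (workaround_signed && space > 32767U)
                        return 32767U;
                return space;
        }

        int main(void)
        {
                printf("%u\n", initial_rcv_wnd(87380, 1));      /* -> 32767 */
                return 0;
        }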
242 * value can be stuffed directly into th->window for an outgoing
247 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window() local
248 u32 old_win = tp->rcv_wnd; in tcp_select_window()
249 u32 cur_win = tcp_receive_window(tp); in tcp_select_window()
257 * window in time. --DaveM in tcp_select_window()
264 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); in tcp_select_window()
266 tp->rcv_wnd = new_win; in tcp_select_window()
267 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
272 if (!tp->rx_opt.rcv_wscale && in tcp_select_window()
273 sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows) in tcp_select_window()
276 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); in tcp_select_window()
279 new_win >>= tp->rx_opt.rcv_wscale; in tcp_select_window()
283 tp->pred_flags = 0; in tcp_select_window()
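A compact sketch of the final cap-and-shift step visible above (values are hypothetical): the window is limited to what the negotiated scale can express and then shifted down to fit the 16-bit th->window field.

        #include <stdio.h>

        /* Illustrative only: the cap-and-shift seen in tcp_select_window(). */
        static unsigned int window_field(unsigned int new_win, unsigned int rcv_wscale)
        {
                if (new_win > (65535U << rcv_wscale))
                        new_win = 65535U << rcv_wscale; /* largest value the shift can express */
                return new_win >> rcv_wscale;           /* fits the 16-bit th->window field */
        }

        int main(void)
        {
                /* a 1 MB window with wscale 7 -> 8192 on the wire (8192 << 7 = 1 MB) */
                printf("%u\n", window_field(1048576, 7));
                return 0;
        }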
294 /* Packet ECN state for a SYN-ACK */
297 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack() local
299 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; in tcp_ecn_send_synack()
300 if (!(tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_send_synack()
301 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; in tcp_ecn_send_synack()
310 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn() local
312 bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || in tcp_ecn_send_syn()
322 tp->ecn_flags = 0; in tcp_ecn_send_syn()
325 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; in tcp_ecn_send_syn()
326 tp->ecn_flags = TCP_ECN_OK; in tcp_ecn_send_syn()
334 if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback) in tcp_ecn_clear_syn()
335 /* tp->ecn_flags are cleared at a later point in time when in tcp_ecn_clear_syn()
338 TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR); in tcp_ecn_clear_syn()
344 if (inet_rsk(req)->ecn_ok) in tcp_ecn_make_synack()
345 th->ece = 1; in tcp_ecn_make_synack()
354 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send() local
356 if (tp->ecn_flags & TCP_ECN_OK) { in tcp_ecn_send()
357 /* Not-retransmitted data segment: set ECT and inject CWR. */ in tcp_ecn_send()
358 if (skb->len != tcp_header_len && in tcp_ecn_send()
359 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
361 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { in tcp_ecn_send()
362 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_send()
363 th->cwr = 1; in tcp_ecn_send()
364 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in tcp_ecn_send()
370 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) in tcp_ecn_send()
371 th->ece = 1; in tcp_ecn_send()
375 /* Constructs common control bits of non-data skb. If SYN/FIN is present,
380 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_init_nondata_skb()
382 TCP_SKB_CB(skb)->tcp_flags = flags; in tcp_init_nondata_skb()
383 TCP_SKB_CB(skb)->sacked = 0; in tcp_init_nondata_skb()
387 TCP_SKB_CB(skb)->seq = seq; in tcp_init_nondata_skb()
390 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
393 static inline bool tcp_urg_mode(const struct tcp_sock *tp) in tcp_urg_mode() argument
395 return tp->snd_una != tp->snd_up; in tcp_urg_mode()
433 * Beware: Something in the Internet is very sensitive to the ordering of
435 * Luckily we can at least blame others for their non-compliance but from
436 * an inter-operability perspective it seems that we're somewhat stuck with
444 static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, in tcp_options_write() argument
447 u16 options = opts->options; /* mungable copy */ in tcp_options_write()
453 opts->hash_location = (__u8 *)ptr; in tcp_options_write()
457 if (unlikely(opts->mss)) { in tcp_options_write()
460 opts->mss); in tcp_options_write()
476 *ptr++ = htonl(opts->tsval); in tcp_options_write()
477 *ptr++ = htonl(opts->tsecr); in tcp_options_write()
491 opts->ws); in tcp_options_write()
494 if (unlikely(opts->num_sack_blocks)) { in tcp_options_write()
495 struct tcp_sack_block *sp = tp->rx_opt.dsack ? in tcp_options_write()
496 tp->duplicate_sack : tp->selective_acks; in tcp_options_write()
502 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * in tcp_options_write()
505 for (this_sack = 0; this_sack < opts->num_sack_blocks; in tcp_options_write()
511 tp->rx_opt.dsack = 0; in tcp_options_write()
515 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; in tcp_options_write()
519 if (foc->exp) { in tcp_options_write()
520 len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; in tcp_options_write()
525 len = TCPOLEN_FASTOPEN_BASE + foc->len; in tcp_options_write()
530 memcpy(p, foc->val, foc->len); in tcp_options_write()
532 p[foc->len] = TCPOPT_NOP; in tcp_options_write()
533 p[foc->len + 1] = TCPOPT_NOP; in tcp_options_write()
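To make the packing used throughout tcp_options_write() concrete, here is a standalone illustration (not kernel code) of how one 32-bit option word carries kind, length and value in network byte order, using the MSS option (kind 2, length 4) as the simplest case.

        #include <stdio.h>
        #include <stdint.h>
        #include <arpa/inet.h>

        #define TCPOPT_MSS      2
        #define TCPOLEN_MSS     4

        int main(void)
        {
                uint16_t mss = 1460;
                /* kind (1 byte) | length (1 byte) | value (2 bytes), as one 32-bit word */
                uint32_t word = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);

                printf("0x%08x\n", (unsigned)ntohl(word));      /* -> 0x020405b4 */
                return 0;
        }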
541 static void smc_set_option(const struct tcp_sock *tp, in smc_set_option() argument
547 if (tp->syn_smc) { in smc_set_option()
549 opts->options |= OPTION_SMC; in smc_set_option()
550 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option()
557 static void smc_set_option_cond(const struct tcp_sock *tp, in smc_set_option_cond() argument
564 if (tp->syn_smc && ireq->smc_ok) { in smc_set_option_cond()
566 opts->options |= OPTION_SMC; in smc_set_option_cond()
567 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option_cond()
581 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options() local
583 struct tcp_fastopen_request *fastopen = tp->fastopen_req; in tcp_syn_options()
587 if (unlikely(rcu_access_pointer(tp->md5sig_info))) { in tcp_syn_options()
588 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_syn_options()
590 opts->options |= OPTION_MD5; in tcp_syn_options()
591 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_syn_options()
598 * advertised. But we subtract them from tp->mss_cache so that in tcp_syn_options()
605 opts->mss = tcp_advertise_mss(sk); in tcp_syn_options()
606 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_syn_options()
608 if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) { in tcp_syn_options()
609 opts->options |= OPTION_TS; in tcp_syn_options()
610 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; in tcp_syn_options()
611 opts->tsecr = tp->rx_opt.ts_recent; in tcp_syn_options()
612 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_syn_options()
614 if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) { in tcp_syn_options()
615 opts->ws = tp->rx_opt.rcv_wscale; in tcp_syn_options()
616 opts->options |= OPTION_WSCALE; in tcp_syn_options()
617 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_syn_options()
619 if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) { in tcp_syn_options()
620 opts->options |= OPTION_SACK_ADVERTISE; in tcp_syn_options()
621 if (unlikely(!(OPTION_TS & opts->options))) in tcp_syn_options()
622 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_syn_options()
625 if (fastopen && fastopen->cookie.len >= 0) { in tcp_syn_options()
626 u32 need = fastopen->cookie.len; in tcp_syn_options()
628 need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_syn_options()
632 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_syn_options()
633 opts->fastopen_cookie = &fastopen->cookie; in tcp_syn_options()
634 remaining -= need; in tcp_syn_options()
635 tp->syn_fastopen = 1; in tcp_syn_options()
636 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; in tcp_syn_options()
640 smc_set_option(tp, opts, &remaining); in tcp_syn_options()
642 return MAX_TCP_OPTION_SPACE - remaining; in tcp_syn_options()
645 /* Set up TCP options for SYN-ACKs. */
659 opts->options |= OPTION_MD5; in tcp_synack_options()
660 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_synack_options()
668 ireq->tstamp_ok &= !ireq->sack_ok; in tcp_synack_options()
673 opts->mss = mss; in tcp_synack_options()
674 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_synack_options()
676 if (likely(ireq->wscale_ok)) { in tcp_synack_options()
677 opts->ws = ireq->rcv_wscale; in tcp_synack_options()
678 opts->options |= OPTION_WSCALE; in tcp_synack_options()
679 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_synack_options()
681 if (likely(ireq->tstamp_ok)) { in tcp_synack_options()
682 opts->options |= OPTION_TS; in tcp_synack_options()
683 opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off; in tcp_synack_options()
684 opts->tsecr = req->ts_recent; in tcp_synack_options()
685 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_synack_options()
687 if (likely(ireq->sack_ok)) { in tcp_synack_options()
688 opts->options |= OPTION_SACK_ADVERTISE; in tcp_synack_options()
689 if (unlikely(!ireq->tstamp_ok)) in tcp_synack_options()
690 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_synack_options()
692 if (foc != NULL && foc->len >= 0) { in tcp_synack_options()
693 u32 need = foc->len; in tcp_synack_options()
695 need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_synack_options()
699 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_synack_options()
700 opts->fastopen_cookie = foc; in tcp_synack_options()
701 remaining -= need; in tcp_synack_options()
707 return MAX_TCP_OPTION_SPACE - remaining; in tcp_synack_options()
717 struct tcp_sock *tp = tcp_sk(sk); in tcp_established_options() local
721 opts->options = 0; in tcp_established_options()
725 if (unlikely(rcu_access_pointer(tp->md5sig_info))) { in tcp_established_options()
726 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_established_options()
728 opts->options |= OPTION_MD5; in tcp_established_options()
734 if (likely(tp->rx_opt.tstamp_ok)) { in tcp_established_options()
735 opts->options |= OPTION_TS; in tcp_established_options()
736 opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0; in tcp_established_options()
737 opts->tsecr = tp->rx_opt.ts_recent; in tcp_established_options()
741 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; in tcp_established_options()
743 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
744 opts->num_sack_blocks = in tcp_established_options()
746 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / in tcp_established_options()
748 if (likely(opts->num_sack_blocks)) in tcp_established_options()
750 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; in tcp_established_options()
765 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
779 if ((1 << sk->sk_state) & in tcp_tsq_write()
782 struct tcp_sock *tp = tcp_sk(sk); in tcp_tsq_write() local
784 if (tp->lost_out > tp->retrans_out && in tcp_tsq_write()
785 tp->snd_cwnd > tcp_packets_in_flight(tp)) { in tcp_tsq_write()
786 tcp_mstamp_refresh(tp); in tcp_tsq_write()
790 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, in tcp_tsq_write()
800 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) in tcp_tsq_handler()
807 * transferring tsq->head because tcp_wfree() might
816 struct tcp_sock *tp; in tcp_tasklet_func() local
820 list_splice_init(&tsq->head, &list); in tcp_tasklet_func()
824 tp = list_entry(q, struct tcp_sock, tsq_node); in tcp_tasklet_func()
825 list_del(&tp->tsq_node); in tcp_tasklet_func()
827 sk = (struct sock *)tp; in tcp_tasklet_func()
829 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); in tcp_tasklet_func()
841 * tcp_release_cb - tcp release_sock() callback
853 flags = sk->sk_tsq_flags; in tcp_release_cb()
857 } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags); in tcp_release_cb()
867 * 3) socket owned by us (sk->sk_lock.owned == 1) in tcp_release_cb()
883 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
896 INIT_LIST_HEAD(&tsq->head); in tcp_tasklet_init()
897 tasklet_init(&tsq->tasklet, in tcp_tasklet_init()
910 struct sock *sk = skb->sk; in tcp_wfree()
911 struct tcp_sock *tp = tcp_sk(sk); in tcp_wfree() local
917 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); in tcp_wfree()
922 * - less callbacks to tcp_write_xmit(), reducing stress (batches) in tcp_wfree()
923 * - chance for incoming ACK (processed by another cpu maybe) in tcp_wfree()
924 * to migrate this flow (skb->ooo_okay will be eventually set) in tcp_wfree()
926 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) in tcp_wfree()
929 for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) { in tcp_wfree()
937 nval = cmpxchg(&sk->sk_tsq_flags, oval, nval); in tcp_wfree()
944 empty = list_empty(&tsq->head); in tcp_wfree()
945 list_add(&tp->tsq_node, &tsq->head); in tcp_wfree()
947 tasklet_schedule(&tsq->tasklet); in tcp_wfree()
960 struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer); in tcp_pace_kick() local
961 struct sock *sk = (struct sock *)tp; in tcp_pace_kick()
976 rate = sk->sk_pacing_rate; in tcp_internal_pacing()
980 len_ns = (u64)skb->len * NSEC_PER_SEC; in tcp_internal_pacing()
982 hrtimer_start(&tcp_sk(sk)->pacing_timer, in tcp_internal_pacing()
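A rough userspace sketch of the stall computed by the internal pacing path above, with invented numbers: the delay before the next transmit is the skb length divided by the pacing rate, expressed in nanoseconds for the hrtimer.

        #include <stdio.h>
        #include <stdint.h>

        #define NSEC_PER_SEC    1000000000ULL

        /* Illustrative: delay before the next transmit, as in tcp_internal_pacing(). */
        static uint64_t pacing_delay_ns(uint32_t skb_len, uint64_t pacing_rate_bytes_per_sec)
        {
                if (!pacing_rate_bytes_per_sec)
                        return 0;
                return (uint64_t)skb_len * NSEC_PER_SEC / pacing_rate_bytes_per_sec;
        }

        int main(void)
        {
                /* a 64 KB TSO skb at 125 MB/s (~1 Gb/s) stalls the next send ~524 us */
                printf("%llu ns\n",
                       (unsigned long long)pacing_delay_ns(65536, 125000000ULL));
                return 0;
        }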
988 static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb) in tcp_update_skb_after_send() argument
990 skb->skb_mstamp = tp->tcp_mstamp; in tcp_update_skb_after_send()
991 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_update_skb_after_send()
1010 struct tcp_sock *tp; in __tcp_transmit_skb() local
1020 tp = tcp_sk(sk); in __tcp_transmit_skb()
1023 TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq in __tcp_transmit_skb()
1024 - tp->snd_una; in __tcp_transmit_skb()
1035 return -ENOBUFS; in __tcp_transmit_skb()
1037 skb->skb_mstamp = tp->tcp_mstamp; in __tcp_transmit_skb()
1043 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) in __tcp_transmit_skb()
1054 * TODO: Ideally, in-flight pure ACK packets should not matter here. in __tcp_transmit_skb()
1055 * One way to get this would be to set skb->truesize = 2 on them. in __tcp_transmit_skb()
1057 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); in __tcp_transmit_skb()
1064 skb->pfmemalloc = 0; in __tcp_transmit_skb()
1070 skb->sk = sk; in __tcp_transmit_skb()
1071 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; in __tcp_transmit_skb()
1073 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in __tcp_transmit_skb()
1075 skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm); in __tcp_transmit_skb()
1078 th = (struct tcphdr *)skb->data; in __tcp_transmit_skb()
1079 th->source = inet->inet_sport; in __tcp_transmit_skb()
1080 th->dest = inet->inet_dport; in __tcp_transmit_skb()
1081 th->seq = htonl(tcb->seq); in __tcp_transmit_skb()
1082 th->ack_seq = htonl(rcv_nxt); in __tcp_transmit_skb()
1084 tcb->tcp_flags); in __tcp_transmit_skb()
1086 th->check = 0; in __tcp_transmit_skb()
1087 th->urg_ptr = 0; in __tcp_transmit_skb()
1090 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { in __tcp_transmit_skb()
1091 if (before(tp->snd_up, tcb->seq + 0x10000)) { in __tcp_transmit_skb()
1092 th->urg_ptr = htons(tp->snd_up - tcb->seq); in __tcp_transmit_skb()
1093 th->urg = 1; in __tcp_transmit_skb()
1094 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { in __tcp_transmit_skb()
1095 th->urg_ptr = htons(0xFFFF); in __tcp_transmit_skb()
1096 th->urg = 1; in __tcp_transmit_skb()
1100 tcp_options_write((__be32 *)(th + 1), tp, &opts); in __tcp_transmit_skb()
1101 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in __tcp_transmit_skb()
1102 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { in __tcp_transmit_skb()
1103 th->window = htons(tcp_select_window(sk)); in __tcp_transmit_skb()
1109 th->window = htons(min(tp->rcv_wnd, 65535U)); in __tcp_transmit_skb()
1115 tp->af_specific->calc_md5_hash(opts.hash_location, in __tcp_transmit_skb()
1120 icsk->icsk_af_ops->send_check(sk, skb); in __tcp_transmit_skb()
1122 if (likely(tcb->tcp_flags & TCPHDR_ACK)) in __tcp_transmit_skb()
1125 if (skb->len != tcp_header_size) { in __tcp_transmit_skb()
1126 tcp_event_data_sent(tp, sk); in __tcp_transmit_skb()
1127 tp->data_segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1128 tp->bytes_sent += skb->len - tcp_header_size; in __tcp_transmit_skb()
1132 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in __tcp_transmit_skb()
1136 tp->segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1137 /* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */ in __tcp_transmit_skb()
1138 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); in __tcp_transmit_skb()
1139 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); in __tcp_transmit_skb()
1142 skb->tstamp = 0; in __tcp_transmit_skb()
1145 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), in __tcp_transmit_skb()
1148 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in __tcp_transmit_skb()
1155 tcp_update_skb_after_send(tp, oskb); in __tcp_transmit_skb()
1165 tcp_sk(sk)->rcv_nxt); in tcp_transmit_skb()
1175 struct tcp_sock *tp = tcp_sk(sk); in tcp_queue_skb() local
1178 tp->write_seq = TCP_SKB_CB(skb)->end_seq; in tcp_queue_skb()
1181 sk->sk_wmem_queued += skb->truesize; in tcp_queue_skb()
1182 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1188 if (skb->len <= mss_now) { in tcp_set_skb_tso_segs()
1190 * non-TSO case. in tcp_set_skb_tso_segs()
1193 TCP_SKB_CB(skb)->tcp_gso_size = 0; in tcp_set_skb_tso_segs()
1195 tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); in tcp_set_skb_tso_segs()
1196 TCP_SKB_CB(skb)->tcp_gso_size = mss_now; in tcp_set_skb_tso_segs()
1205 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_pcount() local
1207 tp->packets_out -= decr; in tcp_adjust_pcount()
1209 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_adjust_pcount()
1210 tp->sacked_out -= decr; in tcp_adjust_pcount()
1211 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) in tcp_adjust_pcount()
1212 tp->retrans_out -= decr; in tcp_adjust_pcount()
1213 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) in tcp_adjust_pcount()
1214 tp->lost_out -= decr; in tcp_adjust_pcount()
1217 if (tcp_is_reno(tp) && decr > 0) in tcp_adjust_pcount()
1218 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); in tcp_adjust_pcount()
1220 if (tp->lost_skb_hint && in tcp_adjust_pcount()
1221 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1222 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) in tcp_adjust_pcount()
1223 tp->lost_cnt_hint -= decr; in tcp_adjust_pcount()
1225 tcp_verify_left_out(tp); in tcp_adjust_pcount()
1230 return TCP_SKB_CB(skb)->txstamp_ack || in tcp_has_tx_tstamp()
1231 (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); in tcp_has_tx_tstamp()
1239 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { in tcp_fragment_tstamp()
1241 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_fragment_tstamp()
1243 shinfo->tx_flags &= ~tsflags; in tcp_fragment_tstamp()
1244 shinfo2->tx_flags |= tsflags; in tcp_fragment_tstamp()
1245 swap(shinfo->tskey, shinfo2->tskey); in tcp_fragment_tstamp()
1246 TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; in tcp_fragment_tstamp()
1247 TCP_SKB_CB(skb)->txstamp_ack = 0; in tcp_fragment_tstamp()
1253 TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; in tcp_skb_fragment_eor()
1254 TCP_SKB_CB(skb)->eor = 0; in tcp_skb_fragment_eor()
1264 __skb_queue_after(&sk->sk_write_queue, skb, buff); in tcp_insert_write_queue_after()
1266 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_insert_write_queue_after()
1278 struct tcp_sock *tp = tcp_sk(sk); in tcp_fragment() local
1285 if (WARN_ON(len > skb->len)) in tcp_fragment()
1286 return -EINVAL; in tcp_fragment()
1288 nsize = skb_headlen(skb) - len; in tcp_fragment()
1297 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE); in tcp_fragment()
1298 if (unlikely((sk->sk_wmem_queued >> 1) > limit && in tcp_fragment()
1303 return -ENOMEM; in tcp_fragment()
1307 return -ENOMEM; in tcp_fragment()
1312 return -ENOMEM; /* We'll just try again later. */ in tcp_fragment()
1314 sk->sk_wmem_queued += buff->truesize; in tcp_fragment()
1315 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1316 nlen = skb->len - len - nsize; in tcp_fragment()
1317 buff->truesize += nlen; in tcp_fragment()
1318 skb->truesize -= nlen; in tcp_fragment()
1321 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tcp_fragment()
1322 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1323 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1326 flags = TCP_SKB_CB(skb)->tcp_flags; in tcp_fragment()
1327 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tcp_fragment()
1328 TCP_SKB_CB(buff)->tcp_flags = flags; in tcp_fragment()
1329 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; in tcp_fragment()
1334 buff->ip_summed = CHECKSUM_PARTIAL; in tcp_fragment()
1336 buff->tstamp = skb->tstamp; in tcp_fragment()
1346 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; in tcp_fragment()
1349 * adjust the various packet counters. in tcp_fragment()
1351 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1352 int diff = old_factor - tcp_skb_pcount(skb) - in tcp_fragment()
1363 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor); in tcp_fragment()
1379 len -= eat; in __pskb_trim_head()
1386 for (i = 0; i < shinfo->nr_frags; i++) { in __pskb_trim_head()
1387 int size = skb_frag_size(&shinfo->frags[i]); in __pskb_trim_head()
1391 eat -= size; in __pskb_trim_head()
1393 shinfo->frags[k] = shinfo->frags[i]; in __pskb_trim_head()
1395 shinfo->frags[k].page_offset += eat; in __pskb_trim_head()
1396 skb_frag_size_sub(&shinfo->frags[k], eat); in __pskb_trim_head()
1402 shinfo->nr_frags = k; in __pskb_trim_head()
1404 skb->data_len -= len; in __pskb_trim_head()
1405 skb->len = skb->data_len; in __pskb_trim_head()
1415 return -ENOMEM; in tcp_trim_head()
1419 TCP_SKB_CB(skb)->seq += len; in tcp_trim_head()
1420 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_trim_head()
1423 skb->truesize -= delta_truesize; in tcp_trim_head()
1424 sk->sk_wmem_queued -= delta_truesize; in tcp_trim_head()
1429 /* Any change of skb->len requires recalculation of tso factor. */ in tcp_trim_head()
1439 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_mtu_to_mss() local
1444 It is MMS_S - sizeof(tcphdr) of rfc1122 in __tcp_mtu_to_mss()
1446 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); in __tcp_mtu_to_mss()
1449 if (icsk->icsk_af_ops->net_frag_header_len) { in __tcp_mtu_to_mss()
1453 mss_now -= icsk->icsk_af_ops->net_frag_header_len; in __tcp_mtu_to_mss()
1457 if (mss_now > tp->rx_opt.mss_clamp) in __tcp_mtu_to_mss()
1458 mss_now = tp->rx_opt.mss_clamp; in __tcp_mtu_to_mss()
1461 mss_now -= icsk->icsk_ext_hdr_len; in __tcp_mtu_to_mss()
1464 mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss); in __tcp_mtu_to_mss()
1472 return __tcp_mtu_to_mss(sk, pmtu) - in tcp_mtu_to_mss()
1473 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); in tcp_mtu_to_mss()
1479 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_to_mtu() local
1484 tp->tcp_header_len + in tcp_mss_to_mtu()
1485 icsk->icsk_ext_hdr_len + in tcp_mss_to_mtu()
1486 icsk->icsk_af_ops->net_header_len; in tcp_mss_to_mtu()
1489 if (icsk->icsk_af_ops->net_frag_header_len) { in tcp_mss_to_mtu()
1493 mtu += icsk->icsk_af_ops->net_frag_header_len; in tcp_mss_to_mtu()
1502 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_init() local
1506 icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1; in tcp_mtup_init()
1507 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1508 icsk->icsk_af_ops->net_header_len; in tcp_mtup_init()
1509 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss); in tcp_mtup_init()
1510 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_init()
1511 if (icsk->icsk_mtup.enabled) in tcp_mtup_init()
1512 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtup_init()
1518 tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT count
1521 tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1525 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1527 tp->mss_cache is current effective sending mss, including
1530 tp->rx_opt.mss_clamp.
1535 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1536 are READ ONLY outside this function. --ANK (980731)
1540 struct tcp_sock *tp = tcp_sk(sk); in tcp_sync_mss() local
1544 if (icsk->icsk_mtup.search_high > pmtu) in tcp_sync_mss()
1545 icsk->icsk_mtup.search_high = pmtu; in tcp_sync_mss()
1548 mss_now = tcp_bound_to_half_wnd(tp, mss_now); in tcp_sync_mss()
1551 icsk->icsk_pmtu_cookie = pmtu; in tcp_sync_mss()
1552 if (icsk->icsk_mtup.enabled) in tcp_sync_mss()
1553 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1554 tp->mss_cache = mss_now; in tcp_sync_mss()
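To make the bookkeeping described in the comment block above concrete, here is an illustrative userspace version of the PMTU-to-MSS arithmetic that __tcp_mtu_to_mss() and tcp_sync_mss() perform; the header sizes and clamp are assumed IPv4 values, and tcp_mtu_to_mss() additionally subtracts the bytes reserved for standing TCP options.

        #include <stdio.h>

        /* Illustrative only: pmtu -> sending mss, in the spirit of __tcp_mtu_to_mss(). */
        static int mtu_to_mss(int pmtu, int net_header_len, int tcp_header_len,
                              int ext_hdr_len, int mss_clamp, int min_snd_mss)
        {
                int mss = pmtu - net_header_len - tcp_header_len;       /* MMS_S of RFC 1122 */

                if (mss > mss_clamp)
                        mss = mss_clamp;        /* never above what the peer advertised */
                mss -= ext_hdr_len;             /* room for IP options / extension headers */
                if (mss < min_snd_mss)
                        mss = min_snd_mss;      /* sysctl_tcp_min_snd_mss floor */
                return mss;
        }

        int main(void)
        {
                /* plain IPv4/TCP, PMTU 1500, peer clamp 1460, no ext headers, floor 48 */
                printf("%d\n", mtu_to_mss(1500, 20, 20, 0, 1460, 48));  /* -> 1460 */
                return 0;
        }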
1565 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_mss() local
1572 mss_now = tp->mss_cache; in tcp_current_mss()
1576 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
1582 /* The mss_cache is sized based on tp->tcp_header_len, which assumes in tcp_current_mss()
1585 * we have to adjust mss_now correspondingly */ in tcp_current_mss()
1586 if (header_len != tp->tcp_header_len) { in tcp_current_mss()
1587 int delta = (int) header_len - tp->tcp_header_len; in tcp_current_mss()
1588 mss_now -= delta; in tcp_current_mss()
1594 /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
1600 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_application_limited() local
1602 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && in tcp_cwnd_application_limited()
1603 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_cwnd_application_limited()
1605 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_application_limited()
1606 u32 win_used = max(tp->snd_cwnd_used, init_win); in tcp_cwnd_application_limited()
1607 if (win_used < tp->snd_cwnd) { in tcp_cwnd_application_limited()
1608 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1609 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; in tcp_cwnd_application_limited()
1611 tp->snd_cwnd_used = 0; in tcp_cwnd_application_limited()
1613 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_application_limited()
1618 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_cwnd_validate()
1619 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_validate() local
1622 * window, and remember whether we were cwnd-limited then. in tcp_cwnd_validate()
1624 if (!before(tp->snd_una, tp->max_packets_seq) || in tcp_cwnd_validate()
1625 tp->packets_out > tp->max_packets_out) { in tcp_cwnd_validate()
1626 tp->max_packets_out = tp->packets_out; in tcp_cwnd_validate()
1627 tp->max_packets_seq = tp->snd_nxt; in tcp_cwnd_validate()
1628 tp->is_cwnd_limited = is_cwnd_limited; in tcp_cwnd_validate()
1633 tp->snd_cwnd_used = 0; in tcp_cwnd_validate()
1634 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_validate()
1637 if (tp->packets_out > tp->snd_cwnd_used) in tcp_cwnd_validate()
1638 tp->snd_cwnd_used = tp->packets_out; in tcp_cwnd_validate()
1640 if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle && in tcp_cwnd_validate()
1641 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && in tcp_cwnd_validate()
1642 !ca_ops->cong_control) in tcp_cwnd_validate()
1652 if (tcp_write_queue_empty(sk) && sk->sk_socket && in tcp_cwnd_validate()
1653 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && in tcp_cwnd_validate()
1654 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) in tcp_cwnd_validate()
1660 static bool tcp_minshall_check(const struct tcp_sock *tp) in tcp_minshall_check() argument
1662 return after(tp->snd_sml, tp->snd_una) && in tcp_minshall_check()
1663 !after(tp->snd_sml, tp->snd_nxt); in tcp_minshall_check()
1667 * Note that a TSO packet might end with a sub-mss segment
1669 * if ((skb->len % mss) != 0)
1670 * tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1672 * skb_pcount = skb->len / mss_now
1674 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, in tcp_minshall_update() argument
1677 if (skb->len < tcp_skb_pcount(skb) * mss_now) in tcp_minshall_update()
1678 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
1688 static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, in tcp_nagle_check() argument
1693 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); in tcp_nagle_check()
1704 bytes = min(sk->sk_pacing_rate >> sk->sk_pacing_shift, in tcp_tso_autosize()
1705 sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); in tcp_tso_autosize()
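A hedged illustration of what the shift above buys, with made-up numbers: at the default pacing shift of 10, an autosized TSO burst is roughly one millisecond of data at the current pacing rate, capped by the device GSO limit minus header room.

        #include <stdio.h>
        #include <stdint.h>

        /* Illustrative autosizing, in the spirit of tcp_tso_autosize(). */
        static uint32_t tso_autosize_bytes(uint64_t pacing_rate, int pacing_shift,
                                           uint32_t gso_max_size, uint32_t max_tcp_header)
        {
                uint64_t bytes = pacing_rate >> pacing_shift;   /* ~rate/1024 ~= 1 ms of data */
                uint32_t cap = gso_max_size - 1 - max_tcp_header;

                return bytes < cap ? (uint32_t)bytes : cap;
        }

        int main(void)
        {
                /* ~10 Gb/s (1.25 GB/s), shift 10, 64 KB GSO, ~320 B of header room (made up) */
                printf("%u\n", (unsigned)tso_autosize_bytes(1250000000ULL, 10, 65536, 320));
                return 0;
        }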
1722 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_tso_segs()
1725 min_tso = ca_ops->min_tso_segs ? in tcp_tso_segs()
1726 ca_ops->min_tso_segs(sk) : in tcp_tso_segs()
1727 sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs; in tcp_tso_segs()
1730 return min_t(u32, tso_segs, sk->sk_gso_max_segs); in tcp_tso_segs()
1740 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_split_point() local
1743 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
1749 needed = min(skb->len, window); in tcp_mss_split_point()
1759 if (tcp_nagle_check(partial != 0, tp, nonagle)) in tcp_mss_split_point()
1760 return needed - partial; in tcp_mss_split_point()
1768 static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, in tcp_cwnd_test() argument
1774 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && in tcp_cwnd_test()
1778 in_flight = tcp_packets_in_flight(tp); in tcp_cwnd_test()
1779 cwnd = tp->snd_cwnd; in tcp_cwnd_test()
1787 return min(halfcwnd, cwnd - in_flight); in tcp_cwnd_test()
1809 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, in tcp_nagle_test() argument
1822 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
1825 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
1832 static bool tcp_snd_wnd_test(const struct tcp_sock *tp, in tcp_snd_wnd_test() argument
1836 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test()
1838 if (skb->len > cur_mss) in tcp_snd_wnd_test()
1839 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; in tcp_snd_wnd_test()
1841 return !after(end_seq, tcp_wnd_end(tp)); in tcp_snd_wnd_test()
1848 * know that all the data is in scatter-gather pages, and that the
1856 int nlen = skb->len - len; in tso_fragment()
1860 if (skb->len != skb->data_len) in tso_fragment()
1865 return -ENOMEM; in tso_fragment()
1867 sk->sk_wmem_queued += buff->truesize; in tso_fragment()
1868 sk_mem_charge(sk, buff->truesize); in tso_fragment()
1869 buff->truesize += nlen; in tso_fragment()
1870 skb->truesize -= nlen; in tso_fragment()
1873 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tso_fragment()
1874 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tso_fragment()
1875 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tso_fragment()
1878 flags = TCP_SKB_CB(skb)->tcp_flags; in tso_fragment()
1879 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tso_fragment()
1880 TCP_SKB_CB(buff)->tcp_flags = flags; in tso_fragment()
1883 TCP_SKB_CB(buff)->sacked = 0; in tso_fragment()
1887 buff->ip_summed = CHECKSUM_PARTIAL; in tso_fragment()
1914 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_should_defer() local
1918 if (icsk->icsk_ca_state >= TCP_CA_Recovery) in tcp_tso_should_defer()
1924 if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0) in tcp_tso_should_defer()
1927 in_flight = tcp_packets_in_flight(tp); in tcp_tso_should_defer()
1930 BUG_ON(tp->snd_cwnd <= in_flight); in tcp_tso_should_defer()
1932 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
1935 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; in tcp_tso_should_defer()
1939 /* If a full-sized TSO skb can be sent, do it. */ in tcp_tso_should_defer()
1940 if (limit >= max_segs * tp->mss_cache) in tcp_tso_should_defer()
1944 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
1947 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); in tcp_tso_should_defer()
1949 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); in tcp_tso_should_defer()
1963 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) in tcp_tso_should_defer()
1971 age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp); in tcp_tso_should_defer()
1973 if (age < (tp->srtt_us >> 4)) in tcp_tso_should_defer()
1978 * 1) We are cwnd-limited in tcp_tso_should_defer()
1979 * 2) We are rwnd-limited in tcp_tso_should_defer()
1983 if (cong_win <= skb->len) { in tcp_tso_should_defer()
1988 if (send_win <= skb->len) { in tcp_tso_should_defer()
1995 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_tso_should_defer()
2007 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_check_reprobe() local
2012 interval = net->ipv4.sysctl_tcp_probe_interval; in tcp_mtu_check_reprobe()
2013 delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; in tcp_mtu_check_reprobe()
2018 icsk->icsk_mtup.probe_size = 0; in tcp_mtu_check_reprobe()
2019 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
2021 icsk->icsk_af_ops->net_header_len; in tcp_mtu_check_reprobe()
2022 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
2025 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtu_check_reprobe()
2035 if (len <= skb->len) in tcp_can_coalesce_send_queue_head()
2038 if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb)) in tcp_can_coalesce_send_queue_head()
2041 len -= skb->len; in tcp_can_coalesce_send_queue_head()
2054 * -1 otherwise
2059 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_probe() local
2073 if (likely(!icsk->icsk_mtup.enabled || in tcp_mtu_probe()
2074 icsk->icsk_mtup.probe_size || in tcp_mtu_probe()
2075 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || in tcp_mtu_probe()
2076 tp->snd_cwnd < 11 || in tcp_mtu_probe()
2077 tp->rx_opt.num_sacks || tp->rx_opt.dsack)) in tcp_mtu_probe()
2078 return -1; in tcp_mtu_probe()
2081 * and current mss_clamp. if (search_high - search_low) in tcp_mtu_probe()
2085 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
2086 icsk->icsk_mtup.search_low) >> 1); in tcp_mtu_probe()
2087 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; in tcp_mtu_probe()
2088 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; in tcp_mtu_probe()
2093 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
2094 interval < net->ipv4.sysctl_tcp_probe_threshold) { in tcp_mtu_probe()
2099 return -1; in tcp_mtu_probe()
2103 if (tp->write_seq - tp->snd_nxt < size_needed) in tcp_mtu_probe()
2104 return -1; in tcp_mtu_probe()
2106 if (tp->snd_wnd < size_needed) in tcp_mtu_probe()
2107 return -1; in tcp_mtu_probe()
2108 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) in tcp_mtu_probe()
2112 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { in tcp_mtu_probe()
2113 if (!tcp_packets_in_flight(tp)) in tcp_mtu_probe()
2114 return -1; in tcp_mtu_probe()
2120 return -1; in tcp_mtu_probe()
2125 return -1; in tcp_mtu_probe()
2126 sk->sk_wmem_queued += nskb->truesize; in tcp_mtu_probe()
2127 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
2131 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; in tcp_mtu_probe()
2132 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; in tcp_mtu_probe()
2133 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; in tcp_mtu_probe()
2134 TCP_SKB_CB(nskb)->sacked = 0; in tcp_mtu_probe()
2135 nskb->csum = 0; in tcp_mtu_probe()
2136 nskb->ip_summed = CHECKSUM_PARTIAL; in tcp_mtu_probe()
2143 copy = min_t(int, skb->len, probe_size - len); in tcp_mtu_probe()
2146 if (skb->len <= copy) { in tcp_mtu_probe()
2149 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; in tcp_mtu_probe()
2153 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; in tcp_mtu_probe()
2158 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & in tcp_mtu_probe()
2160 if (!skb_shinfo(skb)->nr_frags) { in tcp_mtu_probe()
2166 TCP_SKB_CB(skb)->seq += copy; in tcp_mtu_probe()
2174 tcp_init_tso_segs(nskb, nskb->len); in tcp_mtu_probe()
2177 * be resegmented into mss-sized pieces by tcp_write_xmit(). in tcp_mtu_probe()
2182 tp->snd_cwnd--; in tcp_mtu_probe()
2185 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
2186 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; in tcp_mtu_probe()
2187 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; in tcp_mtu_probe()
2192 return -1; in tcp_mtu_probe()
2198 hrtimer_is_queued(&tcp_sk(sk)->pacing_timer); in tcp_pacing_check()
2205 * - better RTT estimation and ACK scheduling
2206 * - faster recovery
2207 * - high rates
2217 limit = max(2 * skb->truesize, sk->sk_pacing_rate >> sk->sk_pacing_shift); in tcp_small_queue_check()
2219 sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes); in tcp_small_queue_check()
2222 if (refcount_read(&sk->sk_wmem_alloc) > limit) { in tcp_small_queue_check()
2231 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in tcp_small_queue_check()
2237 if (refcount_read(&sk->sk_wmem_alloc) > limit) in tcp_small_queue_check()
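A sketch of the small-queue limit computed above (all numbers invented): the unacknowledged skb memory allowed to sit in the qdisc/NIC layer is roughly one millisecond of the pacing rate, never less than two skbs, and also capped by the tcp_limit_output_bytes sysctl; transmits are deferred once sk_wmem_alloc exceeds it.

        #include <stdio.h>
        #include <stdint.h>

        /* Illustrative: should we defer this transmit, TSQ-style? */
        static int tsq_throttled(uint64_t wmem_alloc, uint64_t skb_truesize,
                                 uint64_t pacing_rate, int pacing_shift,
                                 uint64_t limit_output_bytes)
        {
                uint64_t limit = pacing_rate >> pacing_shift;   /* ~1 ms of data at shift 10 */

                if (limit < 2 * skb_truesize)
                        limit = 2 * skb_truesize;       /* always allow at least two skbs */
                if (limit > limit_output_bytes)
                        limit = limit_output_bytes;     /* sysctl_tcp_limit_output_bytes cap */
                return wmem_alloc > limit;
        }

        int main(void)
        {
                /* 256 KB queued vs a ~1 MB cap -> prints 0, keep sending */
                printf("%d\n", tsq_throttled(262144, 66000, 1250000000ULL, 10, 1 << 20));
                return 0;
        }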
2243 static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) in tcp_chrono_set() argument
2246 enum tcp_chrono old = tp->chrono_type; in tcp_chrono_set()
2249 tp->chrono_stat[old - 1] += now - tp->chrono_start; in tcp_chrono_set()
2250 tp->chrono_start = now; in tcp_chrono_set()
2251 tp->chrono_type = new; in tcp_chrono_set()
2256 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_start() local
2263 if (type > tp->chrono_type) in tcp_chrono_start()
2264 tcp_chrono_set(tp, type); in tcp_chrono_start()
2269 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_stop() local
2280 tcp_chrono_set(tp, TCP_CHRONO_UNSPEC); in tcp_chrono_stop()
2281 else if (type == tp->chrono_type) in tcp_chrono_stop()
2282 tcp_chrono_set(tp, TCP_CHRONO_BUSY); in tcp_chrono_stop()
2290 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2302 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_xmit() local
2312 tcp_mstamp_refresh(tp); in tcp_write_xmit()
2333 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { in tcp_write_xmit()
2335 tcp_update_skb_after_send(tp, skb); in tcp_write_xmit()
2339 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_write_xmit()
2348 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) { in tcp_write_xmit()
2354 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, in tcp_write_xmit()
2366 if (tso_segs > 1 && !tcp_urg_mode(tp)) in tcp_write_xmit()
2373 if (skb->len > limit && in tcp_write_xmit()
2383 * We do not want to send a pure-ack packet and have in tcp_write_xmit()
2386 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) in tcp_write_xmit()
2398 tcp_minshall_update(tp, mss_now, skb); in tcp_write_xmit()
2412 tp->prr_out += sent_pkts; in tcp_write_xmit()
2417 is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); in tcp_write_xmit()
2421 return !tp->packets_out && !tcp_write_queue_empty(sk); in tcp_write_xmit()
2427 struct tcp_sock *tp = tcp_sk(sk); in tcp_schedule_loss_probe() local
2434 if (tp->fastopen_rsk) in tcp_schedule_loss_probe()
2437 early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans; in tcp_schedule_loss_probe()
2442 !tp->packets_out || !tcp_is_sack(tp) || in tcp_schedule_loss_probe()
2443 (icsk->icsk_ca_state != TCP_CA_Open && in tcp_schedule_loss_probe()
2444 icsk->icsk_ca_state != TCP_CA_CWR)) in tcp_schedule_loss_probe()
2451 if (tp->srtt_us) { in tcp_schedule_loss_probe()
2452 timeout = usecs_to_jiffies(tp->srtt_us >> 2); in tcp_schedule_loss_probe()
2453 if (tp->packets_out == 1) in tcp_schedule_loss_probe()
2463 jiffies_to_usecs(inet_csk(sk)->icsk_rto) : in tcp_schedule_loss_probe()
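A short worked sketch of the probe timeout above, with an illustrative RTT: tp->srtt_us holds the smoothed RTT scaled by 8, so srtt_us >> 2 is about two SRTTs; the kernel then adds extra slack when only one packet is in flight and bounds the result against the RTO.

        #include <stdio.h>
        #include <stdint.h>

        /* Illustrative: the 2*SRTT base of the tail loss probe timer. */
        static uint32_t tlp_base_timeout_us(uint32_t srtt_us_scaled_by_8)
        {
                return srtt_us_scaled_by_8 >> 2;        /* (8*SRTT)/4 == 2*SRTT */
        }

        int main(void)
        {
                /* an SRTT of 50 ms is stored as 400000; the probe fires ~100 ms out */
                printf("%u us\n", (unsigned)tlp_base_timeout_us(400000));
                return 0;
        }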
2493 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_loss_probe() local
2499 if (tp->tlp_high_seq) in tcp_send_loss_probe()
2502 tp->tlp_retrans = 0; in tcp_send_loss_probe()
2504 if (skb && tcp_snd_wnd_test(tp, skb, mss)) { in tcp_send_loss_probe()
2505 pcount = tp->packets_out; in tcp_send_loss_probe()
2507 if (tp->packets_out > pcount) in tcp_send_loss_probe()
2511 skb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_loss_probe()
2513 WARN_ONCE(tp->packets_out, in tcp_send_loss_probe()
2515 tp->packets_out, sk->sk_state, tp->snd_cwnd, mss); in tcp_send_loss_probe()
2516 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2527 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { in tcp_send_loss_probe()
2529 (pcount - 1) * mss, mss, in tcp_send_loss_probe()
2541 tp->tlp_retrans = 1; in tcp_send_loss_probe()
2545 tp->tlp_high_seq = tp->snd_nxt; in tcp_send_loss_probe()
2549 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2565 if (unlikely(sk->sk_state == TCP_CLOSE)) in __tcp_push_pending_frames()
2580 BUG_ON(!skb || skb->len < mss_now); in tcp_push_one()
2582 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
2594 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2600 * since header prediction assumes th->window stays fixed.
2602 * Strictly speaking, keeping th->window fixed violates the receiver
2634 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
2640 struct tcp_sock *tp = tcp_sk(sk); in __tcp_select_window() local
2645 * fluctuations. --SAW 1998/11/1 in __tcp_select_window()
2647 int mss = icsk->icsk_ack.rcv_mss; in __tcp_select_window()
2650 int full_space = min_t(int, tp->window_clamp, allowed_space); in __tcp_select_window()
2659 icsk->icsk_ack.quick = 0; in __tcp_select_window()
2662 tp->rcv_ssthresh = min(tp->rcv_ssthresh, in __tcp_select_window()
2663 4U * tp->advmss); in __tcp_select_window()
2668 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
2671 * of the maximum allowed, try to move to zero-window, else in __tcp_select_window()
2681 if (free_space > tp->rcv_ssthresh) in __tcp_select_window()
2682 free_space = tp->rcv_ssthresh; in __tcp_select_window()
2687 if (tp->rx_opt.rcv_wscale) { in __tcp_select_window()
2694 window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
2696 window = tp->rcv_wnd; in __tcp_select_window()
2705 if (window <= free_space - mss || window > free_space) in __tcp_select_window()
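A hedged restatement of the RFC 1122 receiver-side SWS rule quoted above, as a standalone check with invented numbers: only open the window further when the space freed since the last advertisement is at least min(RCV.BUFF/2, MSS).

        #include <stdio.h>

        /* Illustrative RFC 1122 4.2.3.3 check: may the receiver open the window? */
        static int may_advertise_more(unsigned int rcv_buff, unsigned int rcv_user,
                                      unsigned int rcv_window, unsigned int mss)
        {
                unsigned int freed = rcv_buff - rcv_user - rcv_window;
                unsigned int threshold = rcv_buff / 2 < mss ? rcv_buff / 2 : mss;

                return freed >= threshold;
        }

        int main(void)
        {
                /* 64 KB buffer, nothing unread, 60 KB already offered, MSS 1460: 4 KB freed -> 1 */
                printf("%d\n", may_advertise_more(65536, 0, 61440, 1460));
                return 0;
        }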
2723 shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_skb_collapse_tstamp()
2724 shinfo->tskey = next_shinfo->tskey; in tcp_skb_collapse_tstamp()
2725 TCP_SKB_CB(skb)->txstamp_ack |= in tcp_skb_collapse_tstamp()
2726 TCP_SKB_CB(next_skb)->txstamp_ack; in tcp_skb_collapse_tstamp()
2733 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_retrans() local
2737 next_skb_size = next_skb->len; in tcp_collapse_retrans()
2751 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_collapse_retrans()
2754 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; in tcp_collapse_retrans()
2759 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; in tcp_collapse_retrans()
2760 TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; in tcp_collapse_retrans()
2763 tcp_clear_retrans_hints_partial(tp); in tcp_collapse_retrans()
2764 if (next_skb == tp->retransmit_skb_hint) in tcp_collapse_retrans()
2765 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
2783 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_can_collapse()
2795 struct tcp_sock *tp = tcp_sk(sk); in tcp_retrans_try_collapse() local
2799 if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse) in tcp_retrans_try_collapse()
2801 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in tcp_retrans_try_collapse()
2811 space -= skb->len; in tcp_retrans_try_collapse()
2821 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
2830 * state updates are done by the caller. Returns non-zero if an
2836 struct tcp_sock *tp = tcp_sk(sk); in __tcp_retransmit_skb() local
2842 if (icsk->icsk_mtup.probe_size) in __tcp_retransmit_skb()
2843 icsk->icsk_mtup.probe_size = 0; in __tcp_retransmit_skb()
2848 if (refcount_read(&sk->sk_wmem_alloc) > in __tcp_retransmit_skb()
2849 min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), in __tcp_retransmit_skb()
2850 sk->sk_sndbuf)) in __tcp_retransmit_skb()
2851 return -EAGAIN; in __tcp_retransmit_skb()
2854 return -EBUSY; in __tcp_retransmit_skb()
2856 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
2857 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { in __tcp_retransmit_skb()
2859 return -EINVAL; in __tcp_retransmit_skb()
2861 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
2862 return -ENOMEM; in __tcp_retransmit_skb()
2865 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in __tcp_retransmit_skb()
2866 return -EHOSTUNREACH; /* Routing failure or similar. */ in __tcp_retransmit_skb()
2875 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && in __tcp_retransmit_skb()
2876 TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
2877 return -EAGAIN; in __tcp_retransmit_skb()
2880 if (skb->len > len) { in __tcp_retransmit_skb()
2883 return -ENOMEM; /* We'll try again later. */ in __tcp_retransmit_skb()
2886 return -ENOMEM; in __tcp_retransmit_skb()
2890 diff -= tcp_skb_pcount(skb); in __tcp_retransmit_skb()
2893 if (skb->len < cur_mss) in __tcp_retransmit_skb()
2898 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) in __tcp_retransmit_skb()
2904 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in __tcp_retransmit_skb()
2906 tp->total_retrans += segs; in __tcp_retransmit_skb()
2907 tp->bytes_retrans += skb->len; in __tcp_retransmit_skb()
2909 /* make sure skb->data is aligned on arches that require it in __tcp_retransmit_skb()
2910 * and check if ack-trimming & collapsing extended the headroom in __tcp_retransmit_skb()
2913 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || in __tcp_retransmit_skb()
2920 -ENOBUFS; in __tcp_retransmit_skb()
2924 tcp_update_skb_after_send(tp, skb); in __tcp_retransmit_skb()
2931 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG)) in __tcp_retransmit_skb()
2933 TCP_SKB_CB(skb)->seq, segs, err); in __tcp_retransmit_skb()
2936 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; in __tcp_retransmit_skb()
2938 } else if (err != -EBUSY) { in __tcp_retransmit_skb()
2946 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_skb() local
2951 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_retransmit_skb()
2955 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; in tcp_retransmit_skb()
2956 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2959 if (!tp->retrans_stamp) in tcp_retransmit_skb()
2960 tp->retrans_stamp = tcp_skb_timestamp(skb); in tcp_retransmit_skb()
2964 if (tp->undo_retrans < 0) in tcp_retransmit_skb()
2965 tp->undo_retrans = 0; in tcp_retransmit_skb()
2966 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2979 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_retransmit_queue() local
2983 if (!tp->packets_out) in tcp_xmit_retransmit_queue()
2987 skb = tp->retransmit_skb_hint ?: rtx_head; in tcp_xmit_retransmit_queue()
2998 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
3000 segs = tp->snd_cwnd - tcp_packets_in_flight(tp); in tcp_xmit_retransmit_queue()
3003 sacked = TCP_SKB_CB(skb)->sacked; in tcp_xmit_retransmit_queue()
3009 if (tp->retrans_out >= tp->lost_out) { in tcp_xmit_retransmit_queue()
3017 if (icsk->icsk_ca_state != TCP_CA_Loss) in tcp_xmit_retransmit_queue()
3035 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
3038 icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) in tcp_xmit_retransmit_queue()
3040 inet_csk(sk)->icsk_rto, in tcp_xmit_retransmit_queue()
3056 if (size <= sk->sk_forward_alloc) in sk_forced_mem_schedule()
3059 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; in sk_forced_mem_schedule()
3062 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in sk_forced_mem_schedule()
3063 mem_cgroup_charge_skmem(sk->sk_memcg, amt); in sk_forced_mem_schedule()
3072 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_fin() local
3080 tskb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_fin()
3084 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; in tcp_send_fin()
3085 TCP_SKB_CB(tskb)->end_seq++; in tcp_send_fin()
3086 tp->write_seq++; in tcp_send_fin()
3090 * We need to set tp->snd_nxt to the value it would have in tcp_send_fin()
3092 * does not change tp->snd_nxt. in tcp_send_fin()
3094 tp->snd_nxt++; in tcp_send_fin()
3098 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); in tcp_send_fin()
3104 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); in tcp_send_fin()
3106 sk_forced_mem_schedule(sk, skb->truesize); in tcp_send_fin()
3108 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
3118 * by RFC 2525, section 2.17. -DaveM
3148 /* Send a crossed SYN-ACK during socket establishment.
3159 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_send_synack()
3161 return -EFAULT; in tcp_send_synack()
3163 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { in tcp_send_synack()
3171 return -ENOMEM; in tcp_send_synack()
3172 INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); in tcp_send_synack()
3176 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); in tcp_send_synack()
3177 sk->sk_wmem_queued += nskb->truesize; in tcp_send_synack()
3178 sk_mem_charge(sk, nskb->truesize); in tcp_send_synack()
3182 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; in tcp_send_synack()
3189 * tcp_make_synack - Prepare a SYN-ACK.
3203 const struct tcp_sock *tp = tcp_sk(sk); in tcp_make_synack() local
3231 * sk->sk_wmem_alloc in an atomic, we can promote to rw. in tcp_make_synack()
3238 mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); in tcp_make_synack()
3242 if (unlikely(req->cookie_ts)) in tcp_make_synack()
3243 skb->skb_mstamp = cookie_init_timestamp(req); in tcp_make_synack()
3246 skb->skb_mstamp = tcp_clock_us(); in tcp_make_synack()
3250 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); in tcp_make_synack()
3252 skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); in tcp_make_synack()
3259 th = (struct tcphdr *)skb->data; in tcp_make_synack()
3261 th->syn = 1; in tcp_make_synack()
3262 th->ack = 1; in tcp_make_synack()
3264 th->source = htons(ireq->ir_num); in tcp_make_synack()
3265 th->dest = ireq->ir_rmt_port; in tcp_make_synack()
3266 skb->mark = ireq->ir_mark; in tcp_make_synack()
3267 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_make_synack()
3268 th->seq = htonl(tcp_rsk(req)->snt_isn); in tcp_make_synack()
3270 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); in tcp_make_synack()
3273 th->window = htons(min(req->rsk_rcv_wnd, 65535U)); in tcp_make_synack()
3275 th->doff = (tcp_header_size >> 2); in tcp_make_synack()
3279 /* Okay, we have all we need - do the md5 hash if needed */ in tcp_make_synack()
3281 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, in tcp_make_synack()
3287 skb->tstamp = 0; in tcp_make_synack()
3303 if (likely(ca && try_module_get(ca->owner))) { in tcp_ca_dst_init()
3304 module_put(icsk->icsk_ca_ops->owner); in tcp_ca_dst_init()
3305 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); in tcp_ca_dst_init()
3306 icsk->icsk_ca_ops = ca; in tcp_ca_dst_init()
3315 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_init() local
3322 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_connect_init()
3323 if (sock_net(sk)->ipv4.sysctl_tcp_timestamps) in tcp_connect_init()
3324 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; in tcp_connect_init()
3327 if (tp->af_specific->md5_lookup(sk, sk)) in tcp_connect_init()
3328 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; in tcp_connect_init()
3332 if (tp->rx_opt.user_mss) in tcp_connect_init()
3333 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; in tcp_connect_init()
3334 tp->max_window = 0; in tcp_connect_init()
3340 if (!tp->window_clamp) in tcp_connect_init()
3341 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); in tcp_connect_init()
3342 tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); in tcp_connect_init()
3347 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_connect_init()
3348 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3349 tp->window_clamp = tcp_full_space(sk); in tcp_connect_init()
3356 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), in tcp_connect_init()
3357 &tp->rcv_wnd, in tcp_connect_init()
3358 &tp->window_clamp, in tcp_connect_init()
3359 sock_net(sk)->ipv4.sysctl_tcp_window_scaling, in tcp_connect_init()
3363 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_connect_init()
3364 tp->rcv_ssthresh = tp->rcv_wnd; in tcp_connect_init()
3366 sk->sk_err = 0; in tcp_connect_init()
3368 tp->snd_wnd = 0; in tcp_connect_init()
3369 tcp_init_wl(tp, 0); in tcp_connect_init()
3371 tp->snd_una = tp->write_seq; in tcp_connect_init()
3372 tp->snd_sml = tp->write_seq; in tcp_connect_init()
3373 tp->snd_up = tp->write_seq; in tcp_connect_init()
3374 tp->snd_nxt = tp->write_seq; in tcp_connect_init()
3376 if (likely(!tp->repair)) in tcp_connect_init()
3377 tp->rcv_nxt = 0; in tcp_connect_init()
3379 tp->rcv_tstamp = tcp_jiffies32; in tcp_connect_init()
3380 tp->rcv_wup = tp->rcv_nxt; in tcp_connect_init()
3381 tp->copied_seq = tp->rcv_nxt; in tcp_connect_init()
3383 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); in tcp_connect_init()
3384 inet_csk(sk)->icsk_retransmits = 0; in tcp_connect_init()
3385 tcp_clear_retrans(tp); in tcp_connect_init()
3390 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_queue_skb() local
3393 tcb->end_seq += skb->len; in tcp_connect_queue_skb()
3395 sk->sk_wmem_queued += skb->truesize; in tcp_connect_queue_skb()
3396 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3397 tp->write_seq = tcb->end_seq; in tcp_connect_queue_skb()
3398 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3402 * queue a data-only packet after the regular SYN, such that regular SYNs
3403 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3410 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_syn_data() local
3411 struct tcp_fastopen_request *fo = tp->fastopen_req; in tcp_send_syn_data()
3415 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ in tcp_send_syn_data()
3416 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) in tcp_send_syn_data()
3419 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and in tcp_send_syn_data()
3420 * user-MSS. Reserve maximum option space for middleboxes that add in tcp_send_syn_data()
3423 tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); in tcp_send_syn_data()
3425 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - in tcp_send_syn_data()
3428 space = min_t(size_t, space, fo->size); in tcp_send_syn_data()
3430 /* limit to order-0 allocations */ in tcp_send_syn_data()
3433 syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false); in tcp_send_syn_data()
3436 syn_data->ip_summed = CHECKSUM_PARTIAL; in tcp_send_syn_data()
3437 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); in tcp_send_syn_data()
3440 &fo->data->msg_iter); in tcp_send_syn_data()
3452 if (space == fo->size) in tcp_send_syn_data()
3453 fo->data = NULL; in tcp_send_syn_data()
3454 fo->copied = space; in tcp_send_syn_data()
3457 if (syn_data->len) in tcp_send_syn_data()
3460 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); in tcp_send_syn_data()
3462 syn->skb_mstamp = syn_data->skb_mstamp; in tcp_send_syn_data()
3469 TCP_SKB_CB(syn_data)->seq++; in tcp_send_syn_data()
3470 TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; in tcp_send_syn_data()
3472 tp->syn_data = (fo->copied > 0); in tcp_send_syn_data()
3473 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); in tcp_send_syn_data()
3479 __skb_queue_tail(&sk->sk_write_queue, syn_data); in tcp_send_syn_data()
3480 tp->packets_out -= tcp_skb_pcount(syn_data); in tcp_send_syn_data()
3484 if (fo->cookie.len > 0) in tcp_send_syn_data()
3485 fo->cookie.len = 0; in tcp_send_syn_data()
3486 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); in tcp_send_syn_data()
3488 tp->syn_fastopen = 0; in tcp_send_syn_data()
3490 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ in tcp_send_syn_data()
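An illustrative take on the sizing visible above (the page payload figure is made up): the data carried in a Fast Open SYN is bounded by the MSS minus the 40 bytes of worst-case TCP option space reserved for middleboxes, by what the application queued, and by keeping the skb an order-0 allocation.

        #include <stdio.h>
        #include <stddef.h>

        #define MAX_TCP_OPTION_SPACE    40      /* worst-case TCP option bytes */

        /* Illustrative: how much user data a Fast Open SYN may carry. */
        static size_t syn_data_space(size_t mss, size_t queued, size_t page_payload)
        {
                size_t space = mss > MAX_TCP_OPTION_SPACE ? mss - MAX_TCP_OPTION_SPACE : 0;

                if (space > queued)
                        space = queued;         /* no more than the app queued */
                if (space > page_payload)
                        space = page_payload;   /* keep it an order-0 allocation */
                return space;
        }

        int main(void)
        {
                /* MSS 1460, 8 KB queued, ~3 KB of linear room in one page skb (invented) */
                printf("%zu\n", syn_data_space(1460, 8192, 3000));      /* -> 1420 */
                return 0;
        }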
3497 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect() local
3503 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in tcp_connect()
3504 return -EHOSTUNREACH; /* Routing failure or similar. */ in tcp_connect()
3508 if (unlikely(tp->repair)) { in tcp_connect()
3513 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true); in tcp_connect()
3515 return -ENOBUFS; in tcp_connect()
3517 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); in tcp_connect()
3518 tcp_mstamp_refresh(tp); in tcp_connect()
3519 tp->retrans_stamp = tcp_time_stamp(tp); in tcp_connect()
3522 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_connect()
3525 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
3526 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
3527 if (err == -ECONNREFUSED) in tcp_connect()
3530 /* We change tp->snd_nxt after the tcp_transmit_skb() call in tcp_connect()
3533 tp->snd_nxt = tp->write_seq; in tcp_connect()
3534 tp->pushed_seq = tp->write_seq; in tcp_connect()
3537 tp->snd_nxt = TCP_SKB_CB(buff)->seq; in tcp_connect()
3538 tp->pushed_seq = TCP_SKB_CB(buff)->seq; in tcp_connect()
3544 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); in tcp_connect()
3556 int ato = icsk->icsk_ack.ato; in tcp_send_delayed_ack()
3560 const struct tcp_sock *tp = tcp_sk(sk); in tcp_send_delayed_ack() local
3563 if (icsk->icsk_ack.pingpong || in tcp_send_delayed_ack()
3564 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) in tcp_send_delayed_ack()
3570 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements in tcp_send_delayed_ack()
3573 if (tp->srtt_us) { in tcp_send_delayed_ack()
3574 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), in tcp_send_delayed_ack()
3588 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { in tcp_send_delayed_ack()
3592 if (icsk->icsk_ack.blocked || in tcp_send_delayed_ack()
3593 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { in tcp_send_delayed_ack()
3598 if (!time_before(timeout, icsk->icsk_ack.timeout)) in tcp_send_delayed_ack()
3599 timeout = icsk->icsk_ack.timeout; in tcp_send_delayed_ack()
3601 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; in tcp_send_delayed_ack()
3602 icsk->icsk_ack.timeout = timeout; in tcp_send_delayed_ack()
3603 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
3612 if (sk->sk_state == TCP_CLOSE) in __tcp_send_ack()
3623 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; in __tcp_send_ack()
3646 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); in tcp_send_ack()
3656 * Current solution: to send TWO zero-length segments in urgent mode:
3658 * out-of-date with SND.UNA-1 to probe window.
3662 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_probe_skb() local
3669 return -1; in tcp_xmit_probe_skb()
3677 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
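A tiny illustration of the trick in the comment above: the probe reuses sequence number SND.UNA-1, a byte the peer has already accepted, so it cannot be mistaken for new data yet still forces an ACK carrying the peer's current window (the sequence value below is invented).

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t snd_una = 1000200u;            /* invented oldest unacked sequence */
                int urgent = 0;

                /* mirrors tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK) */
                uint32_t probe_seq = snd_una - !urgent; /* SND.UNA-1 when not in urgent mode */

                printf("probe seq %u\n", (unsigned)probe_seq);
                return 0;
        }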
3685 if (sk->sk_state == TCP_ESTABLISHED) { in tcp_send_window_probe()
3686 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; in tcp_send_window_probe()
3695 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_wakeup() local
3698 if (sk->sk_state == TCP_CLOSE) in tcp_write_wakeup()
3699 return -1; in tcp_write_wakeup()
3702 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
3705 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
3707 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
3708 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
3714 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || in tcp_write_wakeup()
3715 skb->len > mss) { in tcp_write_wakeup()
3717 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
3720 return -1; in tcp_write_wakeup()
3724 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
3730 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) in tcp_write_wakeup()
3742 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_probe0() local
3749 if (tp->packets_out || tcp_write_queue_empty(sk)) { in tcp_send_probe0()
3751 icsk->icsk_probes_out = 0; in tcp_send_probe0()
3752 icsk->icsk_backoff = 0; in tcp_send_probe0()
3757 if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2) in tcp_send_probe0()
3758 icsk->icsk_backoff++; in tcp_send_probe0()
3759 icsk->icsk_probes_out++; in tcp_send_probe0()
3768 if (!icsk->icsk_probes_out) in tcp_send_probe0()
3769 icsk->icsk_probes_out = 1; in tcp_send_probe0()
3779 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; in tcp_rtx_synack()
3783 tcp_rsk(req)->txhash = net_tx_rndhash(); in tcp_rtx_synack()
3784 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL); in tcp_rtx_synack()
3789 tcp_sk(sk)->total_retrans++; in tcp_rtx_synack()