
Lines Matching refs:tp (uses of the struct tcp_sock pointer tp across the Linux kernel's TCP input path, net/ipv4/tcp_input.c; each entry gives the source line number, the matching code, and the enclosing function, with "argument" or "local" noting how tp is bound there)

326 static void tcp_ecn_queue_cwr(struct tcp_sock *tp)  in tcp_ecn_queue_cwr()  argument
328 if (tp->ecn_flags & TCP_ECN_OK) in tcp_ecn_queue_cwr()
329 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; in tcp_ecn_queue_cwr()
346 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) in tcp_ecn_withdraw_cwr() argument
348 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_withdraw_cwr()
353 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce() local
361 if (tp->ecn_flags & TCP_ECN_SEEN) in __tcp_ecn_check_ce()
368 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { in __tcp_ecn_check_ce()
371 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; in __tcp_ecn_check_ce()
373 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
378 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
389 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_synack() argument
391 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) in tcp_ecn_rcv_synack()
392 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_synack()
395 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_syn() argument
397 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) in tcp_ecn_rcv_syn()
398 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_syn()
401 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_ecn_echo() argument
403 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_rcv_ecn_echo()
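
The four ECN helpers above (lines 326-403) implement the RFC 3168 handshake: a SYN offering ECN must carry ECE=1 and CWR=1, a SYN-ACK confirming it must carry ECE=1 and CWR=0, and anything else clears TCP_ECN_OK; after that, ECE on a non-SYN segment is a congestion echo. A compilable userspace sketch of the same checks (a plain flag word and header-bit struct stand in for struct tcp_sock and struct tcphdr; names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define ECN_OK 0x1                 /* stand-in for TCP_ECN_OK */

    struct hdr_bits { bool ece, cwr, syn; };

    /* SYN-ACK must have ECE set and CWR clear, else withdraw ECN. */
    static void ecn_rcv_synack(uint32_t *ecn_flags, const struct hdr_bits *th)
    {
        if ((*ecn_flags & ECN_OK) && (!th->ece || th->cwr))
            *ecn_flags &= ~ECN_OK;
    }

    /* A SYN offering ECN must have both ECE and CWR set. */
    static void ecn_rcv_syn(uint32_t *ecn_flags, const struct hdr_bits *th)
    {
        if ((*ecn_flags & ECN_OK) && (!th->ece || !th->cwr))
            *ecn_flags &= ~ECN_OK;
    }

    /* ECE on a non-SYN segment echoes congestion back to the sender. */
    static bool ecn_rcv_ecn_echo(uint32_t ecn_flags, const struct hdr_bits *th)
    {
        return th->ece && !th->syn && (ecn_flags & ECN_OK);
    }
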
415 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand() local
423 per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + in tcp_sndbuf_expand()
430 nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp)); in tcp_sndbuf_expand()
431 nr_segs = max_t(u32, nr_segs, tp->reordering + 1); in tcp_sndbuf_expand()
474 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window() local
479 while (tp->rcv_ssthresh <= window) { in __tcp_grow_window()
511 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window() local
514 room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; in tcp_grow_window()
525 incr = 2 * tp->advmss; in tcp_grow_window()
531 tp->rcv_ssthresh += min(room, incr); in tcp_grow_window()
543 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space() local
549 tcp_mstamp_refresh(tp); in tcp_init_buffer_space()
550 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_init_buffer_space()
551 tp->rcvq_space.seq = tp->copied_seq; in tcp_init_buffer_space()
555 if (tp->window_clamp >= maxwin) { in tcp_init_buffer_space()
556 tp->window_clamp = maxwin; in tcp_init_buffer_space()
558 if (tcp_app_win && maxwin > 4 * tp->advmss) in tcp_init_buffer_space()
559 tp->window_clamp = max(maxwin - in tcp_init_buffer_space()
561 4 * tp->advmss); in tcp_init_buffer_space()
566 tp->window_clamp > 2 * tp->advmss && in tcp_init_buffer_space()
567 tp->window_clamp + tp->advmss > maxwin) in tcp_init_buffer_space()
568 tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); in tcp_init_buffer_space()
570 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); in tcp_init_buffer_space()
571 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_init_buffer_space()
572 tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd, in tcp_init_buffer_space()
573 (u32)TCP_INIT_CWND * tp->advmss); in tcp_init_buffer_space()
579 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window() local
595 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); in tcp_clamp_window()
607 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss() local
608 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); in tcp_initialize_rcv_mss()
610 hint = min(hint, tp->rcv_wnd / 2); in tcp_initialize_rcv_mss()
629 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) in tcp_rcv_rtt_update() argument
631 u32 new_sample = tp->rcv_rtt_est.rtt_us; in tcp_rcv_rtt_update()
658 tp->rcv_rtt_est.rtt_us = new_sample; in tcp_rcv_rtt_update()
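
Lines 631 and 658 bracket the receiver-side RTT filter used for buffer autotuning: rcv_rtt_est.rtt_us holds the estimate left-shifted by 3 (8x), so a 1/8-gain EWMA needs only integer shifts. A minimal sketch of the non-window-dependent path, under the assumption that the win_dep variant (a windowed minimum) is omitted; the helper name is illustrative:

    #include <stdint.h>

    /* est_x8 is 8 * smoothed receiver RTT; returns the updated value.
     * est += sample - est/8, i.e. est = 7/8 est + 1/8 sample. */
    static uint32_t rcv_rtt_update(uint32_t est_x8, uint32_t sample_us)
    {
        if (est_x8 == 0)
            return sample_us << 3;          /* first sample seeds it */
        long m = (long)sample_us - (long)(est_x8 >> 3);
        return (uint32_t)((long)est_x8 + m);
    }
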
661 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) in tcp_rcv_rtt_measure() argument
665 if (tp->rcv_rtt_est.time == 0) in tcp_rcv_rtt_measure()
667 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) in tcp_rcv_rtt_measure()
669 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); in tcp_rcv_rtt_measure()
672 tcp_rcv_rtt_update(tp, delta_us, 1); in tcp_rcv_rtt_measure()
675 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; in tcp_rcv_rtt_measure()
676 tp->rcv_rtt_est.time = tp->tcp_mstamp; in tcp_rcv_rtt_measure()
682 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts() local
684 if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr) in tcp_rcv_rtt_measure_ts()
686 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_rtt_measure_ts()
690 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; in tcp_rcv_rtt_measure_ts()
697 tcp_rcv_rtt_update(tp, delta_us, 0); in tcp_rcv_rtt_measure_ts()
708 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust() local
714 tcp_mstamp_refresh(tp); in tcp_rcv_space_adjust()
715 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); in tcp_rcv_space_adjust()
716 if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) in tcp_rcv_space_adjust()
720 copied = tp->copied_seq - tp->rcvq_space.seq; in tcp_rcv_space_adjust()
721 if (copied <= tp->rcvq_space.space) in tcp_rcv_space_adjust()
741 rcvwin = ((u64)copied << 1) + 16 * tp->advmss; in tcp_rcv_space_adjust()
744 grow = rcvwin * (copied - tp->rcvq_space.space); in tcp_rcv_space_adjust()
745 do_div(grow, tp->rcvq_space.space); in tcp_rcv_space_adjust()
748 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); in tcp_rcv_space_adjust()
749 while (tcp_win_from_space(sk, rcvmem) < tp->advmss) in tcp_rcv_space_adjust()
752 do_div(rcvwin, tp->advmss); in tcp_rcv_space_adjust()
759 tp->window_clamp = tcp_win_from_space(sk, rcvbuf); in tcp_rcv_space_adjust()
762 tp->rcvq_space.space = copied; in tcp_rcv_space_adjust()
765 tp->rcvq_space.seq = tp->copied_seq; in tcp_rcv_space_adjust()
766 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_rcv_space_adjust()
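
tcp_rcv_space_adjust (lines 708-766) is the receive-side autotuning step (dynamic right-sizing): once per RTT it measures how many bytes the application actually copied, targets at least twice that plus some MSS slack, and grows the target in proportion to how far the measurement overshot the previous estimate. A hedged sketch of just the window-target arithmetic; sysctl checks, truesize conversion, and sk_rcvbuf clamping are deliberately omitted, and the doubled proportional term reflects the kernel's "assume slow start" allowance:

    #include <stdint.h>

    static uint64_t drs_target_window(uint64_t copied, uint64_t prev_space,
                                      uint32_t advmss)
    {
        uint64_t rcvwin = (copied << 1) + 16ULL * advmss;

        if (prev_space && copied > prev_space) {
            uint64_t grow = rcvwin * (copied - prev_space) / prev_space;
            rcvwin += grow << 1;   /* +25% rate -> roughly 3 * copied */
        }
        return rcvwin;
    }
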
781 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv() local
789 tcp_rcv_rtt_measure(tp); in tcp_event_data_recv()
836 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator() local
838 u32 srtt = tp->srtt_us; in tcp_rtt_estimator()
861 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
873 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
875 tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ in tcp_rtt_estimator()
876 if (tp->mdev_us > tp->mdev_max_us) { in tcp_rtt_estimator()
877 tp->mdev_max_us = tp->mdev_us; in tcp_rtt_estimator()
878 if (tp->mdev_max_us > tp->rttvar_us) in tcp_rtt_estimator()
879 tp->rttvar_us = tp->mdev_max_us; in tcp_rtt_estimator()
881 if (after(tp->snd_una, tp->rtt_seq)) { in tcp_rtt_estimator()
882 if (tp->mdev_max_us < tp->rttvar_us) in tcp_rtt_estimator()
883 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2; in tcp_rtt_estimator()
884 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
885 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
892 tp->mdev_us = m << 1; /* make sure rto = 3*rtt */ in tcp_rtt_estimator()
893 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
894 tp->mdev_max_us = tp->rttvar_us; in tcp_rtt_estimator()
895 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
899 tp->srtt_us = max(1U, srtt); in tcp_rtt_estimator()
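
tcp_rtt_estimator (lines 836-899) is the classic Jacobson/Karels filter of RFC 6298 in fixed point: srtt_us stores 8*SRTT and mdev_us stores 4*mdev, so the 1/8 and 1/4 gains become shifts, and the first sample seeds mdev at 2*rtt so the initial RTO comes out near 3*rtt (line 892). A compilable sketch of one update; the kernel's extra damping for negative errors and the rttvar/mdev_max bookkeeping (lines 876-885) are omitted:

    #include <stdint.h>

    struct rtt_est {
        uint32_t srtt_x8;   /* 8 * smoothed RTT (usec), like tp->srtt_us */
        uint32_t mdev_x4;   /* 4 * mean deviation,   like tp->mdev_us   */
    };

    static void rtt_update(struct rtt_est *e, uint32_t m_us)
    {
        long m = m_us;

        if (e->srtt_x8) {
            m -= (e->srtt_x8 >> 3);         /* m is now the error term   */
            e->srtt_x8 += (uint32_t)m;      /* srtt = 7/8 srtt + 1/8 new */
            if (m < 0)
                m = -m;
            m -= (e->mdev_x4 >> 2);
            e->mdev_x4 += (uint32_t)m;      /* mdev = 3/4 mdev + 1/4 |err| */
        } else {
            e->srtt_x8 = (uint32_t)m << 3;  /* first measurement          */
            e->mdev_x4 = (uint32_t)m << 1;  /* makes rto ~ 3*rtt          */
        }
    }
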
904 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate() local
908 rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3); in tcp_update_pacing_rate()
918 if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2) in tcp_update_pacing_rate()
923 rate *= max(tcp_snd_cwnd(tp), tp->packets_out); in tcp_update_pacing_rate()
925 if (likely(tp->srtt_us)) in tcp_update_pacing_rate()
926 do_div(rate, tp->srtt_us); in tcp_update_pacing_rate()
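
The pacing-rate fragments (lines 908-926) reduce to rate = gain * cwnd * mss / srtt: srtt_us is 8*SRTT, which the <<3 in the constant cancels, and the /100 absorbs the percentage gains (by default 200% while cwnd is below ssthresh/2, 120% after). A standalone sketch with a worked value; the sysctl lookups and the ceiling against sk_max_pacing_rate are omitted:

    #include <stdint.h>

    static uint64_t pacing_rate(uint32_t mss, uint32_t cwnd,
                                uint32_t srtt_x8_us, uint32_t gain_percent)
    {
        uint64_t rate = (uint64_t)mss * ((1000000ULL / 100) << 3);

        rate *= gain_percent;
        rate *= cwnd;
        if (srtt_x8_us)
            rate /= srtt_x8_us;
        return rate;   /* bytes per second */
    }

    /* e.g. mss=1448, cwnd=10, srtt=50 ms (srtt_x8_us=400000), gain 120%
     * -> 347,520 B/s, i.e. 1.2 * 10 * 1448 / 0.05. */
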
941 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto() local
952 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
966 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) in tcp_init_cwnd() argument
972 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); in tcp_init_cwnd()
995 static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq, in tcp_dsack_seen() argument
1005 if (seq_len > tp->max_window) in tcp_dsack_seen()
1007 if (seq_len > tp->mss_cache) in tcp_dsack_seen()
1008 dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache); in tcp_dsack_seen()
1009 else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq) in tcp_dsack_seen()
1012 tp->dsack_dups += dup_segs; in tcp_dsack_seen()
1014 if (tp->dsack_dups > tp->total_retrans) in tcp_dsack_seen()
1017 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; in tcp_dsack_seen()
1024 if (tp->reord_seen && !(state->flag & FLAG_DSACK_TLP)) in tcp_dsack_seen()
1025 tp->rack.dsack_seen = 1; in tcp_dsack_seen()
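
tcp_dsack_seen (lines 995-1025) turns a DSACK block into an estimated count of duplicate segments: blocks wider than the peer's largest-ever window are dismissed as bogus, multi-MSS blocks are rounded up to whole segments, and a block matching tlp_high_seq is a tail-loss-probe echo counted as one. A small sketch of the sizing rule alone (the flag and RACK bookkeeping are left out):

    #include <stdint.h>

    /* Returns 0 for a bogus block, else the implied duplicate-segment
     * count for the DSACK range [start_seq, end_seq). */
    static uint32_t dsack_dup_segs(uint32_t start_seq, uint32_t end_seq,
                                   uint32_t mss, uint32_t max_window)
    {
        uint32_t seq_len = end_seq - start_seq;

        if (seq_len > max_window)
            return 0;
        if (seq_len > mss)
            return (seq_len + mss - 1) / mss;   /* DIV_ROUND_UP */
        return 1;
    }
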
1041 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reordering() local
1042 const u32 mss = tp->mss_cache; in tcp_check_sack_reordering()
1045 fack = tcp_highest_sack_seq(tp); in tcp_check_sack_reordering()
1050 if ((metric > tp->reordering * mss) && mss) { in tcp_check_sack_reordering()
1053 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_check_sack_reordering()
1054 tp->reordering, in tcp_check_sack_reordering()
1056 tp->sacked_out, in tcp_check_sack_reordering()
1057 tp->undo_marker ? tp->undo_retrans : 0); in tcp_check_sack_reordering()
1059 tp->reordering = min_t(u32, (metric + mss - 1) / mss, in tcp_check_sack_reordering()
1064 tp->reord_seen++; in tcp_check_sack_reordering()
1074 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) in tcp_verify_retransmit_hint() argument
1076 if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) || in tcp_verify_retransmit_hint()
1077 (tp->retransmit_skb_hint && in tcp_verify_retransmit_hint()
1079 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))) in tcp_verify_retransmit_hint()
1080 tp->retransmit_skb_hint = skb; in tcp_verify_retransmit_hint()
1086 static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_notify_skb_loss_event() argument
1088 tp->lost += tcp_skb_pcount(skb); in tcp_notify_skb_loss_event()
1094 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_skb_lost() local
1099 tcp_verify_retransmit_hint(tp, skb); in tcp_mark_skb_lost()
1104 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_mark_skb_lost()
1107 tcp_notify_skb_loss_event(tp, skb); in tcp_mark_skb_lost()
1110 tp->lost_out += tcp_skb_pcount(skb); in tcp_mark_skb_lost()
1112 tcp_notify_skb_loss_event(tp, skb); in tcp_mark_skb_lost()
1117 static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, in tcp_count_delivered() argument
1120 tp->delivered += delivered; in tcp_count_delivered()
1122 tp->delivered_ce += delivered; in tcp_count_delivered()
1218 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, in tcp_is_sackblock_valid() argument
1222 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) in tcp_is_sackblock_valid()
1226 if (!before(start_seq, tp->snd_nxt)) in tcp_is_sackblock_valid()
1232 if (after(start_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1235 if (!is_dsack || !tp->undo_marker) in tcp_is_sackblock_valid()
1239 if (after(end_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1242 if (!before(start_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1246 if (!after(end_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1252 return !before(start_seq, end_seq - tp->max_window); in tcp_is_sackblock_valid()
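
tcp_is_sackblock_valid (lines 1218-1252) encodes the sanity rules visible in the fragments: a block must be non-empty and not extend past snd_nxt; anything starting above snd_una is an ordinary SACK and is fine; a block at or below snd_una is acceptable only as a D-SACK while undo state exists, must sit entirely below snd_una, and if it dips below undo_marker may reach back at most one max_window. A compilable reconstruction with wrap-safe comparisons (plain arguments replace the struct tcp_sock fields; the per-branch return values are inferred):

    #include <stdbool.h>
    #include <stdint.h>

    static bool before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }
    static bool after(uint32_t s1, uint32_t s2)  { return before(s2, s1); }

    static bool sackblock_valid(bool is_dsack,
                                uint32_t start_seq, uint32_t end_seq,
                                uint32_t snd_una, uint32_t snd_nxt,
                                uint32_t undo_marker, uint32_t max_window)
    {
        if (after(end_seq, snd_nxt) || !before(start_seq, end_seq))
            return false;           /* empty, reversed, or beyond sent data */
        if (!before(start_seq, snd_nxt))
            return false;
        if (after(start_seq, snd_una))
            return true;            /* ordinary SACK in the open window */
        if (!is_dsack || !undo_marker)
            return false;           /* only a D-SACK may cover old data */
        if (after(end_seq, snd_una))
            return false;           /* D-SACK must lie wholly below una */
        if (!before(start_seq, undo_marker))
            return true;
        if (!after(end_seq, undo_marker))
            return false;           /* entirely below the undo range */
        return !before(start_seq, end_seq - max_window);
    }
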
1259 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack() local
1277 dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state); in tcp_check_dsack()
1286 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_check_dsack()
1288 after(end_seq_0, tp->undo_marker)) in tcp_check_dsack()
1289 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs); in tcp_check_dsack()
1357 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one() local
1361 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_sacktag_one()
1362 after(end_seq, tp->undo_marker)) in tcp_sacktag_one()
1363 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount); in tcp_sacktag_one()
1370 if (!after(end_seq, tp->snd_una)) in tcp_sacktag_one()
1374 tcp_rack_advance(tp, sacked, end_seq, xmit_time); in tcp_sacktag_one()
1383 tp->lost_out -= pcount; in tcp_sacktag_one()
1384 tp->retrans_out -= pcount; in tcp_sacktag_one()
1392 tcp_highest_sack_seq(tp)) && in tcp_sacktag_one()
1396 if (!after(end_seq, tp->high_seq)) in tcp_sacktag_one()
1405 tp->lost_out -= pcount; in tcp_sacktag_one()
1411 tp->sacked_out += pcount; in tcp_sacktag_one()
1416 if (tp->lost_skb_hint && in tcp_sacktag_one()
1417 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) in tcp_sacktag_one()
1418 tp->lost_cnt_hint += pcount; in tcp_sacktag_one()
1427 tp->retrans_out -= pcount; in tcp_sacktag_one()
1442 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb() local
1459 if (skb == tp->lost_skb_hint) in tcp_shifted_skb()
1460 tp->lost_cnt_hint += pcount; in tcp_shifted_skb()
1492 if (skb == tp->retransmit_skb_hint) in tcp_shifted_skb()
1493 tp->retransmit_skb_hint = prev; in tcp_shifted_skb()
1494 if (skb == tp->lost_skb_hint) { in tcp_shifted_skb()
1495 tp->lost_skb_hint = prev; in tcp_shifted_skb()
1496 tp->lost_cnt_hint -= tcp_skb_pcount(prev); in tcp_shifted_skb()
1555 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data() local
1569 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_shift_skb_data()
1649 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) in tcp_shift_skb_data()
1694 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk() local
1753 tcp_highest_sack_seq(tp))) in tcp_sacktag_walk()
1809 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) in tcp_sack_cache_ok() argument
1811 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sack_cache_ok()
1818 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue() local
1832 state->reord = tp->snd_nxt; in tcp_sacktag_write_queue()
1834 if (!tp->sacked_out) in tcp_sacktag_write_queue()
1844 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) in tcp_sacktag_write_queue()
1847 if (!tp->packets_out) in tcp_sacktag_write_queue()
1858 if (!tcp_is_sackblock_valid(tp, dup_sack, in tcp_sacktag_write_queue()
1864 if (!tp->undo_marker) in tcp_sacktag_write_queue()
1870 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && in tcp_sacktag_write_queue()
1871 !after(sp[used_sacks].end_seq, tp->snd_una)) in tcp_sacktag_write_queue()
1909 if (!tp->sacked_out) { in tcp_sacktag_write_queue()
1911 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sacktag_write_queue()
1913 cache = tp->recv_sack_cache; in tcp_sacktag_write_queue()
1915 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && in tcp_sacktag_write_queue()
1930 while (tcp_sack_cache_ok(tp, cache) && in tcp_sacktag_write_queue()
1935 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && in tcp_sacktag_write_queue()
1957 if (tcp_highest_sack_seq(tp) == cache->end_seq) { in tcp_sacktag_write_queue()
1972 if (!before(start_seq, tcp_highest_sack_seq(tp))) { in tcp_sacktag_write_queue()
1988 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { in tcp_sacktag_write_queue()
1989 tp->recv_sack_cache[i].start_seq = 0; in tcp_sacktag_write_queue()
1990 tp->recv_sack_cache[i].end_seq = 0; in tcp_sacktag_write_queue()
1993 tp->recv_sack_cache[i++] = sp[j]; in tcp_sacktag_write_queue()
1995 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker) in tcp_sacktag_write_queue()
1998 tcp_verify_left_out(tp); in tcp_sacktag_write_queue()
2002 WARN_ON((int)tp->sacked_out < 0); in tcp_sacktag_write_queue()
2003 WARN_ON((int)tp->lost_out < 0); in tcp_sacktag_write_queue()
2004 WARN_ON((int)tp->retrans_out < 0); in tcp_sacktag_write_queue()
2005 WARN_ON((int)tcp_packets_in_flight(tp) < 0); in tcp_sacktag_write_queue()
2013 static bool tcp_limit_reno_sacked(struct tcp_sock *tp) in tcp_limit_reno_sacked() argument
2017 holes = max(tp->lost_out, 1U); in tcp_limit_reno_sacked()
2018 holes = min(holes, tp->packets_out); in tcp_limit_reno_sacked()
2020 if ((tp->sacked_out + holes) > tp->packets_out) { in tcp_limit_reno_sacked()
2021 tp->sacked_out = tp->packets_out - holes; in tcp_limit_reno_sacked()
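
tcp_limit_reno_sacked (lines 2013-2021) covers the non-SACK (NewReno) case, where sacked_out is only inferred from duplicate ACKs: the inference cannot exceed what is actually in flight, so sacked_out plus at least one hole is clamped to packets_out, and a clamp signals reordering to the caller. Sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true if the inferred sacked_out had to be clamped
     * (the caller then raises its reordering estimate). */
    static bool limit_reno_sacked(uint32_t *sacked_out, uint32_t lost_out,
                                  uint32_t packets_out)
    {
        uint32_t holes = lost_out > 1 ? lost_out : 1;

        if (holes > packets_out)
            holes = packets_out;
        if (*sacked_out + holes > packets_out) {
            *sacked_out = packets_out - holes;
            return true;
        }
        return false;
    }
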
2033 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering() local
2035 if (!tcp_limit_reno_sacked(tp)) in tcp_check_reno_reordering()
2038 tp->reordering = min_t(u32, tp->packets_out + addend, in tcp_check_reno_reordering()
2040 tp->reord_seen++; in tcp_check_reno_reordering()
2049 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack() local
2050 u32 prior_sacked = tp->sacked_out; in tcp_add_reno_sack()
2053 tp->sacked_out += num_dupack; in tcp_add_reno_sack()
2055 delivered = tp->sacked_out - prior_sacked; in tcp_add_reno_sack()
2057 tcp_count_delivered(tp, delivered, ece_ack); in tcp_add_reno_sack()
2058 tcp_verify_left_out(tp); in tcp_add_reno_sack()
2066 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks() local
2070 tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1), in tcp_remove_reno_sacks()
2072 if (acked - 1 >= tp->sacked_out) in tcp_remove_reno_sacks()
2073 tp->sacked_out = 0; in tcp_remove_reno_sacks()
2075 tp->sacked_out -= acked - 1; in tcp_remove_reno_sacks()
2078 tcp_verify_left_out(tp); in tcp_remove_reno_sacks()
2081 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) in tcp_reset_reno_sack() argument
2083 tp->sacked_out = 0; in tcp_reset_reno_sack()
2086 void tcp_clear_retrans(struct tcp_sock *tp) in tcp_clear_retrans() argument
2088 tp->retrans_out = 0; in tcp_clear_retrans()
2089 tp->lost_out = 0; in tcp_clear_retrans()
2090 tp->undo_marker = 0; in tcp_clear_retrans()
2091 tp->undo_retrans = -1; in tcp_clear_retrans()
2092 tp->sacked_out = 0; in tcp_clear_retrans()
2095 static inline void tcp_init_undo(struct tcp_sock *tp) in tcp_init_undo() argument
2097 tp->undo_marker = tp->snd_una; in tcp_init_undo()
2099 tp->undo_retrans = tp->retrans_out ? : -1; in tcp_init_undo()
2114 struct tcp_sock *tp = tcp_sk(sk); in tcp_timeout_mark_lost() local
2122 tp->sacked_out = 0; in tcp_timeout_mark_lost()
2124 tp->is_sack_reneg = 1; in tcp_timeout_mark_lost()
2125 } else if (tcp_is_reno(tp)) { in tcp_timeout_mark_lost()
2126 tcp_reset_reno_sack(tp); in tcp_timeout_mark_lost()
2134 tcp_rack_skb_timeout(tp, skb, 0) > 0) in tcp_timeout_mark_lost()
2138 tcp_verify_left_out(tp); in tcp_timeout_mark_lost()
2139 tcp_clear_all_retrans_hints(tp); in tcp_timeout_mark_lost()
2146 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss() local
2155 !after(tp->high_seq, tp->snd_una) || in tcp_enter_loss()
2157 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
2158 tp->prior_cwnd = tcp_snd_cwnd(tp); in tcp_enter_loss()
2159 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
2161 tcp_init_undo(tp); in tcp_enter_loss()
2163 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1); in tcp_enter_loss()
2164 tp->snd_cwnd_cnt = 0; in tcp_enter_loss()
2165 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_enter_loss()
2172 tp->sacked_out >= reordering) in tcp_enter_loss()
2173 tp->reordering = min_t(unsigned int, tp->reordering, in tcp_enter_loss()
2177 tp->high_seq = tp->snd_nxt; in tcp_enter_loss()
2178 tcp_ecn_queue_cwr(tp); in tcp_enter_loss()
2184 tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) && in tcp_enter_loss()
2203 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging() local
2204 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), in tcp_check_sack_reneging()
2226 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) in tcp_dupack_heuristics() argument
2228 return tp->sacked_out + 1; in tcp_dupack_heuristics()
2330 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover() local
2333 if (tp->lost_out) in tcp_time_to_recover()
2337 if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering) in tcp_time_to_recover()
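
tcp_time_to_recover (lines 2330-2337) is the gate into fast retransmit: recovery starts once something is already marked lost, or, when RACK is not doing the loss detection, once the dupack count (tcp_dupack_heuristics, i.e. sacked_out + 1) exceeds the reordering estimate. A one-screen sketch of that decision, assuming the two fragments shown are the only triggers:

    #include <stdbool.h>
    #include <stdint.h>

    static bool time_to_recover(uint32_t lost_out, uint32_t sacked_out,
                                uint32_t reordering, bool using_rack)
    {
        if (lost_out)
            return true;                    /* losses already scored */
        /* classic dupack threshold, skipped when RACK handles it */
        return !using_rack && sacked_out + 1 > reordering;
    }
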
2350 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost() local
2354 const u32 loss_high = tp->snd_nxt; in tcp_mark_head_lost()
2356 WARN_ON(packets > tp->packets_out); in tcp_mark_head_lost()
2357 skb = tp->lost_skb_hint; in tcp_mark_head_lost()
2360 if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una)) in tcp_mark_head_lost()
2362 cnt = tp->lost_cnt_hint; in tcp_mark_head_lost()
2371 tp->lost_skb_hint = skb; in tcp_mark_head_lost()
2372 tp->lost_cnt_hint = cnt; in tcp_mark_head_lost()
2389 tcp_verify_left_out(tp); in tcp_mark_head_lost()
2396 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard() local
2398 if (tcp_is_sack(tp)) { in tcp_update_scoreboard()
2399 int sacked_upto = tp->sacked_out - tp->reordering; in tcp_update_scoreboard()
2407 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) in tcp_tsopt_ecr_before() argument
2409 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_tsopt_ecr_before()
2410 before(tp->rx_opt.rcv_tsecr, when); in tcp_tsopt_ecr_before()
2416 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp, in tcp_skb_spurious_retrans() argument
2420 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb)); in tcp_skb_spurious_retrans()
2426 static inline bool tcp_packet_delayed(const struct tcp_sock *tp) in tcp_packet_delayed() argument
2428 return tp->retrans_stamp && in tcp_packet_delayed()
2429 tcp_tsopt_ecr_before(tp, tp->retrans_stamp); in tcp_packet_delayed()
2450 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done() local
2453 if (tp->retrans_out) in tcp_any_retrans_done()
2466 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO() local
2473 tcp_snd_cwnd(tp), tcp_left_out(tp), in DBGUNDO()
2474 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2475 tp->packets_out); in DBGUNDO()
2482 tcp_snd_cwnd(tp), tcp_left_out(tp), in DBGUNDO()
2483 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2484 tp->packets_out); in DBGUNDO()
2492 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction() local
2500 tp->lost_out = 0; in tcp_undo_cwnd_reduction()
2501 tcp_clear_all_retrans_hints(tp); in tcp_undo_cwnd_reduction()
2504 if (tp->prior_ssthresh) { in tcp_undo_cwnd_reduction()
2507 tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk)); in tcp_undo_cwnd_reduction()
2509 if (tp->prior_ssthresh > tp->snd_ssthresh) { in tcp_undo_cwnd_reduction()
2510 tp->snd_ssthresh = tp->prior_ssthresh; in tcp_undo_cwnd_reduction()
2511 tcp_ecn_withdraw_cwr(tp); in tcp_undo_cwnd_reduction()
2514 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_undo_cwnd_reduction()
2515 tp->undo_marker = 0; in tcp_undo_cwnd_reduction()
2516 tp->rack.advanced = 1; /* Force RACK to re-exam losses */ in tcp_undo_cwnd_reduction()
2519 static inline bool tcp_may_undo(const struct tcp_sock *tp) in tcp_may_undo() argument
2521 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); in tcp_may_undo()
2526 struct tcp_sock *tp = tcp_sk(sk); in tcp_is_non_sack_preventing_reopen() local
2528 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { in tcp_is_non_sack_preventing_reopen()
2533 tp->retrans_stamp = 0; in tcp_is_non_sack_preventing_reopen()
2542 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery() local
2544 if (tcp_may_undo(tp)) { in tcp_try_undo_recovery()
2558 } else if (tp->rack.reo_wnd_persist) { in tcp_try_undo_recovery()
2559 tp->rack.reo_wnd_persist--; in tcp_try_undo_recovery()
2564 tp->is_sack_reneg = 0; in tcp_try_undo_recovery()
2571 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack() local
2573 if (tp->undo_marker && !tp->undo_retrans) { in tcp_try_undo_dsack()
2574 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH, in tcp_try_undo_dsack()
2575 tp->rack.reo_wnd_persist + 1); in tcp_try_undo_dsack()
2587 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss() local
2589 if (frto_undo || tcp_may_undo(tp)) { in tcp_try_undo_loss()
2600 if (frto_undo || tcp_is_sack(tp)) { in tcp_try_undo_loss()
2602 tp->is_sack_reneg = 0; in tcp_try_undo_loss()
2620 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction() local
2622 tp->high_seq = tp->snd_nxt; in tcp_init_cwnd_reduction()
2623 tp->tlp_high_seq = 0; in tcp_init_cwnd_reduction()
2624 tp->snd_cwnd_cnt = 0; in tcp_init_cwnd_reduction()
2625 tp->prior_cwnd = tcp_snd_cwnd(tp); in tcp_init_cwnd_reduction()
2626 tp->prr_delivered = 0; in tcp_init_cwnd_reduction()
2627 tp->prr_out = 0; in tcp_init_cwnd_reduction()
2628 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2629 tcp_ecn_queue_cwr(tp); in tcp_init_cwnd_reduction()
2634 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction() local
2636 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); in tcp_cwnd_reduction()
2638 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) in tcp_cwnd_reduction()
2641 tp->prr_delivered += newly_acked_sacked; in tcp_cwnd_reduction()
2643 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + in tcp_cwnd_reduction()
2644 tp->prior_cwnd - 1; in tcp_cwnd_reduction()
2645 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; in tcp_cwnd_reduction()
2648 max_t(int, tp->prr_delivered - tp->prr_out, in tcp_cwnd_reduction()
2654 sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1)); in tcp_cwnd_reduction()
2655 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt); in tcp_cwnd_reduction()
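
tcp_cwnd_reduction (lines 2634-2655) is Proportional Rate Reduction (RFC 6937): while more than ssthresh packets are in flight, sends are rationed so cwnd glides down to ssthresh over exactly one round trip, using sndcnt = ceil(prr_delivered * ssthresh / prior_cwnd) - prr_out; with prior_cwnd=20 and ssthresh=10, one segment may go out for every two delivered. A sketch under the assumption that the below-ssthresh branch is reduced to its simplest form (the kernel additionally caps it by the ssthresh deficit and the newly ACKed count):

    #include <stdint.h>

    static int prr_sndcnt(uint32_t ssthresh, uint32_t prior_cwnd,
                          uint32_t prr_delivered, uint32_t prr_out,
                          uint32_t in_flight)
    {
        int sndcnt;

        if (in_flight > ssthresh) {
            uint64_t dividend =
                (uint64_t)ssthresh * prr_delivered + prior_cwnd - 1;
            sndcnt = (int)(dividend / prior_cwnd) - (int)prr_out;
        } else {
            sndcnt = (int)(prr_delivered - prr_out);  /* simplified */
        }
        /* always allow one probe segment if nothing sent yet */
        if (sndcnt < (prr_out ? 0 : 1))
            sndcnt = prr_out ? 0 : 1;
        return sndcnt;
    }
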
2660 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction() local
2666 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && in tcp_end_cwnd_reduction()
2667 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { in tcp_end_cwnd_reduction()
2668 tcp_snd_cwnd_set(tp, tp->snd_ssthresh); in tcp_end_cwnd_reduction()
2669 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_end_cwnd_reduction()
2677 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr() local
2679 tp->prior_ssthresh = 0; in tcp_enter_cwr()
2681 tp->undo_marker = 0; in tcp_enter_cwr()
2690 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open() local
2693 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2698 tp->high_seq = tp->snd_nxt; in tcp_try_keep_open()
2704 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open() local
2706 tcp_verify_left_out(tp); in tcp_try_to_open()
2709 tp->retrans_stamp = 0; in tcp_try_to_open()
2730 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success() local
2734 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2736 val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache); in tcp_mtup_probe_success()
2739 tcp_snd_cwnd_set(tp, max_t(u32, 1U, val)); in tcp_mtup_probe_success()
2741 tp->snd_cwnd_cnt = 0; in tcp_mtup_probe_success()
2742 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_mtup_probe_success()
2743 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2758 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit() local
2772 if (tp->syn_data && sk->sk_state == TCP_SYN_SENT) in tcp_simple_retransmit()
2782 tcp_clear_retrans_hints_partial(tp); in tcp_simple_retransmit()
2784 if (!tp->lost_out) in tcp_simple_retransmit()
2787 if (tcp_is_reno(tp)) in tcp_simple_retransmit()
2788 tcp_limit_reno_sacked(tp); in tcp_simple_retransmit()
2790 tcp_verify_left_out(tp); in tcp_simple_retransmit()
2798 tp->high_seq = tp->snd_nxt; in tcp_simple_retransmit()
2799 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_simple_retransmit()
2800 tp->prior_ssthresh = 0; in tcp_simple_retransmit()
2801 tp->undo_marker = 0; in tcp_simple_retransmit()
2810 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery() local
2813 if (tcp_is_reno(tp)) in tcp_enter_recovery()
2820 tp->prior_ssthresh = 0; in tcp_enter_recovery()
2821 tcp_init_undo(tp); in tcp_enter_recovery()
2825 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2837 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss() local
2838 bool recovered = !before(tp->snd_una, tp->high_seq); in tcp_process_loss()
2840 if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) && in tcp_process_loss()
2844 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ in tcp_process_loss()
2852 if (after(tp->snd_nxt, tp->high_seq)) { in tcp_process_loss()
2854 tp->frto = 0; /* Step 3.a. loss was real */ in tcp_process_loss()
2856 tp->high_seq = tp->snd_nxt; in tcp_process_loss()
2862 after(tcp_wnd_end(tp), tp->snd_nxt)) { in tcp_process_loss()
2866 tp->frto = 0; in tcp_process_loss()
2875 if (tcp_is_reno(tp)) { in tcp_process_loss()
2879 if (after(tp->snd_nxt, tp->high_seq) && num_dupack) in tcp_process_loss()
2882 tcp_reset_reno_sack(tp); in tcp_process_loss()
2889 struct tcp_sock *tp = tcp_sk(sk); in tcp_force_fast_retransmit() local
2891 return after(tcp_highest_sack_seq(tp), in tcp_force_fast_retransmit()
2892 tp->snd_una + tp->reordering * tp->mss_cache); in tcp_force_fast_retransmit()
2899 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial() local
2901 if (tp->undo_marker && tcp_packet_delayed(tp)) { in tcp_try_undo_partial()
2912 if (tp->retrans_out) in tcp_try_undo_partial()
2916 tp->retrans_stamp = 0; in tcp_try_undo_partial()
2931 struct tcp_sock *tp = tcp_sk(sk); in tcp_identify_packet_loss() local
2936 if (unlikely(tcp_is_reno(tp))) { in tcp_identify_packet_loss()
2939 u32 prior_retrans = tp->retrans_out; in tcp_identify_packet_loss()
2943 if (prior_retrans > tp->retrans_out) in tcp_identify_packet_loss()
2964 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert() local
2970 if (!tp->packets_out && tp->sacked_out) in tcp_fastretrans_alert()
2971 tp->sacked_out = 0; in tcp_fastretrans_alert()
2976 tp->prior_ssthresh = 0; in tcp_fastretrans_alert()
2983 tcp_verify_left_out(tp); in tcp_fastretrans_alert()
2988 WARN_ON(tp->retrans_out != 0 && !tp->syn_data); in tcp_fastretrans_alert()
2989 tp->retrans_stamp = 0; in tcp_fastretrans_alert()
2990 } else if (!before(tp->snd_una, tp->high_seq)) { in tcp_fastretrans_alert()
2995 if (tp->snd_una != tp->high_seq) { in tcp_fastretrans_alert()
3002 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
3003 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
3015 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
3042 if (tcp_is_reno(tp)) { in tcp_fastretrans_alert()
3044 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
3060 tp->snd_una == tp->mtu_probe.probe_seq_start) { in tcp_fastretrans_alert()
3063 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); in tcp_fastretrans_alert()
3081 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_rtt_min() local
3083 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) { in tcp_update_rtt_min()
3090 minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32, in tcp_update_rtt_min()
3098 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt() local
3114 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_ack_update_rtt()
3116 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; in tcp_ack_update_rtt()
3169 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto() local
3174 if (rcu_access_pointer(tp->fastopen_rsk)) in tcp_rearm_rto()
3177 if (!tp->packets_out) { in tcp_rearm_rto()
3205 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked() local
3208 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); in tcp_tso_acked()
3211 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3251 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue() local
3252 u32 prior_sacked = tp->sacked_out; in tcp_clean_rtx_queue()
3253 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */ in tcp_clean_rtx_queue()
3273 if (after(scb->end_seq, tp->snd_una)) { in tcp_clean_rtx_queue()
3275 !after(tp->snd_una, scb->seq)) in tcp_clean_rtx_queue()
3288 tp->retrans_out -= acked_pcount; in tcp_clean_rtx_queue()
3299 if (!after(scb->end_seq, tp->high_seq)) in tcp_clean_rtx_queue()
3304 tp->sacked_out -= acked_pcount; in tcp_clean_rtx_queue()
3305 } else if (tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3306 tcp_count_delivered(tp, acked_pcount, ece_ack); in tcp_clean_rtx_queue()
3307 if (!tcp_skb_spurious_retrans(tp, skb)) in tcp_clean_rtx_queue()
3308 tcp_rack_advance(tp, sacked, scb->end_seq, in tcp_clean_rtx_queue()
3312 tp->lost_out -= acked_pcount; in tcp_clean_rtx_queue()
3314 tp->packets_out -= acked_pcount; in tcp_clean_rtx_queue()
3329 tp->retrans_stamp = 0; in tcp_clean_rtx_queue()
3338 if (unlikely(skb == tp->retransmit_skb_hint)) in tcp_clean_rtx_queue()
3339 tp->retransmit_skb_hint = NULL; in tcp_clean_rtx_queue()
3340 if (unlikely(skb == tp->lost_skb_hint)) in tcp_clean_rtx_queue()
3341 tp->lost_skb_hint = NULL; in tcp_clean_rtx_queue()
3349 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) in tcp_clean_rtx_queue()
3350 tp->snd_up = tp->snd_una; in tcp_clean_rtx_queue()
3359 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); in tcp_clean_rtx_queue()
3360 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); in tcp_clean_rtx_queue()
3362 if (pkts_acked == 1 && last_in_flight < tp->mss_cache && in tcp_clean_rtx_queue()
3364 sack->rate->prior_delivered + 1 == tp->delivered && in tcp_clean_rtx_queue()
3374 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt); in tcp_clean_rtx_queue()
3375 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt); in tcp_clean_rtx_queue()
3383 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { in tcp_clean_rtx_queue()
3387 if (tcp_is_reno(tp)) { in tcp_clean_rtx_queue()
3405 delta = prior_sacked - tp->sacked_out; in tcp_clean_rtx_queue()
3406 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); in tcp_clean_rtx_queue()
3409 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, in tcp_clean_rtx_queue()
3427 WARN_ON((int)tp->sacked_out < 0); in tcp_clean_rtx_queue()
3428 WARN_ON((int)tp->lost_out < 0); in tcp_clean_rtx_queue()
3429 WARN_ON((int)tp->retrans_out < 0); in tcp_clean_rtx_queue()
3430 if (!tp->packets_out && tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3432 if (tp->lost_out) { in tcp_clean_rtx_queue()
3434 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3435 tp->lost_out = 0; in tcp_clean_rtx_queue()
3437 if (tp->sacked_out) { in tcp_clean_rtx_queue()
3439 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3440 tp->sacked_out = 0; in tcp_clean_rtx_queue()
3442 if (tp->retrans_out) { in tcp_clean_rtx_queue()
3444 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3445 tp->retrans_out = 0; in tcp_clean_rtx_queue()
3456 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe() local
3461 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3526 static inline bool tcp_may_update_window(const struct tcp_sock *tp, in tcp_may_update_window() argument
3530 return after(ack, tp->snd_una) || in tcp_may_update_window()
3531 after(ack_seq, tp->snd_wl1) || in tcp_may_update_window()
3532 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); in tcp_may_update_window()
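
tcp_may_update_window (lines 3526-3532) is the RFC 793 SND.WL1/SND.WL2 guard: the advertised window is taken only from a segment that provably is not older than the last update, i.e. the ACK advances snd_una, or carries a newer sequence number, or is the same segment but advertises more space. Standalone form:

    #include <stdbool.h>
    #include <stdint.h>

    static bool after_seq(uint32_t s1, uint32_t s2)
    {
        return (int32_t)(s2 - s1) < 0;
    }

    static bool may_update_window(uint32_t ack, uint32_t ack_seq, uint32_t nwin,
                                  uint32_t snd_una, uint32_t snd_wl1,
                                  uint32_t snd_wnd)
    {
        return after_seq(ack, snd_una) ||            /* ACK advances       */
               after_seq(ack_seq, snd_wl1) ||        /* newer segment      */
               (ack_seq == snd_wl1 && nwin > snd_wnd); /* same seg, more win */
    }
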
3536 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) in tcp_snd_una_update() argument
3538 u32 delta = ack - tp->snd_una; in tcp_snd_una_update()
3540 sock_owned_by_me((struct sock *)tp); in tcp_snd_una_update()
3541 tp->bytes_acked += delta; in tcp_snd_una_update()
3542 tp->snd_una = ack; in tcp_snd_una_update()
3546 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) in tcp_rcv_nxt_update() argument
3548 u32 delta = seq - tp->rcv_nxt; in tcp_rcv_nxt_update()
3550 sock_owned_by_me((struct sock *)tp); in tcp_rcv_nxt_update()
3551 tp->bytes_received += delta; in tcp_rcv_nxt_update()
3552 WRITE_ONCE(tp->rcv_nxt, seq); in tcp_rcv_nxt_update()
3563 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window() local
3568 nwin <<= tp->rx_opt.snd_wscale; in tcp_ack_update_window()
3570 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { in tcp_ack_update_window()
3572 tcp_update_wl(tp, ack_seq); in tcp_ack_update_window()
3574 if (tp->snd_wnd != nwin) { in tcp_ack_update_window()
3575 tp->snd_wnd = nwin; in tcp_ack_update_window()
3580 tp->pred_flags = 0; in tcp_ack_update_window()
3586 if (nwin > tp->max_window) { in tcp_ack_update_window()
3587 tp->max_window = nwin; in tcp_ack_update_window()
3593 tcp_snd_una_update(tp, ack); in tcp_ack_update_window()
3646 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack() local
3653 &tp->last_oow_ack_time)) in tcp_send_challenge_ack()
3673 static void tcp_store_ts_recent(struct tcp_sock *tp) in tcp_store_ts_recent() argument
3675 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; in tcp_store_ts_recent()
3676 tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); in tcp_store_ts_recent()
3679 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) in tcp_replace_ts_recent() argument
3681 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { in tcp_replace_ts_recent()
3689 if (tcp_paws_check(&tp->rx_opt, 0)) in tcp_replace_ts_recent()
3690 tcp_store_ts_recent(tp); in tcp_replace_ts_recent()
3699 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack() local
3701 if (before(ack, tp->tlp_high_seq)) in tcp_process_tlp_ack()
3704 if (!tp->tlp_retrans) { in tcp_process_tlp_ack()
3706 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3709 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3710 } else if (after(ack, tp->tlp_high_seq)) { in tcp_process_tlp_ack()
3723 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3741 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_recovery() local
3749 if (after(tp->snd_nxt, tp->high_seq)) in tcp_xmit_recovery()
3751 tp->frto = 0; in tcp_xmit_recovery()
3760 struct tcp_sock *tp = tcp_sk(sk); in tcp_newly_delivered() local
3763 delivered = tp->delivered - prior_delivered; in tcp_newly_delivered()
3775 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack() local
3778 u32 prior_snd_una = tp->snd_una; in tcp_ack()
3779 bool is_sack_reneg = tp->is_sack_reneg; in tcp_ack()
3783 int prior_packets = tp->packets_out; in tcp_ack()
3784 u32 delivered = tp->delivered; in tcp_ack()
3785 u32 lost = tp->lost; in tcp_ack()
3803 max_window = min_t(u64, tp->max_window, tp->bytes_acked); in tcp_ack()
3816 if (after(ack, tp->snd_nxt)) in tcp_ack()
3830 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; in tcp_ack()
3831 rs.prior_in_flight = tcp_packets_in_flight(tp); in tcp_ack()
3837 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); in tcp_ack()
3845 tcp_update_wl(tp, ack_seq); in tcp_ack()
3846 tcp_snd_una_update(tp, ack); in tcp_ack()
3866 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { in tcp_ack()
3872 tcp_count_delivered(tp, sack_state.sack_delivered, in tcp_ack()
3895 tp->rcv_tstamp = tcp_jiffies32; in tcp_ack()
3905 if (tp->tlp_high_seq) in tcp_ack()
3928 lost = tp->lost - lost; /* freshly marked lost */ in tcp_ack()
3948 if (tp->tlp_high_seq) in tcp_ack()
4175 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) in tcp_parse_aligned_timestamp() argument
4181 tp->rx_opt.saw_tstamp = 1; in tcp_parse_aligned_timestamp()
4183 tp->rx_opt.rcv_tsval = ntohl(*ptr); in tcp_parse_aligned_timestamp()
4186 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; in tcp_parse_aligned_timestamp()
4188 tp->rx_opt.rcv_tsecr = 0; in tcp_parse_aligned_timestamp()
4199 const struct tcphdr *th, struct tcp_sock *tp) in tcp_fast_parse_options() argument
4205 tp->rx_opt.saw_tstamp = 0; in tcp_fast_parse_options()
4207 } else if (tp->rx_opt.tstamp_ok && in tcp_fast_parse_options()
4209 if (tcp_parse_aligned_timestamp(tp, th)) in tcp_fast_parse_options()
4213 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL); in tcp_fast_parse_options()
4214 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_fast_parse_options()
4215 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_fast_parse_options()
4280 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack() local
4286 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && in tcp_disordered_ack()
4289 ack == tp->snd_una && in tcp_disordered_ack()
4292 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && in tcp_disordered_ack()
4295 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); in tcp_disordered_ack()
4301 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard() local
4303 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && in tcp_paws_discard()
4320 static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) in tcp_sequence() argument
4322 return !before(end_seq, tp->rcv_wup) && in tcp_sequence()
4323 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); in tcp_sequence()
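
tcp_sequence (lines 4320-4323) is RFC 793 segment acceptability reduced to two wrap-safe comparisons: the segment must not end before rcv_wup (everything there is already acknowledged) and must not begin past the right edge rcv_nxt + window. Standalone form:

    #include <stdbool.h>
    #include <stdint.h>

    static bool before_seq(uint32_t s1, uint32_t s2)
    {
        return (int32_t)(s1 - s2) < 0;
    }

    static bool seq_in_window(uint32_t seq, uint32_t end_seq,
                              uint32_t rcv_wup, uint32_t rcv_nxt,
                              uint32_t rcv_wnd)
    {
        return !before_seq(end_seq, rcv_wup) &&
               !before_seq(rcv_nxt + rcv_wnd, seq);  /* !after(seq, edge) */
    }
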
4376 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin() local
4426 skb_rbtree_purge(&tp->out_of_order_queue); in tcp_fin()
4427 if (tcp_is_sack(tp)) in tcp_fin()
4428 tcp_sack_reset(&tp->rx_opt); in tcp_fin()
4458 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set() local
4460 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_dsack_set()
4463 if (before(seq, tp->rcv_nxt)) in tcp_dsack_set()
4470 tp->rx_opt.dsack = 1; in tcp_dsack_set()
4471 tp->duplicate_sack[0].start_seq = seq; in tcp_dsack_set()
4472 tp->duplicate_sack[0].end_seq = end_seq; in tcp_dsack_set()
4478 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend() local
4480 if (!tp->rx_opt.dsack) in tcp_dsack_extend()
4483 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); in tcp_dsack_extend()
4500 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack() local
4503 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_send_dupack()
4507 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_send_dupack()
4511 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) in tcp_send_dupack()
4512 end_seq = tp->rcv_nxt; in tcp_send_dupack()
4523 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) in tcp_sack_maybe_coalesce() argument
4526 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_maybe_coalesce()
4532 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { in tcp_sack_maybe_coalesce()
4539 tp->rx_opt.num_sacks--; in tcp_sack_maybe_coalesce()
4540 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) in tcp_sack_maybe_coalesce()
4551 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_compress_send_ack() local
4553 if (!tp->compressed_ack) in tcp_sack_compress_send_ack()
4556 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_sack_compress_send_ack()
4564 tp->compressed_ack - 1); in tcp_sack_compress_send_ack()
4566 tp->compressed_ack = 0; in tcp_sack_compress_send_ack()
4578 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb() local
4579 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_new_ofo_skb()
4580 int cur_sacks = tp->rx_opt.num_sacks; in tcp_sack_new_ofo_skb()
4594 tcp_sack_maybe_coalesce(tp); in tcp_sack_new_ofo_skb()
4610 tp->rx_opt.num_sacks--; in tcp_sack_new_ofo_skb()
4620 tp->rx_opt.num_sacks++; in tcp_sack_new_ofo_skb()
4625 static void tcp_sack_remove(struct tcp_sock *tp) in tcp_sack_remove() argument
4627 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_remove()
4628 int num_sacks = tp->rx_opt.num_sacks; in tcp_sack_remove()
4632 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_sack_remove()
4633 tp->rx_opt.num_sacks = 0; in tcp_sack_remove()
4639 if (!before(tp->rcv_nxt, sp->start_seq)) { in tcp_sack_remove()
4643 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); in tcp_sack_remove()
4647 tp->selective_acks[i-1] = tp->selective_acks[i]; in tcp_sack_remove()
4654 tp->rx_opt.num_sacks = num_sacks; in tcp_sack_remove()
4739 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue() local
4740 __u32 dsack_high = tp->rcv_nxt; in tcp_ofo_queue()
4745 p = rb_first(&tp->out_of_order_queue); in tcp_ofo_queue()
4748 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_ofo_queue()
4758 rb_erase(&skb->rbnode, &tp->out_of_order_queue); in tcp_ofo_queue()
4760 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { in tcp_ofo_queue()
4767 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
4806 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo() local
4822 tp->pred_flags = 0; in tcp_data_queue_ofo()
4825 tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs); in tcp_data_queue_ofo()
4830 p = &tp->out_of_order_queue.rb_node; in tcp_data_queue_ofo()
4831 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue_ofo()
4833 if (tcp_is_sack(tp)) { in tcp_data_queue_ofo()
4834 tp->rx_opt.num_sacks = 1; in tcp_data_queue_ofo()
4835 tp->selective_acks[0].start_seq = seq; in tcp_data_queue_ofo()
4836 tp->selective_acks[0].end_seq = end_seq; in tcp_data_queue_ofo()
4839 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4840 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
4847 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, in tcp_data_queue_ofo()
4853 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
4860 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) { in tcp_data_queue_ofo()
4861 parent = &tp->ooo_last_skb->rbnode; in tcp_data_queue_ofo()
4893 &tp->out_of_order_queue); in tcp_data_queue_ofo()
4911 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4923 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4931 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
4934 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
4941 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
5025 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue() local
5044 tp->rx_opt.dsack = 0; in tcp_data_queue()
5050 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { in tcp_data_queue()
5051 if (tcp_receive_window(tp) == 0) { in tcp_data_queue()
5072 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue()
5078 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_data_queue()
5082 if (tp->rx_opt.num_sacks) in tcp_data_queue()
5083 tcp_sack_remove(tp); in tcp_data_queue()
5094 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_data_queue()
5109 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) in tcp_data_queue()
5112 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_data_queue()
5114 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
5119 if (!tcp_receive_window(tp)) { in tcp_data_queue()
5289 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue() local
5294 skb = skb_rb_first(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
5297 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
5316 tcp_collapse(sk, NULL, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
5347 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue() local
5351 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_prune_ofo_queue()
5356 node = &tp->ooo_last_skb->rbnode; in tcp_prune_ofo_queue()
5359 rb_erase(node, &tp->out_of_order_queue); in tcp_prune_ofo_queue()
5371 tp->ooo_last_skb = rb_to_skb(prev); in tcp_prune_ofo_queue()
5378 if (tp->rx_opt.sack_ok) in tcp_prune_ofo_queue()
5379 tcp_sack_reset(&tp->rx_opt); in tcp_prune_ofo_queue()
5392 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue() local
5399 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); in tcp_prune_queue()
5409 tp->copied_seq, tp->rcv_nxt); in tcp_prune_queue()
5430 tp->pred_flags = 0; in tcp_prune_queue()
5436 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf() local
5453 if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)) in tcp_should_expand_sndbuf()
5461 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space() local
5465 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_new_space()
5504 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check() local
5508 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
5514 (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat || in __tcp_ack_snd_check()
5515 __tcp_select_window(sk) >= tp->rcv_wnd)) || in __tcp_ack_snd_check()
5525 if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in __tcp_ack_snd_check()
5530 if (!tcp_is_sack(tp) || in __tcp_ack_snd_check()
5531 tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)) in __tcp_ack_snd_check()
5534 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) { in __tcp_ack_snd_check()
5535 tp->compressed_ack_rcv_nxt = tp->rcv_nxt; in __tcp_ack_snd_check()
5536 tp->dup_ack_counter = 0; in __tcp_ack_snd_check()
5538 if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) { in __tcp_ack_snd_check()
5539 tp->dup_ack_counter++; in __tcp_ack_snd_check()
5542 tp->compressed_ack++; in __tcp_ack_snd_check()
5543 if (hrtimer_is_queued(&tp->compressed_ack_timer)) in __tcp_ack_snd_check()
5548 rtt = tp->rcv_rtt_est.rtt_us; in __tcp_ack_snd_check()
5549 if (tp->srtt_us && tp->srtt_us < rtt) in __tcp_ack_snd_check()
5550 rtt = tp->srtt_us; in __tcp_ack_snd_check()
5556 hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay), in __tcp_ack_snd_check()
5582 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg() local
5590 if (after(tp->copied_seq, ptr)) in tcp_check_urg()
5603 if (before(ptr, tp->rcv_nxt)) in tcp_check_urg()
5607 if (tp->urg_data && !after(ptr, tp->urg_seq)) in tcp_check_urg()
5628 if (tp->urg_seq == tp->copied_seq && tp->urg_data && in tcp_check_urg()
5629 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
5631 tp->copied_seq++; in tcp_check_urg()
5632 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_check_urg()
5638 tp->urg_data = TCP_URG_NOTYET; in tcp_check_urg()
5639 WRITE_ONCE(tp->urg_seq, ptr); in tcp_check_urg()
5642 tp->pred_flags = 0; in tcp_check_urg()
5648 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg() local
5655 if (tp->urg_data == TCP_URG_NOTYET) { in tcp_urg()
5656 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - in tcp_urg()
5664 tp->urg_data = TCP_URG_VALID | tmp; in tcp_urg()
5681 struct tcp_sock *tp = tcp_sk(sk); in tcp_reset_check() local
5683 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) && in tcp_reset_check()
5694 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming() local
5698 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && in tcp_validate_incoming()
5699 tp->rx_opt.saw_tstamp && in tcp_validate_incoming()
5705 &tp->last_oow_ack_time)) in tcp_validate_incoming()
5713 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_validate_incoming()
5725 &tp->last_oow_ack_time)) in tcp_validate_incoming()
5744 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt || in tcp_validate_incoming()
5747 } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { in tcp_validate_incoming()
5748 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_validate_incoming()
5752 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; in tcp_validate_incoming()
5770 if (tp->syn_fastopen && !tp->data_segs_in && in tcp_validate_incoming()
5827 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established() local
5833 tcp_mstamp_refresh(tp); in tcp_rcv_established()
5851 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_established()
5862 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && in tcp_rcv_established()
5863 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && in tcp_rcv_established()
5864 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_established()
5865 int tcp_header_len = tp->tcp_header_len; in tcp_rcv_established()
5875 if (!tcp_parse_aligned_timestamp(tp, th)) in tcp_rcv_established()
5879 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) in tcp_rcv_established()
5898 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
5899 tcp_store_ts_recent(tp); in tcp_rcv_established()
5911 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_established()
5933 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
5934 tcp_store_ts_recent(tp); in tcp_rcv_established()
5946 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { in tcp_rcv_established()
5953 tcp_update_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_established()
6008 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_transfer() local
6020 if (tp->total_retrans > 1 && tp->undo_marker) in tcp_init_transfer()
6021 tcp_snd_cwnd_set(tp, 1); in tcp_init_transfer()
6023 tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk))); in tcp_init_transfer()
6024 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_init_transfer()
6035 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect() local
6052 tp->lsndtime = tcp_jiffies32; in tcp_finish_connect()
6055 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
6057 if (!tp->rx_opt.snd_wscale) in tcp_finish_connect()
6058 __tcp_fast_path_on(tp, tp->snd_wnd); in tcp_finish_connect()
6060 tp->pred_flags = 0; in tcp_finish_connect()
6066 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack() local
6067 struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
6068 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; in tcp_rcv_fastopen_synack()
6071 if (mss == tp->rx_opt.user_mss) { in tcp_rcv_fastopen_synack()
6081 if (!tp->syn_fastopen) { in tcp_rcv_fastopen_synack()
6084 } else if (tp->total_retrans) { in tcp_rcv_fastopen_synack()
6091 } else if (cookie->len < 0 && !tp->syn_data) { in tcp_rcv_fastopen_synack()
6096 try_exp = tp->syn_fastopen_exp ? 2 : 1; in tcp_rcv_fastopen_synack()
6102 if (tp->total_retrans) in tcp_rcv_fastopen_synack()
6103 tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED; in tcp_rcv_fastopen_synack()
6105 tp->fastopen_client_fail = TFO_DATA_NOT_ACKED; in tcp_rcv_fastopen_synack()
6113 tp->syn_data_acked = tp->syn_data; in tcp_rcv_fastopen_synack()
6114 if (tp->syn_data_acked) { in tcp_rcv_fastopen_synack()
6117 if (tp->delivered > 1) in tcp_rcv_fastopen_synack()
6118 --tp->delivered; in tcp_rcv_fastopen_synack()
6126 static void smc_check_reset_syn(struct tcp_sock *tp) in smc_check_reset_syn() argument
6130 if (tp->syn_smc && !tp->rx_opt.smc_ok) in smc_check_reset_syn()
6131 tp->syn_smc = 0; in smc_check_reset_syn()
6138 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_spurious_syn() local
6145 syn_stamp = tp->retrans_stamp; in tcp_try_undo_spurious_syn()
6146 if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp && in tcp_try_undo_spurious_syn()
6147 syn_stamp == tp->rx_opt.rcv_tsecr) in tcp_try_undo_spurious_syn()
6148 tp->undo_marker = 0; in tcp_try_undo_spurious_syn()
6155 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process() local
6157 int saved_clamp = tp->rx_opt.mss_clamp; in tcp_rcv_synsent_state_process()
6160 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
6161 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_rcv_synsent_state_process()
6162 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_rcv_synsent_state_process()
6173 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || in tcp_rcv_synsent_state_process()
6174 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_synsent_state_process()
6183 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_rcv_synsent_state_process()
6184 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, in tcp_rcv_synsent_state_process()
6185 tcp_time_stamp(tp))) { in tcp_rcv_synsent_state_process()
6221 tcp_ecn_rcv_synack(tp, th); in tcp_rcv_synsent_state_process()
6223 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_synsent_state_process()
6230 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); in tcp_rcv_synsent_state_process()
6231 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
6236 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
6238 if (!tp->rx_opt.wscale_ok) { in tcp_rcv_synsent_state_process()
6239 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; in tcp_rcv_synsent_state_process()
6240 tp->window_clamp = min(tp->window_clamp, 65535U); in tcp_rcv_synsent_state_process()
6243 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
6244 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
6245 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
6247 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_synsent_state_process()
6248 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
6250 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
6259 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_synsent_state_process()
6261 smc_check_reset_syn(tp); in tcp_rcv_synsent_state_process()
6267 fastopen_fail = (tp->syn_fastopen || tp->syn_data) && in tcp_rcv_synsent_state_process()
6313 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && in tcp_rcv_synsent_state_process()
6314 tcp_paws_reject(&tp->rx_opt, 0)) in tcp_rcv_synsent_state_process()
6324 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
6325 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
6326 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
6327 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
6330 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
6333 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); in tcp_rcv_synsent_state_process()
6334 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_synsent_state_process()
6335 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
6340 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
6341 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; in tcp_rcv_synsent_state_process()
6342 tp->max_window = tp->snd_wnd; in tcp_rcv_synsent_state_process()
6344 tcp_ecn_rcv_syn(tp, th); in tcp_rcv_synsent_state_process()
6373 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
6374 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
6378 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
6379 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
6385 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synrecv_state_fastopen() local
6391 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out) in tcp_rcv_synrecv_state_fastopen()
6395 tp->retrans_stamp = 0; in tcp_rcv_synrecv_state_fastopen()
6401 req = rcu_dereference_protected(tp->fastopen_rsk, in tcp_rcv_synrecv_state_fastopen()
6425 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process() local
6463 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6464 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6476 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6477 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6478 req = rcu_dereference_protected(tp->fastopen_rsk, in tcp_rcv_state_process()
6509 tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */ in tcp_rcv_state_process()
6510 if (!tp->srtt_us) in tcp_rcv_state_process()
6517 tp->retrans_stamp = 0; in tcp_rcv_state_process()
6520 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_state_process()
6533 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; in tcp_rcv_state_process()
6534 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; in tcp_rcv_state_process()
6535 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_state_process()
6537 if (tp->rx_opt.tstamp_ok) in tcp_rcv_state_process()
6538 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_state_process()
6544 tp->lsndtime = tcp_jiffies32; in tcp_rcv_state_process()
6547 tcp_fast_path_on(tp); in tcp_rcv_state_process()
6556 if (tp->snd_una != tp->write_seq) in tcp_rcv_state_process()
6570 if (tp->linger2 < 0) { in tcp_rcv_state_process()
6576 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
6578 if (tp->syn_fastopen && th->fin) in tcp_rcv_state_process()
6604 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
6611 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
6627 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_rcv_state_process()
6644 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
6845 struct tcp_sock *tp = tcp_sk(sk); in tcp_get_syncookie_mss() local
6860 mss = tcp_parse_mss_option(th, tp->rx_opt.user_mss); in tcp_get_syncookie_mss()
6875 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request() local
6914 tmp_opt.user_mss = tp->rx_opt.user_mss; in tcp_conn_request()