
Lines Matching refs:sk

129 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)  in tcp_measure_rcv_mss()  argument
131 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss()
162 len -= tcp_sk(sk)->tcp_header_len; in tcp_measure_rcv_mss()
175 static void tcp_incr_quickack(struct sock *sk) in tcp_incr_quickack() argument
177 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack()
178 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
186 static void tcp_enter_quickack_mode(struct sock *sk) in tcp_enter_quickack_mode() argument
188 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode()
189 tcp_incr_quickack(sk); in tcp_enter_quickack_mode()
198 static inline bool tcp_in_quickack_mode(const struct sock *sk) in tcp_in_quickack_mode() argument
200 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode()
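
The quick-ACK machinery above budgets how many immediate ACKs the receiver sends before falling back to delayed ACKs; the budget comes from the formula visible at line 178 (receive window divided by twice the peer's estimated MSS). A stand-alone sketch of that heuristic follows; the clamp to 16 mirrors the kernel's TCP_MAX_QUICKACKS of this era, but the snippet is illustrative, not kernel code.

/* User-space sketch of the quick-ACK budget: roughly one immediate ACK
 * per two MSS-sized segments that fit in the advertised receive window,
 * never less than 2 and capped at TCP_MAX_QUICKACKS.
 */
#include <stdio.h>

#define TCP_MAX_QUICKACKS 16U

static unsigned int quickack_budget(unsigned int rcv_wnd, unsigned int rcv_mss)
{
	unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	return quickacks < TCP_MAX_QUICKACKS ? quickacks : TCP_MAX_QUICKACKS;
}

int main(void)
{
	/* 64 KB window, 1460-byte MSS: 22 segments' worth, clamped to 16 */
	printf("budget = %u\n", quickack_budget(65535, 1460));
	return 0;
}
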
282 static void tcp_sndbuf_expand(struct sock *sk) in tcp_sndbuf_expand() argument
284 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand()
307 if (sk->sk_sndbuf < sndmem) in tcp_sndbuf_expand()
308 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); in tcp_sndbuf_expand()
337 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) in __tcp_grow_window() argument
339 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window()
346 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; in __tcp_grow_window()
354 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) in tcp_grow_window() argument
356 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window()
360 (int)tp->rcv_ssthresh < tcp_space(sk) && in tcp_grow_window()
361 !sk_under_memory_pressure(sk)) { in tcp_grow_window()
370 incr = __tcp_grow_window(sk, skb); in tcp_grow_window()
376 inet_csk(sk)->icsk_ack.quick |= 1; in tcp_grow_window()
382 static void tcp_fixup_rcvbuf(struct sock *sk) in tcp_fixup_rcvbuf() argument
384 u32 mss = tcp_sk(sk)->advmss; in tcp_fixup_rcvbuf()
396 if (sk->sk_rcvbuf < rcvmem) in tcp_fixup_rcvbuf()
397 sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); in tcp_fixup_rcvbuf()
403 void tcp_init_buffer_space(struct sock *sk) in tcp_init_buffer_space() argument
405 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space()
408 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) in tcp_init_buffer_space()
409 tcp_fixup_rcvbuf(sk); in tcp_init_buffer_space()
410 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) in tcp_init_buffer_space()
411 tcp_sndbuf_expand(sk); in tcp_init_buffer_space()
417 maxwin = tcp_full_space(sk); in tcp_init_buffer_space()
439 static void tcp_clamp_window(struct sock *sk) in tcp_clamp_window() argument
441 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window()
442 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window()
446 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && in tcp_clamp_window()
447 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && in tcp_clamp_window()
448 !sk_under_memory_pressure(sk) && in tcp_clamp_window()
449 sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { in tcp_clamp_window()
450 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), in tcp_clamp_window()
453 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) in tcp_clamp_window()
464 void tcp_initialize_rcv_mss(struct sock *sk) in tcp_initialize_rcv_mss() argument
466 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss()
473 inet_csk(sk)->icsk_ack.rcv_mss = hint; in tcp_initialize_rcv_mss()
537 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, in tcp_rcv_rtt_measure_ts() argument
540 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts()
543 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) in tcp_rcv_rtt_measure_ts()
551 void tcp_rcv_space_adjust(struct sock *sk) in tcp_rcv_space_adjust() argument
553 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust()
576 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { in tcp_rcv_space_adjust()
603 if (rcvbuf > sk->sk_rcvbuf) { in tcp_rcv_space_adjust()
604 sk->sk_rcvbuf = rcvbuf; in tcp_rcv_space_adjust()
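
tcp_rcv_space_adjust() is the receive-buffer autotuning step: roughly once per round trip it measures how much data the application actually consumed and, unless the user pinned the buffer with SO_RCVBUF (the SOCK_RCVBUF_LOCK test at line 576), grows sk_rcvbuf toward that demand, bounded by sysctl_tcp_rmem[2]. A simplified user-space sketch of the idea; the 2x headroom factor and the field names are illustrative, not the kernel's exact arithmetic.

/* Simplified sketch of receiver buffer autotuning ("dynamic right-sizing"):
 * grow the buffer when the application drained close to a full buffer's
 * worth of data within one RTT.  Headroom factor and cap are assumptions.
 */
#include <stddef.h>

struct rcv_space {
	size_t copied_this_rtt;  /* bytes copied to user since last adjust */
	size_t rcvbuf;           /* current buffer size */
	size_t rcvbuf_max;       /* e.g. sysctl_tcp_rmem[2] */
	int    user_locked;      /* SOCK_RCVBUF_LOCK: user set SO_RCVBUF */
};

static void rcv_space_adjust(struct rcv_space *rs)
{
	size_t want = 2 * rs->copied_this_rtt;  /* headroom for a growing flow */

	if (!rs->user_locked && want > rs->rcvbuf)
		rs->rcvbuf = want < rs->rcvbuf_max ? want : rs->rcvbuf_max;

	rs->copied_this_rtt = 0;                /* start a new measurement */
}
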
627 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) in tcp_event_data_recv() argument
629 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv()
630 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_recv()
633 inet_csk_schedule_ack(sk); in tcp_event_data_recv()
635 tcp_measure_rcv_mss(sk, skb); in tcp_event_data_recv()
645 tcp_incr_quickack(sk); in tcp_event_data_recv()
661 tcp_incr_quickack(sk); in tcp_event_data_recv()
662 sk_mem_reclaim(sk); in tcp_event_data_recv()
670 tcp_grow_window(sk, skb); in tcp_event_data_recv()
682 static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) in tcp_rtt_estimator() argument
684 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator()
733 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
739 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
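
tcp_rtt_estimator() is the classic Van Jacobson smoothed-RTT filter standardized in RFC 6298, kept in fixed point (srtt_us is stored left-shifted by 3, and mdev/mdev_max stand in for a plain variance term). The sketch below shows the same update in floating point, together with the srtt + 4*rttvar retransmission timeout that tcp_set_rto() derives from it, clamped to the per-connection minimum seen at lines 733 and 739.

/* RFC 6298-style estimator: srtt and rttvar are EWMAs of the measured RTT,
 * and the retransmission timeout is srtt + 4*rttvar, clamped to a minimum.
 * Floating point for clarity; the kernel uses shifted fixed-point microseconds.
 */
#include <stdio.h>
#include <math.h>

struct rtt_state {
	double srtt;    /* smoothed RTT, seconds */
	double rttvar;  /* RTT variation estimate, seconds */
	int    first;   /* no sample seen yet */
};

static double rtt_sample(struct rtt_state *st, double m, double rto_min)
{
	if (st->first) {
		st->srtt = m;
		st->rttvar = m / 2;
		st->first = 0;
	} else {
		st->rttvar = 0.75 * st->rttvar + 0.25 * fabs(st->srtt - m);
		st->srtt   = 0.875 * st->srtt + 0.125 * m;
	}
	double rto = st->srtt + 4 * st->rttvar;
	return rto > rto_min ? rto : rto_min;
}

int main(void)
{
	struct rtt_state st = { .first = 1 };
	double samples[] = { 0.100, 0.120, 0.090, 0.300 };
	for (int i = 0; i < 4; i++)
		printf("RTO after %.3fs sample: %.3fs\n",
		       samples[i], rtt_sample(&st, samples[i], 0.200));
	return 0;
}
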
752 static void tcp_update_pacing_rate(struct sock *sk) in tcp_update_pacing_rate() argument
754 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate()
769 ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, in tcp_update_pacing_rate()
770 sk->sk_max_pacing_rate); in tcp_update_pacing_rate()
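
tcp_update_pacing_rate() converts the congestion window into a byte rate for the pacing layer (for example the fq qdisc): roughly twice cwnd * mss / srtt so the sender can keep probing for bandwidth, then capped at the user's sk_max_pacing_rate exactly as lines 769-770 show. A sketch of that computation; the 2x factor matches the kernel comment of this era, and the fixed-point/do_div details are omitted.

/* Pacing rate ~= factor * cwnd * mss / srtt, capped by the user-set maximum
 * (SO_MAX_PACING_RATE).  The 2x factor and the use of plain cwnd (rather
 * than max(cwnd, packets_out)) are simplifications for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t pacing_rate(uint32_t cwnd, uint32_t mss,
			    double srtt_sec, uint64_t max_rate)
{
	if (srtt_sec <= 0)
		return max_rate;
	uint64_t rate = (uint64_t)(2.0 * cwnd * mss / srtt_sec); /* bytes/sec */
	return rate < max_rate ? rate : max_rate;
}

int main(void)
{
	/* cwnd 10, mss 1460, 50 ms RTT: ~292 KB/s raw, doubled to ~584 KB/s */
	printf("%llu bytes/sec\n",
	       (unsigned long long)pacing_rate(10, 1460, 0.050, ~0ULL));
	return 0;
}
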
776 static void tcp_set_rto(struct sock *sk) in tcp_set_rto() argument
778 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto()
789 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
800 tcp_bound_rto(sk); in tcp_set_rto()
830 static void tcp_update_reordering(struct sock *sk, const int metric, in tcp_update_reordering() argument
833 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_reordering()
849 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_update_reordering()
852 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_update_reordering()
1039 static void tcp_mark_lost_retrans(struct sock *sk) in tcp_mark_lost_retrans() argument
1041 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mark_lost_retrans()
1042 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_lost_retrans()
1053 tcp_for_write_queue(skb, sk) { in tcp_mark_lost_retrans()
1056 if (skb == tcp_send_head(sk)) in tcp_mark_lost_retrans()
1082 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); in tcp_mark_lost_retrans()
1094 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, in tcp_check_dsack() argument
1098 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack()
1106 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); in tcp_check_dsack()
1115 NET_INC_STATS_BH(sock_net(sk), in tcp_check_dsack()
1144 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, in tcp_match_skb_to_sack() argument
1183 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); in tcp_match_skb_to_sack()
1192 static u8 tcp_sacktag_one(struct sock *sk, in tcp_sacktag_one() argument
1198 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one()
1282 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, in tcp_shifted_skb() argument
1287 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb()
1288 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); in tcp_shifted_skb()
1300 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, in tcp_shifted_skb()
1321 skb_shinfo(prev)->gso_type = sk->sk_gso_type; in tcp_shifted_skb()
1335 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); in tcp_shifted_skb()
1352 if (skb == tcp_highest_sack(sk)) in tcp_shifted_skb()
1353 tcp_advance_highest_sack(sk, skb); in tcp_shifted_skb()
1355 tcp_unlink_write_queue(skb, sk); in tcp_shifted_skb()
1356 sk_wmem_free_skb(sk, skb); in tcp_shifted_skb()
1358 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); in tcp_shifted_skb()
1380 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, in tcp_shift_skb_data() argument
1385 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data()
1392 if (!sk_can_gso(sk)) in tcp_shift_skb_data()
1406 if (unlikely(skb == tcp_write_queue_head(sk))) in tcp_shift_skb_data()
1408 prev = tcp_write_queue_prev(sk, skb); in tcp_shift_skb_data()
1484 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) in tcp_shift_skb_data()
1490 if (prev == tcp_write_queue_tail(sk)) in tcp_shift_skb_data()
1492 skb = tcp_write_queue_next(sk, prev); in tcp_shift_skb_data()
1495 (skb == tcp_send_head(sk)) || in tcp_shift_skb_data()
1503 tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); in tcp_shift_skb_data()
1514 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); in tcp_shift_skb_data()
1518 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_walk() argument
1524 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk()
1527 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_walk()
1531 if (skb == tcp_send_head(sk)) in tcp_sacktag_walk()
1540 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1552 tmp = tcp_shift_skb_data(sk, skb, state, in tcp_sacktag_walk()
1562 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1573 tcp_sacktag_one(sk, in tcp_sacktag_walk()
1584 tcp_advance_highest_sack(sk, skb); in tcp_sacktag_walk()
1595 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_skip() argument
1599 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_skip()
1600 if (skb == tcp_send_head(sk)) in tcp_sacktag_skip()
1612 struct sock *sk, in tcp_maybe_skipping_dsack() argument
1621 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); in tcp_maybe_skipping_dsack()
1622 skb = tcp_sacktag_walk(skb, sk, NULL, state, in tcp_maybe_skipping_dsack()
1636 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, in tcp_sacktag_write_queue() argument
1639 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue()
1660 tcp_highest_sack_reset(sk); in tcp_sacktag_write_queue()
1663 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, in tcp_sacktag_write_queue()
1704 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_sacktag_write_queue()
1730 skb = tcp_write_queue_head(sk); in tcp_sacktag_write_queue()
1765 skb = tcp_sacktag_skip(skb, sk, &state, in tcp_sacktag_write_queue()
1767 skb = tcp_sacktag_walk(skb, sk, next_dup, in tcp_sacktag_write_queue()
1778 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, in tcp_sacktag_write_queue()
1785 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1793 skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq); in tcp_sacktag_write_queue()
1800 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1805 skb = tcp_sacktag_skip(skb, sk, &state, start_seq); in tcp_sacktag_write_queue()
1808 skb = tcp_sacktag_walk(skb, sk, next_dup, &state, in tcp_sacktag_write_queue()
1823 tcp_mark_lost_retrans(sk); in tcp_sacktag_write_queue()
1828 ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) in tcp_sacktag_write_queue()
1829 tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); in tcp_sacktag_write_queue()
1864 static void tcp_check_reno_reordering(struct sock *sk, const int addend) in tcp_check_reno_reordering() argument
1866 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering()
1868 tcp_update_reordering(sk, tp->packets_out + addend, 0); in tcp_check_reno_reordering()
1873 static void tcp_add_reno_sack(struct sock *sk) in tcp_add_reno_sack() argument
1875 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack()
1877 tcp_check_reno_reordering(sk, 0); in tcp_add_reno_sack()
1883 static void tcp_remove_reno_sacks(struct sock *sk, int acked) in tcp_remove_reno_sacks() argument
1885 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks()
1894 tcp_check_reno_reordering(sk, acked); in tcp_remove_reno_sacks()
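
For peers that do not negotiate SACK, the functions above emulate a SACK scoreboard from duplicate ACKs: tcp_add_reno_sack() books one more presumed-delivered segment per dupack, and tcp_remove_reno_sacks() drains that count as the cumulative ACK advances. A minimal sketch of that bookkeeping, simplified from the kernel's exact accounting (which also re-checks reordering and the left_out invariant).

/* Reno "fake SACK" bookkeeping: each duplicate ACK is taken as evidence
 * that one more in-flight segment has left the network.
 */
struct reno_state {
	unsigned int packets_out;  /* segments currently in flight */
	unsigned int sacked_out;   /* segments presumed delivered via dupacks */
};

static void reno_add_sack(struct reno_state *s)
{
	if (s->sacked_out < s->packets_out)
		s->sacked_out++;
}

static void reno_remove_sacks(struct reno_state *s, unsigned int acked)
{
	/* A cumulative ACK covering 'acked' segments consumes emulated SACKs. */
	s->sacked_out  = acked >= s->sacked_out  ? 0 : s->sacked_out - acked;
	s->packets_out = acked >= s->packets_out ? 0 : s->packets_out - acked;
}
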
1924 void tcp_enter_loss(struct sock *sk) in tcp_enter_loss() argument
1926 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_loss()
1927 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss()
1937 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
1938 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
1939 tcp_ca_event(sk, CA_EVENT_LOSS); in tcp_enter_loss()
1952 skb = tcp_write_queue_head(sk); in tcp_enter_loss()
1955 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); in tcp_enter_loss()
1961 tcp_for_write_queue(skb, sk) { in tcp_enter_loss()
1962 if (skb == tcp_send_head(sk)) in tcp_enter_loss()
1982 tcp_set_ca_state(sk, TCP_CA_Loss); in tcp_enter_loss()
1992 !inet_csk(sk)->icsk_mtup.probe_size; in tcp_enter_loss()
2005 static bool tcp_check_sack_reneging(struct sock *sk, int flag) in tcp_check_sack_reneging() argument
2008 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging()
2012 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_check_sack_reneging()
2044 static bool tcp_pause_early_retransmit(struct sock *sk, int flag) in tcp_pause_early_retransmit() argument
2046 struct tcp_sock *tp = tcp_sk(sk); in tcp_pause_early_retransmit()
2060 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) in tcp_pause_early_retransmit()
2063 inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, in tcp_pause_early_retransmit()
2161 static bool tcp_time_to_recover(struct sock *sk, int flag) in tcp_time_to_recover() argument
2163 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover()
2180 !tcp_may_send_now(sk)) { in tcp_time_to_recover()
2194 tcp_is_sack(tp) && !tcp_send_head(sk)) in tcp_time_to_recover()
2204 !tcp_may_send_now(sk)) in tcp_time_to_recover()
2205 return !tcp_pause_early_retransmit(sk, flag); in tcp_time_to_recover()
2216 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) in tcp_mark_head_lost() argument
2218 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost()
2231 if (mark_head && skb != tcp_write_queue_head(sk)) in tcp_mark_head_lost()
2234 skb = tcp_write_queue_head(sk); in tcp_mark_head_lost()
2238 tcp_for_write_queue_from(skb, sk) { in tcp_mark_head_lost()
2239 if (skb == tcp_send_head(sk)) in tcp_mark_head_lost()
2261 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, in tcp_mark_head_lost()
2278 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) in tcp_update_scoreboard() argument
2280 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard()
2283 tcp_mark_head_lost(sk, 1, 1); in tcp_update_scoreboard()
2288 tcp_mark_head_lost(sk, lost, 0); in tcp_update_scoreboard()
2292 tcp_mark_head_lost(sk, sacked_upto, 0); in tcp_update_scoreboard()
2294 tcp_mark_head_lost(sk, 1, 1); in tcp_update_scoreboard()
2334 static bool tcp_any_retrans_done(const struct sock *sk) in tcp_any_retrans_done() argument
2336 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done()
2342 skb = tcp_write_queue_head(sk); in tcp_any_retrans_done()
2350 static void DBGUNDO(struct sock *sk, const char *msg) in DBGUNDO() argument
2352 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO()
2353 struct inet_sock *inet = inet_sk(sk); in DBGUNDO()
2355 if (sk->sk_family == AF_INET) { in DBGUNDO()
2364 else if (sk->sk_family == AF_INET6) { in DBGUNDO()
2367 &sk->sk_v6_daddr, ntohs(inet->inet_dport), in DBGUNDO()
2378 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) in tcp_undo_cwnd_reduction() argument
2380 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction()
2385 tcp_for_write_queue(skb, sk) { in tcp_undo_cwnd_reduction()
2386 if (skb == tcp_send_head(sk)) in tcp_undo_cwnd_reduction()
2395 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_undo_cwnd_reduction()
2398 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
2419 static bool tcp_try_undo_recovery(struct sock *sk) in tcp_try_undo_recovery() argument
2421 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery()
2429 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); in tcp_try_undo_recovery()
2430 tcp_undo_cwnd_reduction(sk, false); in tcp_try_undo_recovery()
2431 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) in tcp_try_undo_recovery()
2436 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_try_undo_recovery()
2443 if (!tcp_any_retrans_done(sk)) in tcp_try_undo_recovery()
2447 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_try_undo_recovery()
2452 static bool tcp_try_undo_dsack(struct sock *sk) in tcp_try_undo_dsack() argument
2454 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack()
2457 DBGUNDO(sk, "D-SACK"); in tcp_try_undo_dsack()
2458 tcp_undo_cwnd_reduction(sk, false); in tcp_try_undo_dsack()
2459 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); in tcp_try_undo_dsack()
2466 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) in tcp_try_undo_loss() argument
2468 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss()
2471 tcp_undo_cwnd_reduction(sk, true); in tcp_try_undo_loss()
2473 DBGUNDO(sk, "partial loss"); in tcp_try_undo_loss()
2474 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); in tcp_try_undo_loss()
2476 NET_INC_STATS_BH(sock_net(sk), in tcp_try_undo_loss()
2478 inet_csk(sk)->icsk_retransmits = 0; in tcp_try_undo_loss()
2480 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_try_undo_loss()
2496 static void tcp_init_cwnd_reduction(struct sock *sk) in tcp_init_cwnd_reduction() argument
2498 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction()
2506 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2510 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked, in tcp_cwnd_reduction() argument
2513 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction()
2534 static inline void tcp_end_cwnd_reduction(struct sock *sk) in tcp_end_cwnd_reduction() argument
2536 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction()
2540 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { in tcp_end_cwnd_reduction()
2544 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); in tcp_end_cwnd_reduction()
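
tcp_init_cwnd_reduction(), tcp_cwnd_reduction() and tcp_end_cwnd_reduction() implement Proportional Rate Reduction (RFC 6937): during CWR or Recovery the window is walked down toward ssthresh in proportion to how much data the receiver reports delivered, instead of being cut in one step. Below is a packet-counted sketch of the core send-quota computation; prior_cwnd stands in for the RFC's RecoverFS, and the structure fields are illustrative rather than the kernel's.

/* Proportional Rate Reduction (RFC 6937), packet-counted sketch.
 * While in_flight is above ssthresh, send in proportion to delivery;
 * once below, slow-start back toward ssthresh but no faster than delivery.
 */
struct prr {
	unsigned int prior_cwnd;    /* window when recovery started (RecoverFS) */
	unsigned int ssthresh;      /* target window after recovery */
	unsigned int prr_delivered; /* packets delivered since recovery began */
	unsigned int prr_out;       /* packets (re)transmitted since then */
};

static unsigned int prr_new_cwnd(const struct prr *p, unsigned int in_flight,
				 unsigned int newly_delivered)
{
	int sndcnt;

	if (in_flight > p->ssthresh) {
		/* ceil(prr_delivered * ssthresh / RecoverFS) - prr_out */
		unsigned long long dividend =
			(unsigned long long)p->prr_delivered * p->ssthresh +
			p->prior_cwnd - 1;
		sndcnt = (int)(dividend / p->prior_cwnd) - (int)p->prr_out;
	} else {
		int limit = (int)p->prr_delivered - (int)p->prr_out;

		if ((int)newly_delivered > limit)
			limit = (int)newly_delivered;
		limit += 1;
		sndcnt = (int)(p->ssthresh - in_flight);
		if (limit < sndcnt)
			sndcnt = limit;
	}
	if (sndcnt < 0)
		sndcnt = 0;
	return in_flight + (unsigned int)sndcnt;
}
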
2548 void tcp_enter_cwr(struct sock *sk) in tcp_enter_cwr() argument
2550 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr()
2553 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { in tcp_enter_cwr()
2555 tcp_init_cwnd_reduction(sk); in tcp_enter_cwr()
2556 tcp_set_ca_state(sk, TCP_CA_CWR); in tcp_enter_cwr()
2560 static void tcp_try_keep_open(struct sock *sk) in tcp_try_keep_open() argument
2562 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open()
2565 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2568 if (inet_csk(sk)->icsk_ca_state != state) { in tcp_try_keep_open()
2569 tcp_set_ca_state(sk, state); in tcp_try_keep_open()
2574 static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked) in tcp_try_to_open() argument
2576 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open()
2580 if (!tcp_any_retrans_done(sk)) in tcp_try_to_open()
2584 tcp_enter_cwr(sk); in tcp_try_to_open()
2586 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { in tcp_try_to_open()
2587 tcp_try_keep_open(sk); in tcp_try_to_open()
2589 tcp_cwnd_reduction(sk, prior_unsacked, 0); in tcp_try_to_open()
2593 static void tcp_mtup_probe_failed(struct sock *sk) in tcp_mtup_probe_failed() argument
2595 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_failed()
2601 static void tcp_mtup_probe_success(struct sock *sk) in tcp_mtup_probe_success() argument
2603 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success()
2604 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_success()
2607 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2609 tcp_mss_to_mtu(sk, tp->mss_cache) / in tcp_mtup_probe_success()
2613 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2617 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtup_probe_success()
2624 void tcp_simple_retransmit(struct sock *sk) in tcp_simple_retransmit() argument
2626 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_simple_retransmit()
2627 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit()
2629 unsigned int mss = tcp_current_mss(sk); in tcp_simple_retransmit()
2632 tcp_for_write_queue(skb, sk) { in tcp_simple_retransmit()
2633 if (skb == tcp_send_head(sk)) in tcp_simple_retransmit()
2662 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_simple_retransmit()
2665 tcp_set_ca_state(sk, TCP_CA_Loss); in tcp_simple_retransmit()
2667 tcp_xmit_retransmit_queue(sk); in tcp_simple_retransmit()
2671 static void tcp_enter_recovery(struct sock *sk, bool ece_ack) in tcp_enter_recovery() argument
2673 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery()
2681 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_enter_recovery()
2686 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { in tcp_enter_recovery()
2688 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2689 tcp_init_cwnd_reduction(sk); in tcp_enter_recovery()
2691 tcp_set_ca_state(sk, TCP_CA_Recovery); in tcp_enter_recovery()
2697 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) in tcp_process_loss() argument
2699 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss()
2706 if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED)) in tcp_process_loss()
2714 __tcp_push_pending_frames(sk, tcp_current_mss(sk), in tcp_process_loss()
2724 tcp_try_undo_recovery(sk); in tcp_process_loss()
2732 tcp_add_reno_sack(sk); in tcp_process_loss()
2736 if (tcp_try_undo_loss(sk, false)) in tcp_process_loss()
2738 tcp_xmit_retransmit_queue(sk); in tcp_process_loss()
2742 static bool tcp_try_undo_partial(struct sock *sk, const int acked, in tcp_try_undo_partial() argument
2745 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial()
2751 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); in tcp_try_undo_partial()
2759 tcp_cwnd_reduction(sk, prior_unsacked, 0); in tcp_try_undo_partial()
2763 if (!tcp_any_retrans_done(sk)) in tcp_try_undo_partial()
2766 DBGUNDO(sk, "partial recovery"); in tcp_try_undo_partial()
2767 tcp_undo_cwnd_reduction(sk, true); in tcp_try_undo_partial()
2768 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); in tcp_try_undo_partial()
2769 tcp_try_keep_open(sk); in tcp_try_undo_partial()
2786 static void tcp_fastretrans_alert(struct sock *sk, const int acked, in tcp_fastretrans_alert() argument
2790 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastretrans_alert()
2791 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert()
2807 if (tcp_check_sack_reneging(sk, flag)) in tcp_fastretrans_alert()
2824 tcp_end_cwnd_reduction(sk); in tcp_fastretrans_alert()
2825 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_fastretrans_alert()
2832 if (tcp_try_undo_recovery(sk)) in tcp_fastretrans_alert()
2834 tcp_end_cwnd_reduction(sk); in tcp_fastretrans_alert()
2844 tcp_add_reno_sack(sk); in tcp_fastretrans_alert()
2846 if (tcp_try_undo_partial(sk, acked, prior_unsacked)) in tcp_fastretrans_alert()
2852 if (tcp_try_undo_dsack(sk)) { in tcp_fastretrans_alert()
2853 tcp_try_keep_open(sk); in tcp_fastretrans_alert()
2858 tcp_process_loss(sk, flag, is_dupack); in tcp_fastretrans_alert()
2867 tcp_add_reno_sack(sk); in tcp_fastretrans_alert()
2871 tcp_try_undo_dsack(sk); in tcp_fastretrans_alert()
2873 if (!tcp_time_to_recover(sk, flag)) { in tcp_fastretrans_alert()
2874 tcp_try_to_open(sk, flag, prior_unsacked); in tcp_fastretrans_alert()
2882 tcp_mtup_probe_failed(sk); in tcp_fastretrans_alert()
2885 tcp_simple_retransmit(sk); in tcp_fastretrans_alert()
2890 tcp_enter_recovery(sk, (flag & FLAG_ECE)); in tcp_fastretrans_alert()
2895 tcp_update_scoreboard(sk, fast_rexmit); in tcp_fastretrans_alert()
2896 tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit); in tcp_fastretrans_alert()
2897 tcp_xmit_retransmit_queue(sk); in tcp_fastretrans_alert()
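
tcp_fastretrans_alert() is the central dispatcher of the sender's loss-recovery state machine; the states it moves between are the icsk_ca_state values referenced throughout this listing. A commented summary of those states; the names match the kernel's enum tcp_ca_state, but the descriptions are paraphrased here, not kernel text.

/* Sender-side congestion states driven by tcp_fastretrans_alert(). */
enum tcp_ca_state_sketch {
	CA_OPEN,      /* normal operation, no outstanding loss suspicion       */
	CA_DISORDER,  /* dupacks/SACKs seen, possibly only reordering          */
	CA_CWR,       /* cwnd being reduced after ECN echo or local congestion */
	CA_RECOVERY,  /* fast retransmit in progress, PRR walking cwnd down    */
	CA_LOSS,      /* RTO fired (tcp_enter_loss), retransmitting from head  */
};
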
2900 static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, in tcp_ack_update_rtt() argument
2903 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt()
2929 tcp_rtt_estimator(sk, seq_rtt_us); in tcp_ack_update_rtt()
2930 tcp_set_rto(sk); in tcp_ack_update_rtt()
2933 inet_csk(sk)->icsk_backoff = 0; in tcp_ack_update_rtt()
2938 static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp) in tcp_synack_rtt_meas() argument
2940 struct tcp_sock *tp = tcp_sk(sk); in tcp_synack_rtt_meas()
2950 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L); in tcp_synack_rtt_meas()
2953 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_cong_avoid() argument
2955 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_avoid()
2957 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); in tcp_cong_avoid()
2958 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; in tcp_cong_avoid()
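
tcp_cong_avoid() simply forwards to the active congestion-control module's cong_avoid hook. For reference, the classic Reno behaviour such a hook implements is slow start below ssthresh and one-segment-per-RTT growth above it; the following is a sketch of that behaviour, not the kernel's tcp_reno_cong_avoid() itself.

/* Classic Reno-style window growth, packet-counted sketch:
 * slow start adds one segment per ACKed segment (doubling per RTT),
 * congestion avoidance adds one segment per window's worth of ACKs.
 */
struct reno {
	unsigned int cwnd;      /* congestion window, in segments */
	unsigned int ssthresh;  /* slow start threshold */
	unsigned int cwnd_cnt;  /* partial-segment credit in avoidance */
};

static void reno_on_ack(struct reno *r, unsigned int acked)
{
	if (r->cwnd < r->ssthresh) {
		r->cwnd += acked;               /* slow start */
		return;
	}
	r->cwnd_cnt += acked;                   /* congestion avoidance */
	while (r->cwnd_cnt >= r->cwnd) {
		r->cwnd_cnt -= r->cwnd;
		r->cwnd += 1;
	}
}
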
2964 void tcp_rearm_rto(struct sock *sk) in tcp_rearm_rto() argument
2966 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rearm_rto()
2967 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto()
2976 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); in tcp_rearm_rto()
2978 u32 rto = inet_csk(sk)->icsk_rto; in tcp_rearm_rto()
2982 struct sk_buff *skb = tcp_write_queue_head(sk); in tcp_rearm_rto()
2991 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, in tcp_rearm_rto()
2999 void tcp_resume_early_retransmit(struct sock *sk) in tcp_resume_early_retransmit() argument
3001 struct tcp_sock *tp = tcp_sk(sk); in tcp_resume_early_retransmit()
3003 tcp_rearm_rto(sk); in tcp_resume_early_retransmit()
3009 tcp_enter_recovery(sk, false); in tcp_resume_early_retransmit()
3010 tcp_update_scoreboard(sk, 1); in tcp_resume_early_retransmit()
3011 tcp_xmit_retransmit_queue(sk); in tcp_resume_early_retransmit()
3015 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) in tcp_tso_acked() argument
3017 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked()
3023 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3035 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, in tcp_ack_tstamp() argument
3041 if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))) in tcp_ack_tstamp()
3046 between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) in tcp_ack_tstamp()
3047 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); in tcp_ack_tstamp()
3054 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, in tcp_clean_rtx_queue() argument
3057 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3059 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue()
3072 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { in tcp_clean_rtx_queue()
3077 tcp_ack_tstamp(sk, skb, prior_snd_una); in tcp_clean_rtx_queue()
3085 acked_pcount = tcp_tso_acked(sk, skb); in tcp_clean_rtx_queue()
3138 tcp_unlink_write_queue(skb, sk); in tcp_clean_rtx_queue()
3139 sk_wmem_free_skb(sk, skb); in tcp_clean_rtx_queue()
3158 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us); in tcp_clean_rtx_queue()
3162 = inet_csk(sk)->icsk_ca_ops; in tcp_clean_rtx_queue()
3164 tcp_rearm_rto(sk); in tcp_clean_rtx_queue()
3167 tcp_mtup_probe_success(sk); in tcp_clean_rtx_queue()
3171 tcp_remove_reno_sacks(sk, pkts_acked); in tcp_clean_rtx_queue()
3177 tcp_update_reordering(sk, tp->fackets_out - reord, 0); in tcp_clean_rtx_queue()
3187 ca_ops->pkts_acked(sk, pkts_acked, ca_seq_rtt_us); in tcp_clean_rtx_queue()
3195 tcp_rearm_rto(sk); in tcp_clean_rtx_queue()
3203 icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3224 static void tcp_ack_probe(struct sock *sk) in tcp_ack_probe() argument
3226 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe()
3227 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack_probe()
3231 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3233 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); in tcp_ack_probe()
3240 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_ack_probe()
3245 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) in tcp_ack_is_dubious() argument
3248 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; in tcp_ack_is_dubious()
3252 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) in tcp_may_raise_cwnd() argument
3254 if (tcp_in_cwnd_reduction(sk)) in tcp_may_raise_cwnd()
3263 if (tcp_sk(sk)->reordering > sysctl_tcp_reordering) in tcp_may_raise_cwnd()
3286 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, in tcp_ack_update_window() argument
3289 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window()
3307 tcp_fast_path_check(sk); in tcp_ack_update_window()
3311 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); in tcp_ack_update_window()
3322 static void tcp_send_challenge_ack(struct sock *sk) in tcp_send_challenge_ack() argument
3340 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); in tcp_send_challenge_ack()
3341 tcp_send_ack(sk); in tcp_send_challenge_ack()
3369 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) in tcp_process_tlp_ack() argument
3371 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack()
3388 tcp_init_cwnd_reduction(sk); in tcp_process_tlp_ack()
3389 tcp_set_ca_state(sk, TCP_CA_CWR); in tcp_process_tlp_ack()
3390 tcp_end_cwnd_reduction(sk); in tcp_process_tlp_ack()
3391 tcp_try_keep_open(sk); in tcp_process_tlp_ack()
3392 NET_INC_STATS_BH(sock_net(sk), in tcp_process_tlp_ack()
3398 static inline void tcp_in_ack_event(struct sock *sk, u32 flags) in tcp_in_ack_event() argument
3400 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_ack_event()
3403 icsk->icsk_ca_ops->in_ack_event(sk, flags); in tcp_in_ack_event()
3407 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) in tcp_ack() argument
3409 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack()
3410 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack()
3422 prefetchw(sk->sk_write_queue.next); in tcp_ack()
3430 tcp_send_challenge_ack(sk); in tcp_ack()
3444 tcp_rearm_rto(sk); in tcp_ack()
3468 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); in tcp_ack()
3470 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); in tcp_ack()
3477 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); in tcp_ack()
3479 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); in tcp_ack()
3482 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3493 tcp_in_ack_event(sk, ack_ev_flags); in tcp_ack()
3499 sk->sk_err_soft = 0; in tcp_ack()
3507 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, in tcp_ack()
3512 if (tcp_may_raise_cwnd(sk, flag)) in tcp_ack()
3513 tcp_cong_avoid(sk, ack, acked); in tcp_ack()
3515 if (tcp_ack_is_dubious(sk, flag)) { in tcp_ack()
3517 tcp_fastretrans_alert(sk, acked, prior_unsacked, in tcp_ack()
3521 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3524 struct dst_entry *dst = __sk_dst_get(sk); in tcp_ack()
3530 tcp_schedule_loss_probe(sk); in tcp_ack()
3531 tcp_update_pacing_rate(sk); in tcp_ack()
3537 tcp_fastretrans_alert(sk, acked, prior_unsacked, in tcp_ack()
3543 if (tcp_send_head(sk)) in tcp_ack()
3544 tcp_ack_probe(sk); in tcp_ack()
3547 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3551 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3559 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3561 tcp_fastretrans_alert(sk, acked, prior_unsacked, in tcp_ack()
3565 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3788 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) in tcp_disordered_ack() argument
3790 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack()
3805 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); in tcp_disordered_ack()
3808 static inline bool tcp_paws_discard(const struct sock *sk, in tcp_paws_discard() argument
3811 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard()
3814 !tcp_disordered_ack(sk, skb); in tcp_paws_discard()
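
tcp_paws_discard() applies the PAWS check (RFC 7323): a segment whose timestamp is older than the last one accepted on the connection is dropped, with tcp_disordered_ack() excusing pure ACKs whose timestamp lags by no more than roughly one RTO, as line 3805 shows. A sketch of the basic timestamp comparison, using the same wrap-safe signed subtraction; TCP_PAWS_WINDOW mirrors the kernel macro's value of 1 tick.

/* PAWS: reject a segment whose timestamp value is strictly older than the
 * most recently accepted one.  The signed 32-bit subtraction keeps the
 * comparison correct across timestamp wrap-around.
 */
#include <stdint.h>
#include <stdbool.h>

#define TCP_PAWS_WINDOW 1

static bool paws_reject(uint32_t ts_recent, uint32_t rcv_tsval)
{
	return (int32_t)(ts_recent - rcv_tsval) > TCP_PAWS_WINDOW;
}
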
3837 void tcp_reset(struct sock *sk) in tcp_reset() argument
3840 switch (sk->sk_state) { in tcp_reset()
3842 sk->sk_err = ECONNREFUSED; in tcp_reset()
3845 sk->sk_err = EPIPE; in tcp_reset()
3850 sk->sk_err = ECONNRESET; in tcp_reset()
3855 if (!sock_flag(sk, SOCK_DEAD)) in tcp_reset()
3856 sk->sk_error_report(sk); in tcp_reset()
3858 tcp_done(sk); in tcp_reset()
3875 static void tcp_fin(struct sock *sk) in tcp_fin() argument
3877 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin()
3880 inet_csk_schedule_ack(sk); in tcp_fin()
3882 sk->sk_shutdown |= RCV_SHUTDOWN; in tcp_fin()
3883 sock_set_flag(sk, SOCK_DONE); in tcp_fin()
3885 switch (sk->sk_state) { in tcp_fin()
3889 tcp_set_state(sk, TCP_CLOSE_WAIT); in tcp_fin()
3890 dst = __sk_dst_get(sk); in tcp_fin()
3892 inet_csk(sk)->icsk_ack.pingpong = 1; in tcp_fin()
3910 tcp_send_ack(sk); in tcp_fin()
3911 tcp_set_state(sk, TCP_CLOSING); in tcp_fin()
3915 tcp_send_ack(sk); in tcp_fin()
3916 tcp_time_wait(sk, TCP_TIME_WAIT, 0); in tcp_fin()
3923 __func__, sk->sk_state); in tcp_fin()
3933 sk_mem_reclaim(sk); in tcp_fin()
3935 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_fin()
3936 sk->sk_state_change(sk); in tcp_fin()
3939 if (sk->sk_shutdown == SHUTDOWN_MASK || in tcp_fin()
3940 sk->sk_state == TCP_CLOSE) in tcp_fin()
3941 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); in tcp_fin()
3943 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); in tcp_fin()
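
tcp_fin() drives the receive side of the close handshake: depending on the socket's current state, an incoming FIN moves it to CLOSE_WAIT, CLOSING, or TIME_WAIT, as the cases above show. The transitions as a small sketch of the standard TCP state machine, mirroring that switch rather than reproducing kernel code.

/* Receive-side reaction to an incoming FIN (standard TCP state machine).
 * Error and rare states are omitted.
 */
enum state { ESTABLISHED, FIN_WAIT1, FIN_WAIT2, CLOSE_WAIT, CLOSING, TIME_WAIT };

static enum state on_fin(enum state s)
{
	switch (s) {
	case ESTABLISHED:
		return CLOSE_WAIT;   /* peer closed first; we may still send data */
	case FIN_WAIT1:
		return CLOSING;      /* simultaneous close: our FIN not yet ACKed */
	case FIN_WAIT2:
		return TIME_WAIT;    /* our FIN already ACKed; wait 2*MSL */
	default:
		return s;            /* retransmitted FIN: just re-ACK, no change */
	}
}
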
3960 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) in tcp_dsack_set() argument
3962 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set()
3972 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_dsack_set()
3980 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) in tcp_dsack_extend() argument
3982 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend()
3985 tcp_dsack_set(sk, seq, end_seq); in tcp_dsack_extend()
3990 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) in tcp_send_dupack() argument
3992 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack()
3996 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_send_dupack()
3997 tcp_enter_quickack_mode(sk); in tcp_send_dupack()
4004 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); in tcp_send_dupack()
4008 tcp_send_ack(sk); in tcp_send_dupack()
4039 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) in tcp_sack_new_ofo_skb() argument
4041 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb()
4128 static bool tcp_try_coalesce(struct sock *sk, in tcp_try_coalesce() argument
4144 atomic_add(delta, &sk->sk_rmem_alloc); in tcp_try_coalesce()
4145 sk_mem_charge(sk, delta); in tcp_try_coalesce()
4146 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); in tcp_try_coalesce()
4156 static void tcp_ofo_queue(struct sock *sk) in tcp_ofo_queue() argument
4158 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue()
4171 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); in tcp_ofo_queue()
4176 SOCK_DEBUG(sk, "ofo packet was already received\n"); in tcp_ofo_queue()
4180 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", in tcp_ofo_queue()
4184 tail = skb_peek_tail(&sk->sk_receive_queue); in tcp_ofo_queue()
4185 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); in tcp_ofo_queue()
4188 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_ofo_queue()
4190 tcp_fin(sk); in tcp_ofo_queue()
4196 static bool tcp_prune_ofo_queue(struct sock *sk);
4197 static int tcp_prune_queue(struct sock *sk);
4199 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, in tcp_try_rmem_schedule() argument
4202 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in tcp_try_rmem_schedule()
4203 !sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4205 if (tcp_prune_queue(sk) < 0) in tcp_try_rmem_schedule()
4208 if (!sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4209 if (!tcp_prune_ofo_queue(sk)) in tcp_try_rmem_schedule()
4212 if (!sk_rmem_schedule(sk, skb, size)) in tcp_try_rmem_schedule()
4219 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) in tcp_data_queue_ofo() argument
4221 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo()
4227 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { in tcp_data_queue_ofo()
4228 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); in tcp_data_queue_ofo()
4235 inet_csk_schedule_ack(sk); in tcp_data_queue_ofo()
4237 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); in tcp_data_queue_ofo()
4238 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", in tcp_data_queue_ofo()
4260 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { in tcp_data_queue_ofo()
4263 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4292 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); in tcp_data_queue_ofo()
4295 tcp_dsack_set(sk, seq, end_seq); in tcp_data_queue_ofo()
4300 tcp_dsack_set(sk, seq, in tcp_data_queue_ofo()
4324 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, in tcp_data_queue_ofo()
4329 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, in tcp_data_queue_ofo()
4331 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); in tcp_data_queue_ofo()
4337 tcp_sack_new_ofo_skb(sk, seq, end_seq); in tcp_data_queue_ofo()
4340 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4341 skb_set_owner_r(skb, sk); in tcp_data_queue_ofo()
4345 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, in tcp_queue_rcv() argument
4349 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); in tcp_queue_rcv()
4353 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; in tcp_queue_rcv()
4354 tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_queue_rcv()
4356 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_queue_rcv()
4357 skb_set_owner_r(skb, sk); in tcp_queue_rcv()
4362 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) in tcp_send_rcvq() argument
4380 &err, sk->sk_allocation); in tcp_send_rcvq()
4388 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_send_rcvq()
4395 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; in tcp_send_rcvq()
4397 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; in tcp_send_rcvq()
4399 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { in tcp_send_rcvq()
4412 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) in tcp_data_queue() argument
4414 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue()
4439 sock_owned_by_user(sk) && !tp->urg_data) { in tcp_data_queue()
4450 tcp_rcv_space_adjust(sk); in tcp_data_queue()
4458 tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_data_queue()
4461 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); in tcp_data_queue()
4465 tcp_event_data_recv(sk, skb); in tcp_data_queue()
4467 tcp_fin(sk); in tcp_data_queue()
4470 tcp_ofo_queue(sk); in tcp_data_queue()
4476 inet_csk(sk)->icsk_ack.pingpong = 0; in tcp_data_queue()
4482 tcp_fast_path_check(sk); in tcp_data_queue()
4486 if (!sock_flag(sk, SOCK_DEAD)) in tcp_data_queue()
4487 sk->sk_data_ready(sk); in tcp_data_queue()
4493 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_data_queue()
4494 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4497 tcp_enter_quickack_mode(sk); in tcp_data_queue()
4498 inet_csk_schedule_ack(sk); in tcp_data_queue()
4508 tcp_enter_quickack_mode(sk); in tcp_data_queue()
4512 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", in tcp_data_queue()
4516 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
4526 tcp_data_queue_ofo(sk, skb); in tcp_data_queue()
4529 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, in tcp_collapse_one() argument
4539 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); in tcp_collapse_one()
4553 tcp_collapse(struct sock *sk, struct sk_buff_head *list, in tcp_collapse() argument
4570 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4615 skb_set_owner_r(nskb, sk); in tcp_collapse()
4632 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4645 static void tcp_collapse_ofo_queue(struct sock *sk) in tcp_collapse_ofo_queue() argument
4647 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue()
4671 tcp_collapse(sk, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
4692 static bool tcp_prune_ofo_queue(struct sock *sk) in tcp_prune_ofo_queue() argument
4694 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue()
4698 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); in tcp_prune_ofo_queue()
4708 sk_mem_reclaim(sk); in tcp_prune_ofo_queue()
4721 static int tcp_prune_queue(struct sock *sk) in tcp_prune_queue() argument
4723 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue()
4725 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); in tcp_prune_queue()
4727 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); in tcp_prune_queue()
4729 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in tcp_prune_queue()
4730 tcp_clamp_window(sk); in tcp_prune_queue()
4731 else if (sk_under_memory_pressure(sk)) in tcp_prune_queue()
4734 tcp_collapse_ofo_queue(sk); in tcp_prune_queue()
4735 if (!skb_queue_empty(&sk->sk_receive_queue)) in tcp_prune_queue()
4736 tcp_collapse(sk, &sk->sk_receive_queue, in tcp_prune_queue()
4737 skb_peek(&sk->sk_receive_queue), in tcp_prune_queue()
4740 sk_mem_reclaim(sk); in tcp_prune_queue()
4742 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
4748 tcp_prune_ofo_queue(sk); in tcp_prune_queue()
4750 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
4757 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); in tcp_prune_queue()
4764 static bool tcp_should_expand_sndbuf(const struct sock *sk) in tcp_should_expand_sndbuf() argument
4766 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf()
4771 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) in tcp_should_expand_sndbuf()
4775 if (sk_under_memory_pressure(sk)) in tcp_should_expand_sndbuf()
4779 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) in tcp_should_expand_sndbuf()
4795 static void tcp_new_space(struct sock *sk) in tcp_new_space() argument
4797 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space()
4799 if (tcp_should_expand_sndbuf(sk)) { in tcp_new_space()
4800 tcp_sndbuf_expand(sk); in tcp_new_space()
4804 sk->sk_write_space(sk); in tcp_new_space()
4807 static void tcp_check_space(struct sock *sk) in tcp_check_space() argument
4809 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { in tcp_check_space()
4810 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); in tcp_check_space()
4811 if (sk->sk_socket && in tcp_check_space()
4812 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) in tcp_check_space()
4813 tcp_new_space(sk); in tcp_check_space()
4817 static inline void tcp_data_snd_check(struct sock *sk) in tcp_data_snd_check() argument
4819 tcp_push_pending_frames(sk); in tcp_data_snd_check()
4820 tcp_check_space(sk); in tcp_data_snd_check()
4826 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) in __tcp_ack_snd_check() argument
4828 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check()
4831 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
4835 __tcp_select_window(sk) >= tp->rcv_wnd) || in __tcp_ack_snd_check()
4837 tcp_in_quickack_mode(sk) || in __tcp_ack_snd_check()
4841 tcp_send_ack(sk); in __tcp_ack_snd_check()
4844 tcp_send_delayed_ack(sk); in __tcp_ack_snd_check()
4848 static inline void tcp_ack_snd_check(struct sock *sk) in tcp_ack_snd_check() argument
4850 if (!inet_csk_ack_scheduled(sk)) { in tcp_ack_snd_check()
4854 __tcp_ack_snd_check(sk, 1); in tcp_ack_snd_check()
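
__tcp_ack_snd_check() decides between an immediate ACK and a delayed one: more than a full-sized segment of unacknowledged receive data (with a window update worth advertising), quick-ACK mode, or queued out-of-order data all force an ACK now; otherwise the delayed-ACK timer is armed. A boolean sketch of that decision; the field names here are illustrative.

/* Immediate-ACK decision, mirroring the conditions in __tcp_ack_snd_check(). */
#include <stdbool.h>

struct ack_ctx {
	unsigned int unacked_bytes;   /* rcv_nxt - rcv_wup                   */
	unsigned int rcv_mss;         /* peer's estimated sending MSS        */
	bool window_update_pending;   /* new window >= currently advertised  */
	bool in_quickack_mode;        /* quick-ACK budget not yet exhausted  */
	bool ofo_data_queued;         /* out-of-order queue is non-empty     */
};

static bool ack_now(const struct ack_ctx *c)
{
	if (c->unacked_bytes > c->rcv_mss && c->window_update_pending)
		return true;          /* at least one full segment to ACK    */
	return c->in_quickack_mode || c->ofo_data_queued;
}
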
4867 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) in tcp_check_urg() argument
4869 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg()
4898 sk_send_sigurg(sk); in tcp_check_urg()
4916 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
4917 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_check_urg()
4920 __skb_unlink(skb, &sk->sk_receive_queue); in tcp_check_urg()
4933 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) in tcp_urg() argument
4935 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg()
4939 tcp_check_urg(sk, th); in tcp_urg()
4952 if (!sock_flag(sk, SOCK_DEAD)) in tcp_urg()
4953 sk->sk_data_ready(sk); in tcp_urg()
4958 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) in tcp_copy_to_iovec() argument
4960 struct tcp_sock *tp = tcp_sk(sk); in tcp_copy_to_iovec()
4974 tcp_rcv_space_adjust(sk); in tcp_copy_to_iovec()
4981 static __sum16 __tcp_checksum_complete_user(struct sock *sk, in __tcp_checksum_complete_user() argument
4986 if (sock_owned_by_user(sk)) { in __tcp_checksum_complete_user()
4996 static inline bool tcp_checksum_complete_user(struct sock *sk, in tcp_checksum_complete_user() argument
5000 __tcp_checksum_complete_user(sk, skb); in tcp_checksum_complete_user()
5006 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, in tcp_validate_incoming() argument
5009 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming()
5013 tcp_paws_discard(sk, skb)) { in tcp_validate_incoming()
5015 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); in tcp_validate_incoming()
5016 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5033 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5047 tcp_reset(sk); in tcp_validate_incoming()
5049 tcp_send_challenge_ack(sk); in tcp_validate_incoming()
5061 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_validate_incoming()
5062 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); in tcp_validate_incoming()
5063 tcp_send_challenge_ack(sk); in tcp_validate_incoming()
5097 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, in tcp_rcv_established() argument
5100 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established()
5102 if (unlikely(sk->sk_rx_dst == NULL)) in tcp_rcv_established()
5103 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_rcv_established()
5172 tcp_ack(sk, skb, 0); in tcp_rcv_established()
5174 tcp_data_snd_check(sk); in tcp_rcv_established()
5177 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5187 sock_owned_by_user(sk)) { in tcp_rcv_established()
5190 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { in tcp_rcv_established()
5201 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5205 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); in tcp_rcv_established()
5210 if (tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5213 if ((int)skb->truesize > sk->sk_forward_alloc) in tcp_rcv_established()
5225 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5227 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); in tcp_rcv_established()
5230 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, in tcp_rcv_established()
5234 tcp_event_data_recv(sk, skb); in tcp_rcv_established()
5238 tcp_ack(sk, skb, FLAG_DATA); in tcp_rcv_established()
5239 tcp_data_snd_check(sk); in tcp_rcv_established()
5240 if (!inet_csk_ack_scheduled(sk)) in tcp_rcv_established()
5244 __tcp_ack_snd_check(sk, 0); in tcp_rcv_established()
5248 sk->sk_data_ready(sk); in tcp_rcv_established()
5254 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5264 if (!tcp_validate_incoming(sk, skb, th, 1)) in tcp_rcv_established()
5268 if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) in tcp_rcv_established()
5271 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5274 tcp_urg(sk, skb, th); in tcp_rcv_established()
5277 tcp_data_queue(sk, skb); in tcp_rcv_established()
5279 tcp_data_snd_check(sk); in tcp_rcv_established()
5280 tcp_ack_snd_check(sk); in tcp_rcv_established()
5284 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); in tcp_rcv_established()
5285 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5292 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) in tcp_finish_connect() argument
5294 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect()
5295 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_finish_connect()
5297 tcp_set_state(sk, TCP_ESTABLISHED); in tcp_finish_connect()
5301 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
5302 security_inet_conn_established(sk, skb); in tcp_finish_connect()
5306 icsk->icsk_af_ops->rebuild_header(sk); in tcp_finish_connect()
5308 tcp_init_metrics(sk); in tcp_finish_connect()
5310 tcp_init_congestion_control(sk); in tcp_finish_connect()
5317 tcp_init_buffer_space(sk); in tcp_finish_connect()
5319 if (sock_flag(sk, SOCK_KEEPOPEN)) in tcp_finish_connect()
5320 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
5327 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_finish_connect()
5328 sk->sk_state_change(sk); in tcp_finish_connect()
5329 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); in tcp_finish_connect()
5333 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, in tcp_rcv_fastopen_synack() argument
5336 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack()
5337 struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
5360 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); in tcp_rcv_fastopen_synack()
5363 tcp_for_write_queue_from(data, sk) { in tcp_rcv_fastopen_synack()
5364 if (data == tcp_send_head(sk) || in tcp_rcv_fastopen_synack()
5365 __tcp_retransmit_skb(sk, data)) in tcp_rcv_fastopen_synack()
5368 tcp_rearm_rto(sk); in tcp_rcv_fastopen_synack()
5369 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL); in tcp_rcv_fastopen_synack()
5374 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); in tcp_rcv_fastopen_synack()
5378 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, in tcp_rcv_synsent_state_process() argument
5381 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_synsent_state_process()
5382 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process()
5406 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); in tcp_rcv_synsent_state_process()
5419 tcp_reset(sk); in tcp_rcv_synsent_state_process()
5443 tcp_ack(sk, skb, FLAG_SLOWPATH); in tcp_rcv_synsent_state_process()
5474 tcp_mtup_init(sk); in tcp_rcv_synsent_state_process()
5475 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5476 tcp_initialize_rcv_mss(sk); in tcp_rcv_synsent_state_process()
5485 tcp_finish_connect(sk, skb); in tcp_rcv_synsent_state_process()
5488 tcp_rcv_fastopen_synack(sk, skb, &foc)) in tcp_rcv_synsent_state_process()
5491 if (sk->sk_write_pending || in tcp_rcv_synsent_state_process()
5501 inet_csk_schedule_ack(sk); in tcp_rcv_synsent_state_process()
5502 tcp_enter_quickack_mode(sk); in tcp_rcv_synsent_state_process()
5503 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in tcp_rcv_synsent_state_process()
5510 tcp_send_ack(sk); in tcp_rcv_synsent_state_process()
5537 tcp_set_state(sk, TCP_SYN_RECV); in tcp_rcv_synsent_state_process()
5561 tcp_mtup_init(sk); in tcp_rcv_synsent_state_process()
5562 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5563 tcp_initialize_rcv_mss(sk); in tcp_rcv_synsent_state_process()
5565 tcp_send_synack(sk); in tcp_rcv_synsent_state_process()
5605 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, in tcp_rcv_state_process() argument
5608 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process()
5609 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_state_process()
5617 switch (sk->sk_state) { in tcp_rcv_state_process()
5631 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) in tcp_rcv_state_process()
5657 queued = tcp_rcv_synsent_state_process(sk, skb, th, len); in tcp_rcv_state_process()
5662 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5664 tcp_data_snd_check(sk); in tcp_rcv_state_process()
5670 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && in tcp_rcv_state_process()
5671 sk->sk_state != TCP_FIN_WAIT1); in tcp_rcv_state_process()
5673 if (tcp_check_req(sk, skb, req, NULL, true) == NULL) in tcp_rcv_state_process()
5680 if (!tcp_validate_incoming(sk, skb, th, 0)) in tcp_rcv_state_process()
5684 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | in tcp_rcv_state_process()
5687 switch (sk->sk_state) { in tcp_rcv_state_process()
5698 reqsk_fastopen_remove(sk, req, false); in tcp_rcv_state_process()
5702 icsk->icsk_af_ops->rebuild_header(sk); in tcp_rcv_state_process()
5703 tcp_init_congestion_control(sk); in tcp_rcv_state_process()
5705 tcp_mtup_init(sk); in tcp_rcv_state_process()
5707 tcp_init_buffer_space(sk); in tcp_rcv_state_process()
5710 tcp_set_state(sk, TCP_ESTABLISHED); in tcp_rcv_state_process()
5711 sk->sk_state_change(sk); in tcp_rcv_state_process()
5717 if (sk->sk_socket) in tcp_rcv_state_process()
5718 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); in tcp_rcv_state_process()
5723 tcp_synack_rtt_meas(sk, synack_stamp); in tcp_rcv_state_process()
5737 tcp_rearm_rto(sk); in tcp_rcv_state_process()
5739 tcp_init_metrics(sk); in tcp_rcv_state_process()
5741 tcp_update_pacing_rate(sk); in tcp_rcv_state_process()
5746 tcp_initialize_rcv_mss(sk); in tcp_rcv_state_process()
5769 reqsk_fastopen_remove(sk, req, false); in tcp_rcv_state_process()
5770 tcp_rearm_rto(sk); in tcp_rcv_state_process()
5775 tcp_set_state(sk, TCP_FIN_WAIT2); in tcp_rcv_state_process()
5776 sk->sk_shutdown |= SEND_SHUTDOWN; in tcp_rcv_state_process()
5778 dst = __sk_dst_get(sk); in tcp_rcv_state_process()
5782 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_rcv_state_process()
5784 sk->sk_state_change(sk); in tcp_rcv_state_process()
5791 tcp_done(sk); in tcp_rcv_state_process()
5792 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
5796 tmo = tcp_fin_time(sk); in tcp_rcv_state_process()
5798 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); in tcp_rcv_state_process()
5799 } else if (th->fin || sock_owned_by_user(sk)) { in tcp_rcv_state_process()
5806 inet_csk_reset_keepalive_timer(sk, tmo); in tcp_rcv_state_process()
5808 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_rcv_state_process()
5816 tcp_time_wait(sk, TCP_TIME_WAIT, 0); in tcp_rcv_state_process()
5823 tcp_update_metrics(sk); in tcp_rcv_state_process()
5824 tcp_done(sk); in tcp_rcv_state_process()
5831 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5834 switch (sk->sk_state) { in tcp_rcv_state_process()
5846 if (sk->sk_shutdown & RCV_SHUTDOWN) { in tcp_rcv_state_process()
5849 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
5850 tcp_reset(sk); in tcp_rcv_state_process()
5856 tcp_data_queue(sk, skb); in tcp_rcv_state_process()
5862 if (sk->sk_state != TCP_CLOSE) { in tcp_rcv_state_process()
5863 tcp_data_snd_check(sk); in tcp_rcv_state_process()
5864 tcp_ack_snd_check(sk); in tcp_rcv_state_process()
5925 struct sock *sk, struct sk_buff *skb) in tcp_conn_request() argument
5929 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request()
5943 inet_csk_reqsk_queue_is_full(sk)) && !isn) { in tcp_conn_request()
5944 want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); in tcp_conn_request()
5955 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { in tcp_conn_request()
5956 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_conn_request()
5975 tcp_openreq_init(req, &tmp_opt, skb, sk); in tcp_conn_request()
5977 af_ops->init_req(req, sk, skb); in tcp_conn_request()
5979 if (security_inet_conn_request(sk, skb, req)) in tcp_conn_request()
5983 tcp_ecn_create_request(req, skb, sk); in tcp_conn_request()
5986 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); in tcp_conn_request()
6001 dst = af_ops->route_req(sk, &fl, req, &strict); in tcp_conn_request()
6006 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); in tcp_conn_request()
6012 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < in tcp_conn_request()
6031 dst = af_ops->route_req(sk, &fl, req, NULL); in tcp_conn_request()
6037 tcp_openreq_init_rwin(req, sk, dst); in tcp_conn_request()
6039 tcp_try_fastopen(sk, skb, req, &foc, dst); in tcp_conn_request()
6040 err = af_ops->send_synack(sk, dst, &fl, req, in tcp_conn_request()
6047 af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT); in tcp_conn_request()
6057 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_conn_request()