Lines matching references to sk (Linux TCP input path, net/ipv4/tcp_input.c).
Each entry shows the source line number, the (possibly truncated) source line, and the enclosing function; entries marked "argument" are lines where sk appears as a function parameter.

129 static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,  in tcp_gro_dev_warn()  argument
140 dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif); in tcp_gro_dev_warn()
151 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) in tcp_measure_rcv_mss() argument
153 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss()
165 tcp_sk(sk)->advmss); in tcp_measure_rcv_mss()
169 tcp_gro_dev_warn(sk, skb, len); in tcp_measure_rcv_mss()
189 len -= tcp_sk(sk)->tcp_header_len; in tcp_measure_rcv_mss()
202 static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks) in tcp_incr_quickack() argument
204 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack()
205 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
214 void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) in tcp_enter_quickack_mode() argument
216 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode()
218 tcp_incr_quickack(sk, max_quickacks); in tcp_enter_quickack_mode()
228 static bool tcp_in_quickack_mode(struct sock *sk) in tcp_in_quickack_mode() argument
230 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode()
231 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_in_quickack_mode()
254 static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) in __tcp_ecn_check_ce() argument
256 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce()
265 tcp_enter_quickack_mode(sk, 2); in __tcp_ecn_check_ce()
268 if (tcp_ca_needs_ecn(sk)) in __tcp_ecn_check_ce()
269 tcp_ca_event(sk, CA_EVENT_ECN_IS_CE); in __tcp_ecn_check_ce()
273 tcp_enter_quickack_mode(sk, 2); in __tcp_ecn_check_ce()
279 if (tcp_ca_needs_ecn(sk)) in __tcp_ecn_check_ce()
280 tcp_ca_event(sk, CA_EVENT_ECN_NO_CE); in __tcp_ecn_check_ce()
286 static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) in tcp_ecn_check_ce() argument
288 if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK) in tcp_ecn_check_ce()
289 __tcp_ecn_check_ce(sk, skb); in tcp_ecn_check_ce()
316 static void tcp_sndbuf_expand(struct sock *sk) in tcp_sndbuf_expand() argument
318 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand()
319 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_sndbuf_expand()
340 sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2; in tcp_sndbuf_expand()
343 if (sk->sk_sndbuf < sndmem) in tcp_sndbuf_expand()
344 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); in tcp_sndbuf_expand()
373 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) in __tcp_grow_window() argument
375 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window()
382 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; in __tcp_grow_window()
390 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) in tcp_grow_window() argument
392 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window()
395 room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; in tcp_grow_window()
398 if (room > 0 && !tcp_under_memory_pressure(sk)) { in tcp_grow_window()
407 incr = __tcp_grow_window(sk, skb); in tcp_grow_window()
412 inet_csk(sk)->icsk_ack.quick |= 1; in tcp_grow_window()
418 static void tcp_fixup_rcvbuf(struct sock *sk) in tcp_fixup_rcvbuf() argument
420 u32 mss = tcp_sk(sk)->advmss; in tcp_fixup_rcvbuf()
432 if (sk->sk_rcvbuf < rcvmem) in tcp_fixup_rcvbuf()
433 sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); in tcp_fixup_rcvbuf()
439 void tcp_init_buffer_space(struct sock *sk) in tcp_init_buffer_space() argument
441 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space()
444 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) in tcp_init_buffer_space()
445 tcp_fixup_rcvbuf(sk); in tcp_init_buffer_space()
446 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) in tcp_init_buffer_space()
447 tcp_sndbuf_expand(sk); in tcp_init_buffer_space()
454 maxwin = tcp_full_space(sk); in tcp_init_buffer_space()
476 static void tcp_clamp_window(struct sock *sk) in tcp_clamp_window() argument
478 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window()
479 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window()
483 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && in tcp_clamp_window()
484 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && in tcp_clamp_window()
485 !tcp_under_memory_pressure(sk) && in tcp_clamp_window()
486 sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { in tcp_clamp_window()
487 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), in tcp_clamp_window()
490 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) in tcp_clamp_window()
501 void tcp_initialize_rcv_mss(struct sock *sk) in tcp_initialize_rcv_mss() argument
503 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss()
510 inet_csk(sk)->icsk_ack.rcv_mss = hint; in tcp_initialize_rcv_mss()
575 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, in tcp_rcv_rtt_measure_ts() argument
578 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts()
582 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { in tcp_rcv_rtt_measure_ts()
597 void tcp_rcv_space_adjust(struct sock *sk) in tcp_rcv_space_adjust() argument
599 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust()
623 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { in tcp_rcv_space_adjust()
652 if (rcvbuf > sk->sk_rcvbuf) { in tcp_rcv_space_adjust()
653 sk->sk_rcvbuf = rcvbuf; in tcp_rcv_space_adjust()
676 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) in tcp_event_data_recv() argument
678 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv()
679 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_recv()
682 inet_csk_schedule_ack(sk); in tcp_event_data_recv()
684 tcp_measure_rcv_mss(sk, skb); in tcp_event_data_recv()
694 tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); in tcp_event_data_recv()
710 tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); in tcp_event_data_recv()
711 sk_mem_reclaim(sk); in tcp_event_data_recv()
716 tcp_ecn_check_ce(sk, skb); in tcp_event_data_recv()
719 tcp_grow_window(sk, skb); in tcp_event_data_recv()
731 static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) in tcp_rtt_estimator() argument
733 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator()
782 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
788 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
804 static void tcp_update_pacing_rate(struct sock *sk) in tcp_update_pacing_rate() argument
806 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate()
834 ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, in tcp_update_pacing_rate()
835 sk->sk_max_pacing_rate); in tcp_update_pacing_rate()
841 static void tcp_set_rto(struct sock *sk) in tcp_set_rto() argument
843 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto()
854 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
865 tcp_bound_rto(sk); in tcp_set_rto()
895 static void tcp_update_reordering(struct sock *sk, const int metric, in tcp_update_reordering() argument
898 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_reordering()
909 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_update_reordering()
930 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_update_reordering()
1112 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, in tcp_check_dsack() argument
1116 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack()
1124 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV); in tcp_check_dsack()
1133 NET_INC_STATS(sock_net(sk), in tcp_check_dsack()
1168 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, in tcp_match_skb_to_sack() argument
1207 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); in tcp_match_skb_to_sack()
1216 static u8 tcp_sacktag_one(struct sock *sk, in tcp_sacktag_one() argument
1222 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one()
1304 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, in tcp_shifted_skb() argument
1309 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb()
1310 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); in tcp_shifted_skb()
1322 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, in tcp_shifted_skb()
1325 tcp_rate_skb_delivered(sk, skb, state->rate); in tcp_shifted_skb()
1354 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED); in tcp_shifted_skb()
1372 if (skb == tcp_highest_sack(sk)) in tcp_shifted_skb()
1373 tcp_advance_highest_sack(sk, skb); in tcp_shifted_skb()
1379 tcp_unlink_write_queue(skb, sk); in tcp_shifted_skb()
1380 sk_wmem_free_skb(sk, skb); in tcp_shifted_skb()
1382 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED); in tcp_shifted_skb()
1419 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, in tcp_shift_skb_data() argument
1424 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data()
1432 if (!sk_can_gso(sk)) in tcp_shift_skb_data()
1446 if (unlikely(skb == tcp_write_queue_head(sk))) in tcp_shift_skb_data()
1448 prev = tcp_write_queue_prev(sk, skb); in tcp_shift_skb_data()
1527 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) in tcp_shift_skb_data()
1533 if (prev == tcp_write_queue_tail(sk)) in tcp_shift_skb_data()
1535 skb = tcp_write_queue_next(sk, prev); in tcp_shift_skb_data()
1538 (skb == tcp_send_head(sk)) || in tcp_shift_skb_data()
1547 tcp_shifted_skb(sk, skb, state, next_pcount, len, mss, 0); in tcp_shift_skb_data()
1557 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); in tcp_shift_skb_data()
1561 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_walk() argument
1567 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk()
1570 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_walk()
1574 if (skb == tcp_send_head(sk)) in tcp_sacktag_walk()
1583 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1595 tmp = tcp_shift_skb_data(sk, skb, state, in tcp_sacktag_walk()
1605 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1616 tcp_sacktag_one(sk, in tcp_sacktag_walk()
1624 tcp_rate_skb_delivered(sk, skb, state->rate); in tcp_sacktag_walk()
1628 tcp_advance_highest_sack(sk, skb); in tcp_sacktag_walk()
1639 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_skip() argument
1643 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_skip()
1644 if (skb == tcp_send_head(sk)) in tcp_sacktag_skip()
1656 struct sock *sk, in tcp_maybe_skipping_dsack() argument
1665 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); in tcp_maybe_skipping_dsack()
1666 skb = tcp_sacktag_walk(skb, sk, NULL, state, in tcp_maybe_skipping_dsack()
1680 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, in tcp_sacktag_write_queue() argument
1683 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue()
1702 tcp_highest_sack_reset(sk); in tcp_sacktag_write_queue()
1705 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, in tcp_sacktag_write_queue()
1748 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_sacktag_write_queue()
1777 skb = tcp_write_queue_head(sk); in tcp_sacktag_write_queue()
1812 skb = tcp_sacktag_skip(skb, sk, state, in tcp_sacktag_write_queue()
1814 skb = tcp_sacktag_walk(skb, sk, next_dup, in tcp_sacktag_write_queue()
1825 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, in tcp_sacktag_write_queue()
1832 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1840 skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq); in tcp_sacktag_write_queue()
1847 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1852 skb = tcp_sacktag_skip(skb, sk, state, start_seq); in tcp_sacktag_write_queue()
1855 skb = tcp_sacktag_walk(skb, sk, next_dup, state, in tcp_sacktag_write_queue()
1871 ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) in tcp_sacktag_write_queue()
1872 tcp_update_reordering(sk, tp->fackets_out - state->reord, 0); in tcp_sacktag_write_queue()
1907 static void tcp_check_reno_reordering(struct sock *sk, const int addend) in tcp_check_reno_reordering() argument
1909 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering()
1911 tcp_update_reordering(sk, tp->packets_out + addend, 0); in tcp_check_reno_reordering()
1916 static void tcp_add_reno_sack(struct sock *sk) in tcp_add_reno_sack() argument
1918 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack()
1922 tcp_check_reno_reordering(sk, 0); in tcp_add_reno_sack()
1930 static void tcp_remove_reno_sacks(struct sock *sk, int acked) in tcp_remove_reno_sacks() argument
1932 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks()
1942 tcp_check_reno_reordering(sk, acked); in tcp_remove_reno_sacks()
1972 void tcp_enter_loss(struct sock *sk) in tcp_enter_loss() argument
1974 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_loss()
1975 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss()
1976 struct net *net = sock_net(sk); in tcp_enter_loss()
1986 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
1988 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
1989 tcp_ca_event(sk, CA_EVENT_LOSS); in tcp_enter_loss()
2002 skb = tcp_write_queue_head(sk); in tcp_enter_loss()
2005 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); in tcp_enter_loss()
2013 tcp_for_write_queue(skb, sk) { in tcp_enter_loss()
2014 if (skb == tcp_send_head(sk)) in tcp_enter_loss()
2037 tcp_set_ca_state(sk, TCP_CA_Loss); in tcp_enter_loss()
2047 !inet_csk(sk)->icsk_mtup.probe_size; in tcp_enter_loss()
2060 static bool tcp_check_sack_reneging(struct sock *sk, int flag) in tcp_check_sack_reneging() argument
2063 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging()
2067 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_check_sack_reneging()
2206 static bool tcp_time_to_recover(struct sock *sk, int flag) in tcp_time_to_recover() argument
2208 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover()
2227 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) in tcp_mark_head_lost() argument
2229 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost()
2241 if (mark_head && skb != tcp_write_queue_head(sk)) in tcp_mark_head_lost()
2244 skb = tcp_write_queue_head(sk); in tcp_mark_head_lost()
2248 tcp_for_write_queue_from(skb, sk) { in tcp_mark_head_lost()
2249 if (skb == tcp_send_head(sk)) in tcp_mark_head_lost()
2274 tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0) in tcp_mark_head_lost()
2289 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) in tcp_update_scoreboard() argument
2291 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard()
2294 tcp_mark_head_lost(sk, 1, 1); in tcp_update_scoreboard()
2299 tcp_mark_head_lost(sk, lost, 0); in tcp_update_scoreboard()
2303 tcp_mark_head_lost(sk, sacked_upto, 0); in tcp_update_scoreboard()
2305 tcp_mark_head_lost(sk, 1, 1); in tcp_update_scoreboard()
2350 static bool tcp_any_retrans_done(const struct sock *sk) in tcp_any_retrans_done() argument
2352 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done()
2358 skb = tcp_write_queue_head(sk); in tcp_any_retrans_done()
2366 static void DBGUNDO(struct sock *sk, const char *msg) in DBGUNDO() argument
2368 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO()
2369 struct inet_sock *inet = inet_sk(sk); in DBGUNDO()
2371 if (sk->sk_family == AF_INET) { in DBGUNDO()
2380 else if (sk->sk_family == AF_INET6) { in DBGUNDO()
2383 &sk->sk_v6_daddr, ntohs(inet->inet_dport), in DBGUNDO()
2394 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) in tcp_undo_cwnd_reduction() argument
2396 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction()
2401 tcp_for_write_queue(skb, sk) { in tcp_undo_cwnd_reduction()
2402 if (skb == tcp_send_head(sk)) in tcp_undo_cwnd_reduction()
2411 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_undo_cwnd_reduction()
2413 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
2430 static bool tcp_try_undo_recovery(struct sock *sk) in tcp_try_undo_recovery() argument
2432 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery()
2440 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); in tcp_try_undo_recovery()
2441 tcp_undo_cwnd_reduction(sk, false); in tcp_try_undo_recovery()
2442 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) in tcp_try_undo_recovery()
2447 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_try_undo_recovery()
2453 if (!tcp_any_retrans_done(sk)) in tcp_try_undo_recovery()
2457 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_try_undo_recovery()
2463 static bool tcp_try_undo_dsack(struct sock *sk) in tcp_try_undo_dsack() argument
2465 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack()
2468 DBGUNDO(sk, "D-SACK"); in tcp_try_undo_dsack()
2469 tcp_undo_cwnd_reduction(sk, false); in tcp_try_undo_dsack()
2470 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); in tcp_try_undo_dsack()
2477 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) in tcp_try_undo_loss() argument
2479 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss()
2482 tcp_undo_cwnd_reduction(sk, true); in tcp_try_undo_loss()
2484 DBGUNDO(sk, "partial loss"); in tcp_try_undo_loss()
2485 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); in tcp_try_undo_loss()
2487 NET_INC_STATS(sock_net(sk), in tcp_try_undo_loss()
2489 inet_csk(sk)->icsk_retransmits = 0; in tcp_try_undo_loss()
2491 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_try_undo_loss()
2508 static void tcp_init_cwnd_reduction(struct sock *sk) in tcp_init_cwnd_reduction() argument
2510 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction()
2518 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2522 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag) in tcp_cwnd_reduction() argument
2524 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction()
2549 static inline void tcp_end_cwnd_reduction(struct sock *sk) in tcp_end_cwnd_reduction() argument
2551 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction()
2553 if (inet_csk(sk)->icsk_ca_ops->cong_control) in tcp_end_cwnd_reduction()
2558 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { in tcp_end_cwnd_reduction()
2562 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); in tcp_end_cwnd_reduction()
2566 void tcp_enter_cwr(struct sock *sk) in tcp_enter_cwr() argument
2568 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr()
2571 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { in tcp_enter_cwr()
2573 tcp_init_cwnd_reduction(sk); in tcp_enter_cwr()
2574 tcp_set_ca_state(sk, TCP_CA_CWR); in tcp_enter_cwr()
2579 static void tcp_try_keep_open(struct sock *sk) in tcp_try_keep_open() argument
2581 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open()
2584 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2587 if (inet_csk(sk)->icsk_ca_state != state) { in tcp_try_keep_open()
2588 tcp_set_ca_state(sk, state); in tcp_try_keep_open()
2593 static void tcp_try_to_open(struct sock *sk, int flag) in tcp_try_to_open() argument
2595 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open()
2599 if (!tcp_any_retrans_done(sk)) in tcp_try_to_open()
2603 tcp_enter_cwr(sk); in tcp_try_to_open()
2605 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { in tcp_try_to_open()
2606 tcp_try_keep_open(sk); in tcp_try_to_open()
2610 static void tcp_mtup_probe_failed(struct sock *sk) in tcp_mtup_probe_failed() argument
2612 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_failed()
2616 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); in tcp_mtup_probe_failed()
2619 static void tcp_mtup_probe_success(struct sock *sk) in tcp_mtup_probe_success() argument
2621 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success()
2622 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_success()
2625 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2627 tcp_mss_to_mtu(sk, tp->mss_cache) / in tcp_mtup_probe_success()
2631 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2635 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtup_probe_success()
2636 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); in tcp_mtup_probe_success()
2643 void tcp_simple_retransmit(struct sock *sk) in tcp_simple_retransmit() argument
2645 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_simple_retransmit()
2646 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit()
2648 unsigned int mss = tcp_current_mss(sk); in tcp_simple_retransmit()
2650 tcp_for_write_queue(skb, sk) { in tcp_simple_retransmit()
2651 if (skb == tcp_send_head(sk)) in tcp_simple_retransmit()
2680 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_simple_retransmit()
2683 tcp_set_ca_state(sk, TCP_CA_Loss); in tcp_simple_retransmit()
2685 tcp_xmit_retransmit_queue(sk); in tcp_simple_retransmit()
2689 void tcp_enter_recovery(struct sock *sk, bool ece_ack) in tcp_enter_recovery() argument
2691 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery()
2699 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_enter_recovery()
2704 if (!tcp_in_cwnd_reduction(sk)) { in tcp_enter_recovery()
2706 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2707 tcp_init_cwnd_reduction(sk); in tcp_enter_recovery()
2709 tcp_set_ca_state(sk, TCP_CA_Recovery); in tcp_enter_recovery()
2715 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack, in tcp_process_loss() argument
2718 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss()
2722 tcp_try_undo_loss(sk, false)) in tcp_process_loss()
2730 tcp_try_undo_loss(sk, true)) in tcp_process_loss()
2742 if (tcp_send_head(sk) && in tcp_process_loss()
2753 tcp_try_undo_recovery(sk); in tcp_process_loss()
2761 tcp_add_reno_sack(sk); in tcp_process_loss()
2769 static bool tcp_try_undo_partial(struct sock *sk, const int acked) in tcp_try_undo_partial() argument
2771 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial()
2777 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); in tcp_try_undo_partial()
2787 if (!tcp_any_retrans_done(sk)) in tcp_try_undo_partial()
2790 DBGUNDO(sk, "partial recovery"); in tcp_try_undo_partial()
2791 tcp_undo_cwnd_reduction(sk, true); in tcp_try_undo_partial()
2792 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); in tcp_try_undo_partial()
2793 tcp_try_keep_open(sk); in tcp_try_undo_partial()
2799 static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag) in tcp_rack_identify_loss() argument
2801 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_identify_loss()
2807 tcp_rack_mark_lost(sk); in tcp_rack_identify_loss()
2825 static void tcp_fastretrans_alert(struct sock *sk, const int acked, in tcp_fastretrans_alert() argument
2828 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastretrans_alert()
2829 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert()
2845 if (tcp_check_sack_reneging(sk, flag)) in tcp_fastretrans_alert()
2862 tcp_end_cwnd_reduction(sk); in tcp_fastretrans_alert()
2863 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_fastretrans_alert()
2870 if (tcp_try_undo_recovery(sk)) in tcp_fastretrans_alert()
2872 tcp_end_cwnd_reduction(sk); in tcp_fastretrans_alert()
2882 tcp_add_reno_sack(sk); in tcp_fastretrans_alert()
2884 if (tcp_try_undo_partial(sk, acked)) in tcp_fastretrans_alert()
2890 if (tcp_try_undo_dsack(sk)) { in tcp_fastretrans_alert()
2891 tcp_try_keep_open(sk); in tcp_fastretrans_alert()
2894 tcp_rack_identify_loss(sk, ack_flag); in tcp_fastretrans_alert()
2897 tcp_process_loss(sk, flag, is_dupack, rexmit); in tcp_fastretrans_alert()
2898 tcp_rack_identify_loss(sk, ack_flag); in tcp_fastretrans_alert()
2908 tcp_add_reno_sack(sk); in tcp_fastretrans_alert()
2912 tcp_try_undo_dsack(sk); in tcp_fastretrans_alert()
2914 tcp_rack_identify_loss(sk, ack_flag); in tcp_fastretrans_alert()
2915 if (!tcp_time_to_recover(sk, flag)) { in tcp_fastretrans_alert()
2916 tcp_try_to_open(sk, flag); in tcp_fastretrans_alert()
2924 tcp_mtup_probe_failed(sk); in tcp_fastretrans_alert()
2927 tcp_simple_retransmit(sk); in tcp_fastretrans_alert()
2932 tcp_enter_recovery(sk, (flag & FLAG_ECE)); in tcp_fastretrans_alert()
2937 tcp_update_scoreboard(sk, fast_rexmit); in tcp_fastretrans_alert()
2941 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us) in tcp_update_rtt_min() argument
2943 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_rtt_min()
2950 static bool tcp_ack_update_rtt(struct sock *sk, const int flag, in tcp_ack_update_rtt() argument
2954 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt()
2985 tcp_update_rtt_min(sk, ca_rtt_us); in tcp_ack_update_rtt()
2986 tcp_rtt_estimator(sk, seq_rtt_us); in tcp_ack_update_rtt()
2987 tcp_set_rto(sk); in tcp_ack_update_rtt()
2990 inet_csk(sk)->icsk_backoff = 0; in tcp_ack_update_rtt()
2995 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) in tcp_synack_rtt_meas() argument
3003 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs); in tcp_synack_rtt_meas()
3007 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_cong_avoid() argument
3009 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_avoid()
3011 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); in tcp_cong_avoid()
3012 tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32; in tcp_cong_avoid()
3018 void tcp_rearm_rto(struct sock *sk) in tcp_rearm_rto() argument
3020 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rearm_rto()
3021 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto()
3030 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); in tcp_rearm_rto()
3032 u32 rto = inet_csk(sk)->icsk_rto; in tcp_rearm_rto()
3036 s64 delta_us = tcp_rto_delta_us(sk); in tcp_rearm_rto()
3042 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, in tcp_rearm_rto()
3048 static void tcp_set_xmit_timer(struct sock *sk) in tcp_set_xmit_timer() argument
3050 if (!tcp_schedule_loss_probe(sk, true)) in tcp_set_xmit_timer()
3051 tcp_rearm_rto(sk); in tcp_set_xmit_timer()
3055 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) in tcp_tso_acked() argument
3057 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked()
3063 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3075 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, in tcp_ack_tstamp() argument
3086 before(shinfo->tskey, tcp_sk(sk)->snd_una)) in tcp_ack_tstamp()
3087 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); in tcp_ack_tstamp()
3094 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, in tcp_clean_rtx_queue() argument
3098 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3100 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue()
3115 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { in tcp_clean_rtx_queue()
3120 tcp_ack_tstamp(sk, skb, prior_snd_una); in tcp_clean_rtx_queue()
3128 acked_pcount = tcp_tso_acked(sk, skb); in tcp_clean_rtx_queue()
3167 tcp_rate_skb_delivered(sk, skb, sack->rate); in tcp_clean_rtx_queue()
3186 tcp_unlink_write_queue(skb, sk); in tcp_clean_rtx_queue()
3187 sk_wmem_free_skb(sk, skb); in tcp_clean_rtx_queue()
3195 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); in tcp_clean_rtx_queue()
3211 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us, in tcp_clean_rtx_queue()
3218 tcp_mtup_probe_success(sk); in tcp_clean_rtx_queue()
3222 tcp_remove_reno_sacks(sk, pkts_acked); in tcp_clean_rtx_queue()
3237 tcp_update_reordering(sk, tp->fackets_out - reord, 0); in tcp_clean_rtx_queue()
3260 icsk->icsk_ca_ops->pkts_acked(sk, &sample); in tcp_clean_rtx_queue()
3268 icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3290 static void tcp_ack_probe(struct sock *sk) in tcp_ack_probe() argument
3292 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe()
3293 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack_probe()
3297 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3299 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); in tcp_ack_probe()
3304 unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX); in tcp_ack_probe()
3306 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_ack_probe()
3311 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) in tcp_ack_is_dubious() argument
3314 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; in tcp_ack_is_dubious()
3318 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) in tcp_may_raise_cwnd() argument
3326 if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering) in tcp_may_raise_cwnd()
3337 static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked, in tcp_cong_control() argument
3340 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_control()
3343 icsk->icsk_ca_ops->cong_control(sk, rs); in tcp_cong_control()
3347 if (tcp_in_cwnd_reduction(sk)) { in tcp_cong_control()
3349 tcp_cwnd_reduction(sk, acked_sacked, flag); in tcp_cong_control()
3350 } else if (tcp_may_raise_cwnd(sk, flag)) { in tcp_cong_control()
3352 tcp_cong_avoid(sk, ack, acked_sacked); in tcp_cong_control()
3354 tcp_update_pacing_rate(sk); in tcp_cong_control()
3394 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, in tcp_ack_update_window() argument
3397 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window()
3415 tcp_fast_path_check(sk); in tcp_ack_update_window()
3417 if (tcp_send_head(sk)) in tcp_ack_update_window()
3418 tcp_slow_start_after_idle_check(sk); in tcp_ack_update_window()
3422 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); in tcp_ack_update_window()
3468 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) in tcp_send_challenge_ack() argument
3473 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack()
3477 if (__tcp_oow_rate_limited(sock_net(sk), in tcp_send_challenge_ack()
3494 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); in tcp_send_challenge_ack()
3495 tcp_send_ack(sk); in tcp_send_challenge_ack()
3525 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) in tcp_process_tlp_ack() argument
3527 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack()
3539 tcp_init_cwnd_reduction(sk); in tcp_process_tlp_ack()
3540 tcp_set_ca_state(sk, TCP_CA_CWR); in tcp_process_tlp_ack()
3541 tcp_end_cwnd_reduction(sk); in tcp_process_tlp_ack()
3542 tcp_try_keep_open(sk); in tcp_process_tlp_ack()
3543 NET_INC_STATS(sock_net(sk), in tcp_process_tlp_ack()
3552 static inline void tcp_in_ack_event(struct sock *sk, u32 flags) in tcp_in_ack_event() argument
3554 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_ack_event()
3557 icsk->icsk_ca_ops->in_ack_event(sk, flags); in tcp_in_ack_event()
3564 static void tcp_xmit_recovery(struct sock *sk, int rexmit) in tcp_xmit_recovery() argument
3566 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_recovery()
3572 __tcp_push_pending_frames(sk, tcp_current_mss(sk), in tcp_xmit_recovery()
3578 tcp_xmit_retransmit_queue(sk); in tcp_xmit_recovery()
3582 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) in tcp_ack() argument
3584 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack()
3585 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack()
3604 prefetchw(sk->sk_write_queue.next); in tcp_ack()
3613 tcp_send_challenge_ack(sk, skb); in tcp_ack()
3648 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); in tcp_ack()
3650 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); in tcp_ack()
3657 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); in tcp_ack()
3659 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); in tcp_ack()
3662 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3673 tcp_in_ack_event(sk, ack_ev_flags); in tcp_ack()
3679 sk->sk_err_soft = 0; in tcp_ack()
3686 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, in tcp_ack()
3690 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3693 tcp_set_xmit_timer(sk); in tcp_ack()
3695 if (tcp_ack_is_dubious(sk, flag)) { in tcp_ack()
3697 tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); in tcp_ack()
3701 sk_dst_confirm(sk); in tcp_ack()
3705 tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate); in tcp_ack()
3706 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate); in tcp_ack()
3707 tcp_xmit_recovery(sk, rexmit); in tcp_ack()
3713 tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); in tcp_ack()
3718 if (tcp_send_head(sk)) in tcp_ack()
3719 tcp_ack_probe(sk); in tcp_ack()
3722 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3726 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3734 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3736 tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); in tcp_ack()
3737 tcp_xmit_recovery(sk, rexmit); in tcp_ack()
3740 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3981 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) in tcp_disordered_ack() argument
3983 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack()
3998 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); in tcp_disordered_ack()
4001 static inline bool tcp_paws_discard(const struct sock *sk, in tcp_paws_discard() argument
4004 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard()
4007 !tcp_disordered_ack(sk, skb); in tcp_paws_discard()
4030 void tcp_reset(struct sock *sk) in tcp_reset() argument
4033 switch (sk->sk_state) { in tcp_reset()
4035 sk->sk_err = ECONNREFUSED; in tcp_reset()
4038 sk->sk_err = EPIPE; in tcp_reset()
4043 sk->sk_err = ECONNRESET; in tcp_reset()
4048 tcp_write_queue_purge(sk); in tcp_reset()
4049 tcp_done(sk); in tcp_reset()
4051 if (!sock_flag(sk, SOCK_DEAD)) in tcp_reset()
4052 sk->sk_error_report(sk); in tcp_reset()
4069 void tcp_fin(struct sock *sk) in tcp_fin() argument
4071 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin()
4073 inet_csk_schedule_ack(sk); in tcp_fin()
4075 sk->sk_shutdown |= RCV_SHUTDOWN; in tcp_fin()
4076 sock_set_flag(sk, SOCK_DONE); in tcp_fin()
4078 switch (sk->sk_state) { in tcp_fin()
4082 tcp_set_state(sk, TCP_CLOSE_WAIT); in tcp_fin()
4083 inet_csk(sk)->icsk_ack.pingpong = 1; in tcp_fin()
4101 tcp_send_ack(sk); in tcp_fin()
4102 tcp_set_state(sk, TCP_CLOSING); in tcp_fin()
4106 tcp_send_ack(sk); in tcp_fin()
4107 tcp_time_wait(sk, TCP_TIME_WAIT, 0); in tcp_fin()
4114 __func__, sk->sk_state); in tcp_fin()
4124 sk_mem_reclaim(sk); in tcp_fin()
4126 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_fin()
4127 sk->sk_state_change(sk); in tcp_fin()
4130 if (sk->sk_shutdown == SHUTDOWN_MASK || in tcp_fin()
4131 sk->sk_state == TCP_CLOSE) in tcp_fin()
4132 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); in tcp_fin()
4134 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); in tcp_fin()
4151 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) in tcp_dsack_set() argument
4153 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set()
4163 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_dsack_set()
4171 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) in tcp_dsack_extend() argument
4173 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend()
4176 tcp_dsack_set(sk, seq, end_seq); in tcp_dsack_extend()
4181 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) in tcp_send_dupack() argument
4183 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack()
4187 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_send_dupack()
4188 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); in tcp_send_dupack()
4195 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); in tcp_send_dupack()
4199 tcp_send_ack(sk); in tcp_send_dupack()
4230 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) in tcp_sack_new_ofo_skb() argument
4232 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb()
4325 static bool tcp_try_coalesce(struct sock *sk, in tcp_try_coalesce() argument
4342 atomic_add(delta, &sk->sk_rmem_alloc); in tcp_try_coalesce()
4343 sk_mem_charge(sk, delta); in tcp_try_coalesce()
4344 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); in tcp_try_coalesce()
4360 static bool tcp_ooo_try_coalesce(struct sock *sk, in tcp_ooo_try_coalesce() argument
4365 bool res = tcp_try_coalesce(sk, OOO_QUEUE, to, from, fragstolen); in tcp_ooo_try_coalesce()
4377 static void tcp_drop(struct sock *sk, struct sk_buff *skb) in tcp_drop() argument
4379 sk_drops_add(sk, skb); in tcp_drop()
4386 static void tcp_ofo_queue(struct sock *sk) in tcp_ofo_queue() argument
4388 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue()
4404 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); in tcp_ofo_queue()
4413 SOCK_DEBUG(sk, "ofo packet was already received\n"); in tcp_ofo_queue()
4414 tcp_drop(sk, skb); in tcp_ofo_queue()
4417 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", in tcp_ofo_queue()
4421 tail = skb_peek_tail(&sk->sk_receive_queue); in tcp_ofo_queue()
4422 eaten = tail && tcp_try_coalesce(sk, RCV_QUEUE, in tcp_ofo_queue()
4427 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_ofo_queue()
4432 tcp_fin(sk); in tcp_ofo_queue()
4441 static bool tcp_prune_ofo_queue(struct sock *sk);
4442 static int tcp_prune_queue(struct sock *sk);
4444 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, in tcp_try_rmem_schedule() argument
4447 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in tcp_try_rmem_schedule()
4448 !sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4450 if (tcp_prune_queue(sk) < 0) in tcp_try_rmem_schedule()
4453 while (!sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4454 if (!tcp_prune_ofo_queue(sk)) in tcp_try_rmem_schedule()
4461 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) in tcp_data_queue_ofo() argument
4463 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo()
4469 tcp_ecn_check_ce(sk, skb); in tcp_data_queue_ofo()
4471 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { in tcp_data_queue_ofo()
4472 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); in tcp_data_queue_ofo()
4473 tcp_drop(sk, skb); in tcp_data_queue_ofo()
4483 inet_csk_schedule_ack(sk); in tcp_data_queue_ofo()
4485 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); in tcp_data_queue_ofo()
4488 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", in tcp_data_queue_ofo()
4508 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, in tcp_data_queue_ofo()
4511 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4535 NET_INC_STATS(sock_net(sk), in tcp_data_queue_ofo()
4537 tcp_drop(sk, skb); in tcp_data_queue_ofo()
4539 tcp_dsack_set(sk, seq, end_seq); in tcp_data_queue_ofo()
4544 tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq); in tcp_data_queue_ofo()
4551 tcp_dsack_extend(sk, in tcp_data_queue_ofo()
4554 NET_INC_STATS(sock_net(sk), in tcp_data_queue_ofo()
4556 tcp_drop(sk, skb1); in tcp_data_queue_ofo()
4559 } else if (tcp_ooo_try_coalesce(sk, skb1, in tcp_data_queue_ofo()
4576 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, in tcp_data_queue_ofo()
4581 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, in tcp_data_queue_ofo()
4583 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); in tcp_data_queue_ofo()
4584 tcp_drop(sk, skb1); in tcp_data_queue_ofo()
4592 tcp_sack_new_ofo_skb(sk, seq, end_seq); in tcp_data_queue_ofo()
4595 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4597 skb_set_owner_r(skb, sk); in tcp_data_queue_ofo()
4601 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, in tcp_queue_rcv() argument
4605 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); in tcp_queue_rcv()
4609 tcp_try_coalesce(sk, RCV_QUEUE, tail, in tcp_queue_rcv()
4611 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); in tcp_queue_rcv()
4613 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_queue_rcv()
4614 skb_set_owner_r(skb, sk); in tcp_queue_rcv()
4619 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) in tcp_send_rcvq() argument
4637 &err, sk->sk_allocation); in tcp_send_rcvq()
4645 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_send_rcvq()
4652 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; in tcp_send_rcvq()
4654 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; in tcp_send_rcvq()
4656 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { in tcp_send_rcvq()
4669 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) in tcp_data_queue() argument
4671 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue()
4696 if (skb_queue_len(&sk->sk_receive_queue) == 0) in tcp_data_queue()
4697 sk_forced_mem_schedule(sk, skb->truesize); in tcp_data_queue()
4698 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_data_queue()
4701 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); in tcp_data_queue()
4704 tcp_event_data_recv(sk, skb); in tcp_data_queue()
4706 tcp_fin(sk); in tcp_data_queue()
4709 tcp_ofo_queue(sk); in tcp_data_queue()
4715 inet_csk(sk)->icsk_ack.pingpong = 0; in tcp_data_queue()
4721 tcp_fast_path_check(sk); in tcp_data_queue()
4725 if (!sock_flag(sk, SOCK_DEAD)) in tcp_data_queue()
4726 sk->sk_data_ready(sk); in tcp_data_queue()
4732 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_data_queue()
4733 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4736 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); in tcp_data_queue()
4737 inet_csk_schedule_ack(sk); in tcp_data_queue()
4739 tcp_drop(sk, skb); in tcp_data_queue()
4749 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", in tcp_data_queue()
4753 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
4763 tcp_data_queue_ofo(sk, skb); in tcp_data_queue()
4774 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, in tcp_collapse_one() argument
4786 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); in tcp_collapse_one()
4819 tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, in tcp_collapse() argument
4835 skb = tcp_collapse_one(sk, skb, list, root); in tcp_collapse()
4882 skb_set_owner_r(nskb, sk); in tcp_collapse()
4899 skb = tcp_collapse_one(sk, skb, list, root); in tcp_collapse()
4915 static void tcp_collapse_ofo_queue(struct sock *sk) in tcp_collapse_ofo_queue() argument
4917 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue()
4944 tcp_collapse(sk, NULL, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
4948 if (sum_tiny > sk->sk_rcvbuf >> 3) in tcp_collapse_ofo_queue()
4973 static bool tcp_prune_ofo_queue(struct sock *sk) in tcp_prune_ofo_queue() argument
4975 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue()
4982 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); in tcp_prune_ofo_queue()
4983 goal = sk->sk_rcvbuf >> 3; in tcp_prune_ofo_queue()
4989 tcp_drop(sk, rb_to_skb(node)); in tcp_prune_ofo_queue()
4991 sk_mem_reclaim(sk); in tcp_prune_ofo_queue()
4992 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && in tcp_prune_ofo_queue()
4993 !tcp_under_memory_pressure(sk)) in tcp_prune_ofo_queue()
4995 goal = sk->sk_rcvbuf >> 3; in tcp_prune_ofo_queue()
5018 static int tcp_prune_queue(struct sock *sk) in tcp_prune_queue() argument
5020 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue()
5022 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); in tcp_prune_queue()
5024 NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); in tcp_prune_queue()
5026 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in tcp_prune_queue()
5027 tcp_clamp_window(sk); in tcp_prune_queue()
5028 else if (tcp_under_memory_pressure(sk)) in tcp_prune_queue()
5031 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
5034 tcp_collapse_ofo_queue(sk); in tcp_prune_queue()
5035 if (!skb_queue_empty(&sk->sk_receive_queue)) in tcp_prune_queue()
5036 tcp_collapse(sk, &sk->sk_receive_queue, NULL, in tcp_prune_queue()
5037 skb_peek(&sk->sk_receive_queue), in tcp_prune_queue()
5040 sk_mem_reclaim(sk); in tcp_prune_queue()
5042 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
5048 tcp_prune_ofo_queue(sk); in tcp_prune_queue()
5050 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
5057 NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); in tcp_prune_queue()
5064 static bool tcp_should_expand_sndbuf(const struct sock *sk) in tcp_should_expand_sndbuf() argument
5066 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf()
5071 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) in tcp_should_expand_sndbuf()
5075 if (tcp_under_memory_pressure(sk)) in tcp_should_expand_sndbuf()
5079 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) in tcp_should_expand_sndbuf()
5095 static void tcp_new_space(struct sock *sk) in tcp_new_space() argument
5097 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space()
5099 if (tcp_should_expand_sndbuf(sk)) { in tcp_new_space()
5100 tcp_sndbuf_expand(sk); in tcp_new_space()
5104 sk->sk_write_space(sk); in tcp_new_space()
5107 static void tcp_check_space(struct sock *sk) in tcp_check_space() argument
5109 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { in tcp_check_space()
5110 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); in tcp_check_space()
5113 if (sk->sk_socket && in tcp_check_space()
5114 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_check_space()
5115 tcp_new_space(sk); in tcp_check_space()
5116 if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) in tcp_check_space()
5117 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); in tcp_check_space()
5122 static inline void tcp_data_snd_check(struct sock *sk) in tcp_data_snd_check() argument
5124 tcp_push_pending_frames(sk); in tcp_data_snd_check()
5125 tcp_check_space(sk); in tcp_data_snd_check()
5131 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) in __tcp_ack_snd_check() argument
5133 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check()
5136 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
5140 __tcp_select_window(sk) >= tp->rcv_wnd) || in __tcp_ack_snd_check()
5142 tcp_in_quickack_mode(sk) || in __tcp_ack_snd_check()
5146 tcp_send_ack(sk); in __tcp_ack_snd_check()
5149 tcp_send_delayed_ack(sk); in __tcp_ack_snd_check()
5153 static inline void tcp_ack_snd_check(struct sock *sk) in tcp_ack_snd_check() argument
5155 if (!inet_csk_ack_scheduled(sk)) { in tcp_ack_snd_check()
5159 __tcp_ack_snd_check(sk, 1); in tcp_ack_snd_check()
5172 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) in tcp_check_urg() argument
5174 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg()
5203 sk_send_sigurg(sk); in tcp_check_urg()
5221 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
5222 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_check_urg()
5225 __skb_unlink(skb, &sk->sk_receive_queue); in tcp_check_urg()
5238 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) in tcp_urg() argument
5240 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg()
5244 tcp_check_urg(sk, th); in tcp_urg()
5257 if (!sock_flag(sk, SOCK_DEAD)) in tcp_urg()
5258 sk->sk_data_ready(sk); in tcp_urg()
5271 static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb) in tcp_reset_check() argument
5273 struct tcp_sock *tp = tcp_sk(sk); in tcp_reset_check()
5276 (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK | in tcp_reset_check()
5283 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, in tcp_validate_incoming() argument
5286 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming()
5290 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && in tcp_validate_incoming()
5292 tcp_paws_discard(sk, skb)) { in tcp_validate_incoming()
5294 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); in tcp_validate_incoming()
5295 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5298 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5315 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5318 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5319 } else if (tcp_reset_check(sk, skb)) { in tcp_validate_incoming()
5320 tcp_reset(sk); in tcp_validate_incoming()
5337 tcp_reset_check(sk, skb)) { in tcp_validate_incoming()
5356 tcp_reset(sk); in tcp_validate_incoming()
5363 sk->sk_state == TCP_ESTABLISHED) in tcp_validate_incoming()
5364 tcp_fastopen_active_disable(sk); in tcp_validate_incoming()
5365 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5378 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); in tcp_validate_incoming()
5379 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); in tcp_validate_incoming()
5380 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5387 tcp_drop(sk, skb); in tcp_validate_incoming()
5414 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, in tcp_rcv_established() argument
5418 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established()
5421 if (unlikely(!sk->sk_rx_dst)) in tcp_rcv_established()
5422 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_rcv_established()
5491 tcp_ack(sk, skb, 0); in tcp_rcv_established()
5493 tcp_data_snd_check(sk); in tcp_rcv_established()
5496 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5506 if ((int)skb->truesize > sk->sk_forward_alloc) in tcp_rcv_established()
5518 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5520 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); in tcp_rcv_established()
5523 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, in tcp_rcv_established()
5526 tcp_event_data_recv(sk, skb); in tcp_rcv_established()
5530 tcp_ack(sk, skb, FLAG_DATA); in tcp_rcv_established()
5531 tcp_data_snd_check(sk); in tcp_rcv_established()
5532 if (!inet_csk_ack_scheduled(sk)) in tcp_rcv_established()
5536 __tcp_ack_snd_check(sk, 0); in tcp_rcv_established()
5540 sk->sk_data_ready(sk); in tcp_rcv_established()
5556 if (!tcp_validate_incoming(sk, skb, th, 1)) in tcp_rcv_established()
5560 if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) in tcp_rcv_established()
5563 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5566 tcp_urg(sk, skb, th); in tcp_rcv_established()
5569 tcp_data_queue(sk, skb); in tcp_rcv_established()
5571 tcp_data_snd_check(sk); in tcp_rcv_established()
5572 tcp_ack_snd_check(sk); in tcp_rcv_established()
5576 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); in tcp_rcv_established()
5577 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5580 tcp_drop(sk, skb); in tcp_rcv_established()
5584 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) in tcp_finish_connect() argument
5586 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect()
5587 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_finish_connect()
5589 tcp_set_state(sk, TCP_ESTABLISHED); in tcp_finish_connect()
5593 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
5594 security_inet_conn_established(sk, skb); in tcp_finish_connect()
5598 icsk->icsk_af_ops->rebuild_header(sk); in tcp_finish_connect()
5600 tcp_init_metrics(sk); in tcp_finish_connect()
5601 tcp_call_bpf(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB); in tcp_finish_connect()
5602 tcp_init_congestion_control(sk); in tcp_finish_connect()
5609 tcp_init_buffer_space(sk); in tcp_finish_connect()
5611 if (sock_flag(sk, SOCK_KEEPOPEN)) in tcp_finish_connect()
5612 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
5620 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, in tcp_rcv_fastopen_synack() argument
5623 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack()
5624 struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
5634 tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL); in tcp_rcv_fastopen_synack()
5656 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); in tcp_rcv_fastopen_synack()
5659 tcp_for_write_queue_from(data, sk) { in tcp_rcv_fastopen_synack()
5660 if (data == tcp_send_head(sk) || in tcp_rcv_fastopen_synack()
5661 __tcp_retransmit_skb(sk, data, 1)) in tcp_rcv_fastopen_synack()
5664 tcp_rearm_rto(sk); in tcp_rcv_fastopen_synack()
5665 NET_INC_STATS(sock_net(sk), in tcp_rcv_fastopen_synack()
5671 NET_INC_STATS(sock_net(sk), in tcp_rcv_fastopen_synack()
5674 tcp_fastopen_add_skb(sk, synack); in tcp_rcv_fastopen_synack()
5679 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, in tcp_rcv_synsent_state_process() argument
5682 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_synsent_state_process()
5683 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process()
5688 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
5708 NET_INC_STATS(sock_net(sk), in tcp_rcv_synsent_state_process()
5722 tcp_reset(sk); in tcp_rcv_synsent_state_process()
5746 tcp_ack(sk, skb, FLAG_SLOWPATH); in tcp_rcv_synsent_state_process()
5777 tcp_mtup_init(sk); in tcp_rcv_synsent_state_process()
5778 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5779 tcp_initialize_rcv_mss(sk); in tcp_rcv_synsent_state_process()
5788 tcp_finish_connect(sk, skb); in tcp_rcv_synsent_state_process()
5791 tcp_rcv_fastopen_synack(sk, skb, &foc); in tcp_rcv_synsent_state_process()
5793 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_rcv_synsent_state_process()
5794 sk->sk_state_change(sk); in tcp_rcv_synsent_state_process()
5795 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); in tcp_rcv_synsent_state_process()
5799 if (sk->sk_write_pending || in tcp_rcv_synsent_state_process()
5809 inet_csk_schedule_ack(sk); in tcp_rcv_synsent_state_process()
5810 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); in tcp_rcv_synsent_state_process()
5811 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in tcp_rcv_synsent_state_process()
5815 tcp_drop(sk, skb); in tcp_rcv_synsent_state_process()
5818 tcp_send_ack(sk); in tcp_rcv_synsent_state_process()
5845 tcp_set_state(sk, TCP_SYN_RECV); in tcp_rcv_synsent_state_process()
5869 tcp_mtup_init(sk); in tcp_rcv_synsent_state_process()
5870 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5871 tcp_initialize_rcv_mss(sk); in tcp_rcv_synsent_state_process()
5873 tcp_send_synack(sk); in tcp_rcv_synsent_state_process()
5913 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) in tcp_rcv_state_process() argument
5915 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process()
5916 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_state_process()
5922 switch (sk->sk_state) { in tcp_rcv_state_process()
5941 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; in tcp_rcv_state_process()
5955 queued = tcp_rcv_synsent_state_process(sk, skb, th); in tcp_rcv_state_process()
5960 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5962 tcp_data_snd_check(sk); in tcp_rcv_state_process()
5970 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && in tcp_rcv_state_process()
5971 sk->sk_state != TCP_FIN_WAIT1); in tcp_rcv_state_process()
5973 if (!tcp_check_req(sk, skb, req, true)) in tcp_rcv_state_process()
5980 if (!tcp_validate_incoming(sk, skb, th, 0)) in tcp_rcv_state_process()
5984 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | in tcp_rcv_state_process()
5989 if (sk->sk_state == TCP_SYN_RECV) in tcp_rcv_state_process()
5991 tcp_send_challenge_ack(sk, skb); in tcp_rcv_state_process()
5994 switch (sk->sk_state) { in tcp_rcv_state_process()
5997 tcp_synack_rtt_meas(sk, req); in tcp_rcv_state_process()
6003 inet_csk(sk)->icsk_retransmits = 0; in tcp_rcv_state_process()
6004 reqsk_fastopen_remove(sk, req, false); in tcp_rcv_state_process()
6007 icsk->icsk_af_ops->rebuild_header(sk); in tcp_rcv_state_process()
6008 tcp_call_bpf(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB); in tcp_rcv_state_process()
6009 tcp_init_congestion_control(sk); in tcp_rcv_state_process()
6011 tcp_mtup_init(sk); in tcp_rcv_state_process()
6013 tcp_init_buffer_space(sk); in tcp_rcv_state_process()
6016 tcp_set_state(sk, TCP_ESTABLISHED); in tcp_rcv_state_process()
6017 sk->sk_state_change(sk); in tcp_rcv_state_process()
6023 if (sk->sk_socket) in tcp_rcv_state_process()
6024 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); in tcp_rcv_state_process()
6042 tcp_rearm_rto(sk); in tcp_rcv_state_process()
6044 tcp_init_metrics(sk); in tcp_rcv_state_process()
6046 if (!inet_csk(sk)->icsk_ca_ops->cong_control) in tcp_rcv_state_process()
6047 tcp_update_pacing_rate(sk); in tcp_rcv_state_process()
6052 tcp_initialize_rcv_mss(sk); in tcp_rcv_state_process()
6066 reqsk_fastopen_remove(sk, req, false); in tcp_rcv_state_process()
6067 tcp_rearm_rto(sk); in tcp_rcv_state_process()
6072 tcp_set_state(sk, TCP_FIN_WAIT2); in tcp_rcv_state_process()
6073 sk->sk_shutdown |= SEND_SHUTDOWN; in tcp_rcv_state_process()
6075 sk_dst_confirm(sk); in tcp_rcv_state_process()
6077 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_rcv_state_process()
6079 sk->sk_state_change(sk); in tcp_rcv_state_process()
6084 tcp_done(sk); in tcp_rcv_state_process()
6085 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
6092 tcp_fastopen_active_disable(sk); in tcp_rcv_state_process()
6093 tcp_done(sk); in tcp_rcv_state_process()
6094 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
6098 tmo = tcp_fin_time(sk); in tcp_rcv_state_process()
6100 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); in tcp_rcv_state_process()
6101 } else if (th->fin || sock_owned_by_user(sk)) { in tcp_rcv_state_process()
6108 inet_csk_reset_keepalive_timer(sk, tmo); in tcp_rcv_state_process()
6110 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_rcv_state_process()
6118 tcp_time_wait(sk, TCP_TIME_WAIT, 0); in tcp_rcv_state_process()
6125 tcp_update_metrics(sk); in tcp_rcv_state_process()
6126 tcp_done(sk); in tcp_rcv_state_process()
6133 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
6136 switch (sk->sk_state) { in tcp_rcv_state_process()
6148 if (sk->sk_shutdown & RCV_SHUTDOWN) { in tcp_rcv_state_process()
6151 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
6152 tcp_reset(sk); in tcp_rcv_state_process()
6158 tcp_data_queue(sk, skb); in tcp_rcv_state_process()
6164 if (sk->sk_state != TCP_CLOSE) { in tcp_rcv_state_process()
6165 tcp_data_snd_check(sk); in tcp_rcv_state_process()
6166 tcp_ack_snd_check(sk); in tcp_rcv_state_process()
6171 tcp_drop(sk, skb); in tcp_rcv_state_process()
6229 struct sk_buff *skb, const struct sock *sk) in tcp_openreq_init() argument
6249 ireq->ir_mark = inet_request_mark(sk, skb); in tcp_openreq_init()
6279 static bool tcp_syn_flood_action(const struct sock *sk, in tcp_syn_flood_action() argument
6283 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in tcp_syn_flood_action()
6286 struct net *net = sock_net(sk); in tcp_syn_flood_action()
6292 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); in tcp_syn_flood_action()
6295 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); in tcp_syn_flood_action()
6306 static void tcp_reqsk_record_syn(const struct sock *sk, in tcp_reqsk_record_syn() argument
6310 if (tcp_sk(sk)->save_syn) { in tcp_reqsk_record_syn()
6325 struct sock *sk, struct sk_buff *skb) in tcp_conn_request() argument
6330 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request()
6331 struct net *net = sock_net(sk); in tcp_conn_request()
6343 inet_csk_reqsk_queue_is_full(sk)) && !isn) { in tcp_conn_request()
6344 want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); in tcp_conn_request()
6349 if (sk_acceptq_is_full(sk)) { in tcp_conn_request()
6350 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_conn_request()
6354 req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie); in tcp_conn_request()
6364 tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, in tcp_conn_request()
6371 tcp_openreq_init(req, &tmp_opt, skb, sk); in tcp_conn_request()
6372 inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent; in tcp_conn_request()
6375 inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb); in tcp_conn_request()
6377 af_ops->init_req(req, sk, skb); in tcp_conn_request()
6379 if (security_inet_conn_request(sk, skb, req)) in tcp_conn_request()
6385 dst = af_ops->route_req(sk, &fl, req); in tcp_conn_request()
6392 (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < in tcp_conn_request()
6410 tcp_ecn_create_request(req, skb, sk, dst); in tcp_conn_request()
6413 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); in tcp_conn_request()
6421 tcp_openreq_init_rwin(req, sk, dst); in tcp_conn_request()
6423 tcp_reqsk_record_syn(sk, req, skb); in tcp_conn_request()
6424 fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc); in tcp_conn_request()
6430 if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) { in tcp_conn_request()
6437 sk->sk_data_ready(sk); in tcp_conn_request()
6443 inet_csk_reqsk_queue_hash_add(sk, req, in tcp_conn_request()
6445 af_ops->send_synack(sk, dst, &fl, req, &foc, in tcp_conn_request()
6461 tcp_listendrop(sk); in tcp_conn_request()
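
Nearly every entry above follows the same accessor idiom: the generic struct sock pointer sk is converted to a protocol-specific view with tcp_sk(sk) or inet_csk(sk). The sketch below is a minimal, standalone userspace illustration of why those conversions are plain pointer casts. The struct members and names here are simplified placeholders, not the real kernel layout (which also nests struct inet_sock between the two levels).

/* Minimal sketch of the sock -> inet_connection_sock -> tcp_sock embedding
 * pattern; names and fields are simplified, not the kernel definitions. */
#include <stdio.h>

struct sock {                                  /* generic socket state */
        int sk_rcvbuf;
};

struct inet_connection_sock {                  /* connection-oriented layer */
        struct sock icsk_sk;                   /* generic part placed first */
        unsigned int icsk_rto;
};

struct tcp_sock {                              /* TCP-specific state */
        struct inet_connection_sock inet_conn; /* again placed first */
        unsigned int snd_cwnd;
};

/* Because the generic struct sock sits at offset 0 of the larger structs,
 * "downcasting" the pointer is just a cast, mirroring the kernel accessors. */
static inline struct inet_connection_sock *inet_csk(struct sock *sk)
{
        return (struct inet_connection_sock *)sk;
}

static inline struct tcp_sock *tcp_sk(struct sock *sk)
{
        return (struct tcp_sock *)sk;
}

int main(void)
{
        struct tcp_sock tp = { .inet_conn.icsk_rto = 200, .snd_cwnd = 10 };
        struct sock *sk = &tp.inet_conn.icsk_sk;   /* generic view of the socket */

        /* Same shape as "struct tcp_sock *tp = tcp_sk(sk);" in the listing. */
        printf("cwnd=%u rto=%u\n", tcp_sk(sk)->snd_cwnd, inet_csk(sk)->icsk_rto);
        return 0;
}

In the real tree both helpers are defined as inline casts of exactly this shape, which is why the listing shows tcp_sk(sk) and inet_csk(sk) used interchangeably on the same pointer.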