Lines Matching refs:sk (uses of the socket pointer sk in the Linux kernel's net/ipv4/tcp_timer.c; the leading numbers are source line numbers)

26 static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)  in tcp_clamp_rto_to_user_timeout()  argument
28 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_rto_to_user_timeout()
32 start_ts = tcp_sk(sk)->retrans_stamp; in tcp_clamp_rto_to_user_timeout()
35 elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts; in tcp_clamp_rto_to_user_timeout()
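tcp_clamp_rto_to_user_timeout() caps the next retransmission timeout at whatever remains of the TCP_USER_TIMEOUT budget, measured from retrans_stamp. A minimal sketch of that arithmetic, assuming milliseconds throughout (the kernel works in jiffies, and clamp_rto_ms is an illustrative name):

    /* Cap the next RTO at the remaining user-timeout budget.
     * A zero user_timeout_ms means no budget is configured, so the
     * RTO passes through unchanged; an exhausted budget returns the
     * minimum (the kernel returns 1 jiffy to fire as soon as possible). */
    unsigned int clamp_rto_ms(unsigned int rto_ms,
                              unsigned int user_timeout_ms,
                              unsigned int elapsed_ms)
    {
            unsigned int remaining;

            if (!user_timeout_ms)
                    return rto_ms;
            if (elapsed_ms >= user_timeout_ms)
                    return 1;               /* budget exhausted */
            remaining = user_timeout_ms - elapsed_ms;
            return rto_ms < remaining ? rto_ms : remaining;
    }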
43 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when) in tcp_clamp_probe0_to_user_timeout() argument
45 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_probe0_to_user_timeout()
68 static void tcp_write_err(struct sock *sk) in tcp_write_err() argument
70 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; in tcp_write_err()
71 sk->sk_error_report(sk); in tcp_write_err()
73 tcp_write_queue_purge(sk); in tcp_write_err()
74 tcp_done(sk); in tcp_write_err()
75 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); in tcp_write_err()
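tcp_write_err() stores ETIMEDOUT (or the pending soft error, via the ?: on line 70) in sk->sk_err and wakes the process through sk_error_report(). Userspace sees the error on its next socket call, or can fetch it explicitly; a small sketch:

    /* Read and clear the pending socket error that tcp_write_err()
     * reported (typically ETIMEDOUT after retransmissions gave up). */
    #include <sys/socket.h>
    #include <stdio.h>
    #include <string.h>

    void check_socket_error(int fd)
    {
            int err = 0;
            socklen_t len = sizeof(err);

            if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
                    fprintf(stderr, "socket error: %s\n", strerror(err));
    }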
102 static int tcp_out_of_resources(struct sock *sk, bool do_reset) in tcp_out_of_resources() argument
104 struct tcp_sock *tp = tcp_sk(sk); in tcp_out_of_resources()
113 if (sk->sk_err_soft) in tcp_out_of_resources()
116 if (tcp_check_oom(sk, shift)) { in tcp_out_of_resources()
124 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_out_of_resources()
125 tcp_done(sk); in tcp_out_of_resources()
126 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); in tcp_out_of_resources()
130 if (!check_net(sock_net(sk))) { in tcp_out_of_resources()
132 tcp_done(sk); in tcp_out_of_resources()
144 static int tcp_orphan_retries(struct sock *sk, bool alive) in tcp_orphan_retries() argument
146 int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */ in tcp_orphan_retries()
149 if (sk->sk_err_soft && !alive) in tcp_orphan_retries()
160 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) in tcp_mtu_probing() argument
162 const struct net *net = sock_net(sk); in tcp_mtu_probing()
173 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; in tcp_mtu_probing()
177 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_probing()
179 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtu_probing()
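On line 173 the prober halves the MSS implied by the current lower search bound, so repeated timeouts walk the probed MTU downward before tcp_sync_mss() applies the result. A simplified standalone sketch of that back-off step; mtu_to_mss(), mss_to_mtu(), and both constants are illustrative stand-ins (the kernel derives them from tcp_mtu_to_mss()/tcp_mss_to_mtu() and sysctls, and additionally caps the result at tcp_base_mss):

    #define MIN_MSS      512   /* assumed floor; the kernel uses sysctls */
    #define HDR_OVERHEAD  40   /* assumed IPv4 + TCP headers, no options */

    static unsigned int mtu_to_mss(unsigned int mtu) { return mtu - HDR_OVERHEAD; }
    static unsigned int mss_to_mtu(unsigned int mss) { return mss + HDR_OVERHEAD; }

    /* One MTU-probe back-off step: halve the MSS, clamp, convert back. */
    unsigned int mtu_probe_backoff(unsigned int search_low_mtu)
    {
            unsigned int mss = mtu_to_mss(search_low_mtu) >> 1;

            if (mss < MIN_MSS)
                    mss = MIN_MSS;
            return mss_to_mtu(mss);  /* becomes the new search_low */
    }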
182 static unsigned int tcp_model_timeout(struct sock *sk, in tcp_model_timeout() argument
209 static bool retransmits_timed_out(struct sock *sk, in retransmits_timed_out() argument
215 if (!inet_csk(sk)->icsk_retransmits) in retransmits_timed_out()
218 start_ts = tcp_sk(sk)->retrans_stamp; in retransmits_timed_out()
222 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in retransmits_timed_out()
223 rto_base = tcp_timeout_init(sk); in retransmits_timed_out()
224 timeout = tcp_model_timeout(sk, boundary, rto_base); in retransmits_timed_out()
227 return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0; in retransmits_timed_out()
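retransmits_timed_out() compares the time elapsed since retrans_stamp against tcp_model_timeout(), which models how long `boundary` retransmissions take when the RTO doubles each round until it saturates at TCP_RTO_MAX. A loop-form sketch of that model, assuming milliseconds (the kernel evaluates an equivalent closed-form sum in jiffies):

    #define RTO_MAX_MS 120000U   /* mirrors TCP_RTO_MAX (120 s) */

    /* Total modeled wait: a geometric sum while the RTO doubles,
     * then a linear tail once it is pinned at RTO_MAX_MS. */
    unsigned int model_timeout_ms(unsigned int boundary,
                                  unsigned int rto_base_ms)
    {
            unsigned int timeout = 0, rto = rto_base_ms, i;

            for (i = 0; i <= boundary; i++) {
                    timeout += rto;
                    if (rto < RTO_MAX_MS)
                            rto = (rto * 2 < RTO_MAX_MS) ? rto * 2
                                                         : RTO_MAX_MS;
            }
            return timeout;
    }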
231 static int tcp_write_timeout(struct sock *sk) in tcp_write_timeout() argument
233 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timeout()
234 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_timeout()
235 struct net *net = sock_net(sk); in tcp_write_timeout()
239 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in tcp_write_timeout()
241 __dst_negative_advice(sk); in tcp_write_timeout()
245 if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) { in tcp_write_timeout()
247 tcp_mtu_probing(icsk, sk); in tcp_write_timeout()
249 __dst_negative_advice(sk); in tcp_write_timeout()
253 if (sock_flag(sk, SOCK_DEAD)) { in tcp_write_timeout()
256 retry_until = tcp_orphan_retries(sk, alive); in tcp_write_timeout()
258 !retransmits_timed_out(sk, retry_until, 0); in tcp_write_timeout()
260 if (tcp_out_of_resources(sk, do_reset)) in tcp_write_timeout()
265 expired = retransmits_timed_out(sk, retry_until, in tcp_write_timeout()
267 tcp_fastopen_active_detect_blackhole(sk, expired); in tcp_write_timeout()
270 tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB, in tcp_write_timeout()
276 tcp_write_err(sk); in tcp_write_timeout()
280 if (sk_rethink_txhash(sk)) { in tcp_write_timeout()
282 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH); in tcp_write_timeout()
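tcp_write_timeout() is paced by two sysctls: net.ipv4.tcp_retries1 (after which it triggers MTU probing and re-validates the route, lines 245-249) and net.ipv4.tcp_retries2 (the eventual give-up bound for established sockets). A small sketch that reads the live values from procfs; read_sysctl_int() is an illustrative helper:

    #include <stdio.h>

    static int read_sysctl_int(const char *path)
    {
            FILE *f = fopen(path, "r");
            int val = -1;

            if (f) {
                    if (fscanf(f, "%d", &val) != 1)
                            val = -1;
                    fclose(f);
            }
            return val;
    }

    int main(void)
    {
            printf("tcp_retries1 = %d\n",
                   read_sysctl_int("/proc/sys/net/ipv4/tcp_retries1"));
            printf("tcp_retries2 = %d\n",
                   read_sysctl_int("/proc/sys/net/ipv4/tcp_retries2"));
            return 0;
    }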
289 void tcp_delack_timer_handler(struct sock *sk) in tcp_delack_timer_handler() argument
291 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_delack_timer_handler()
293 sk_mem_reclaim_partial(sk); in tcp_delack_timer_handler()
295 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || in tcp_delack_timer_handler()
300 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); in tcp_delack_timer_handler()
305 if (inet_csk_ack_scheduled(sk)) { in tcp_delack_timer_handler()
306 if (!inet_csk_in_pingpong_mode(sk)) { in tcp_delack_timer_handler()
313 inet_csk_exit_pingpong_mode(sk); in tcp_delack_timer_handler()
316 tcp_mstamp_refresh(tcp_sk(sk)); in tcp_delack_timer_handler()
317 tcp_send_ack(sk); in tcp_delack_timer_handler()
318 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); in tcp_delack_timer_handler()
322 if (tcp_under_memory_pressure(sk)) in tcp_delack_timer_handler()
323 sk_mem_reclaim(sk); in tcp_delack_timer_handler()
340 struct sock *sk = &icsk->icsk_inet.sk; in tcp_delack_timer() local
342 bh_lock_sock(sk); in tcp_delack_timer()
343 if (!sock_owned_by_user(sk)) { in tcp_delack_timer()
344 tcp_delack_timer_handler(sk); in tcp_delack_timer()
346 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); in tcp_delack_timer()
348 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags)) in tcp_delack_timer()
349 sock_hold(sk); in tcp_delack_timer()
351 bh_unlock_sock(sk); in tcp_delack_timer()
352 sock_put(sk); in tcp_delack_timer()
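tcp_delack_timer() shows the kernel's try-lock-or-defer pattern: when the socket is owned by a process, the timer sets TCP_DELACK_TIMER_DEFERRED in sk->sk_tsq_flags (holding a socket reference) and the work is replayed when the owner releases the lock. A rough userspace analogue of the pattern with illustrative names; the replay-on-unlock side is left out:

    #include <pthread.h>
    #include <stdatomic.h>

    #define WORK_DELACK (1u << 0)

    struct conn {
            pthread_mutex_t lock;      /* cf. the socket lock */
            atomic_uint deferred;      /* pending-work bits, cf. sk_tsq_flags */
    };

    /* Timer path: run the handler now if the lock is free,
     * otherwise leave a bit for the lock owner to act on later. */
    void timer_fires(struct conn *c, void (*handler)(struct conn *))
    {
            if (pthread_mutex_trylock(&c->lock) == 0) {
                    handler(c);
                    pthread_mutex_unlock(&c->lock);
            } else {
                    atomic_fetch_or(&c->deferred, WORK_DELACK);
            }
    }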
355 static void tcp_probe_timer(struct sock *sk) in tcp_probe_timer() argument
357 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_probe_timer()
358 struct sk_buff *skb = tcp_send_head(sk); in tcp_probe_timer()
359 struct tcp_sock *tp = tcp_sk(sk); in tcp_probe_timer()
383 max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); in tcp_probe_timer()
384 if (sock_flag(sk, SOCK_DEAD)) { in tcp_probe_timer()
387 max_probes = tcp_orphan_retries(sk, alive); in tcp_probe_timer()
390 if (tcp_out_of_resources(sk, true)) in tcp_probe_timer()
395 abort: tcp_write_err(sk); in tcp_probe_timer()
398 tcp_send_probe0(sk); in tcp_probe_timer()
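tcp_probe_timer() keeps sending zero-window probes until a ceiling on probes_out is reached: tcp_retries2 normally, or the orphan retry limit once the socket is SOCK_DEAD. A deliberately simplified sketch of that give-up decision (the real function also folds in RTO backoff, user-timeout, and out-of-resources checks):

    #include <stdbool.h>

    bool probe_should_abort(unsigned int probes_out, bool orphaned,
                            unsigned int retries2,
                            unsigned int orphan_retries)
    {
            unsigned int max_probes = orphaned ? orphan_retries : retries2;

            return probes_out >= max_probes;   /* abort via tcp_write_err() */
    }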
406 static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req) in tcp_fastopen_synack_timer() argument
408 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastopen_synack_timer()
410 sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */ in tcp_fastopen_synack_timer()
411 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_synack_timer()
416 tcp_write_err(sk); in tcp_fastopen_synack_timer()
421 tcp_enter_loss(sk); in tcp_fastopen_synack_timer()
427 inet_rtx_syn_ack(sk, req); in tcp_fastopen_synack_timer()
432 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_fastopen_synack_timer()
448 void tcp_retransmit_timer(struct sock *sk) in tcp_retransmit_timer() argument
450 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_timer()
451 struct net *net = sock_net(sk); in tcp_retransmit_timer()
452 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_retransmit_timer()
457 lockdep_sock_is_held(sk)); in tcp_retransmit_timer()
459 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && in tcp_retransmit_timer()
460 sk->sk_state != TCP_FIN_WAIT1); in tcp_retransmit_timer()
461 tcp_fastopen_synack_timer(sk, req); in tcp_retransmit_timer()
471 skb = tcp_rtx_queue_head(sk); in tcp_retransmit_timer()
477 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && in tcp_retransmit_timer()
478 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { in tcp_retransmit_timer()
484 struct inet_sock *inet = inet_sk(sk); in tcp_retransmit_timer()
485 if (sk->sk_family == AF_INET) { in tcp_retransmit_timer()
493 else if (sk->sk_family == AF_INET6) { in tcp_retransmit_timer()
495 &sk->sk_v6_daddr, in tcp_retransmit_timer()
502 tcp_write_err(sk); in tcp_retransmit_timer()
505 tcp_enter_loss(sk); in tcp_retransmit_timer()
506 tcp_retransmit_skb(sk, skb, 1); in tcp_retransmit_timer()
507 __sk_dst_reset(sk); in tcp_retransmit_timer()
511 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS); in tcp_retransmit_timer()
512 if (tcp_write_timeout(sk)) in tcp_retransmit_timer()
533 __NET_INC_STATS(sock_net(sk), mib_idx); in tcp_retransmit_timer()
536 tcp_enter_loss(sk); in tcp_retransmit_timer()
539 if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) { in tcp_retransmit_timer()
543 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_retransmit_timer()
576 if (sk->sk_state == TCP_ESTABLISHED && in tcp_retransmit_timer()
586 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_retransmit_timer()
587 tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX); in tcp_retransmit_timer()
588 if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0)) in tcp_retransmit_timer()
589 __sk_dst_reset(sk); in tcp_retransmit_timer()
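On line 587 the rearmed RTO is clamped with tcp_clamp_rto_to_user_timeout(), so a budget set via the TCP_USER_TIMEOUT socket option bounds the whole retransmission episode. A userspace sketch of setting that budget (the option takes milliseconds; 0 restores the sysctl-driven retry behaviour):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>
    #include <stdio.h>

    /* Abort the connection if transmitted data stays unacknowledged
     * for longer than timeout_ms. */
    int set_user_timeout(int fd, unsigned int timeout_ms)
    {
            if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                           &timeout_ms, sizeof(timeout_ms)) < 0) {
                    perror("setsockopt(TCP_USER_TIMEOUT)");
                    return -1;
            }
            return 0;
    }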
596 void tcp_write_timer_handler(struct sock *sk) in tcp_write_timer_handler() argument
598 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timer_handler()
601 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || in tcp_write_timer_handler()
606 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); in tcp_write_timer_handler()
610 tcp_mstamp_refresh(tcp_sk(sk)); in tcp_write_timer_handler()
615 tcp_rack_reo_timeout(sk); in tcp_write_timer_handler()
618 tcp_send_loss_probe(sk); in tcp_write_timer_handler()
622 tcp_retransmit_timer(sk); in tcp_write_timer_handler()
626 tcp_probe_timer(sk); in tcp_write_timer_handler()
631 sk_mem_reclaim(sk); in tcp_write_timer_handler()
638 struct sock *sk = &icsk->icsk_inet.sk; in tcp_write_timer() local
640 bh_lock_sock(sk); in tcp_write_timer()
641 if (!sock_owned_by_user(sk)) { in tcp_write_timer()
642 tcp_write_timer_handler(sk); in tcp_write_timer()
645 if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags)) in tcp_write_timer()
646 sock_hold(sk); in tcp_write_timer()
648 bh_unlock_sock(sk); in tcp_write_timer()
649 sock_put(sk); in tcp_write_timer()
660 void tcp_set_keepalive(struct sock *sk, int val) in tcp_set_keepalive() argument
662 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) in tcp_set_keepalive()
665 if (val && !sock_flag(sk, SOCK_KEEPOPEN)) in tcp_set_keepalive()
666 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); in tcp_set_keepalive()
668 inet_csk_delete_keepalive_timer(sk); in tcp_set_keepalive()
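tcp_set_keepalive() arms or disarms sk_timer when SO_KEEPALIVE is toggled; tcp_keepalive_timer() below then honours the per-socket idle time, probe interval, and probe count. A userspace sketch enabling all of it:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Turn on keepalive probing: first probe after idle_s seconds of
     * silence, then every intvl_s seconds, giving up after cnt misses. */
    int enable_keepalive(int fd, int idle_s, int intvl_s, int cnt)
    {
            int on = 1;

            if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
                    return -1;
            if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE,
                           &idle_s, sizeof(idle_s)) < 0)
                    return -1;
            if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL,
                           &intvl_s, sizeof(intvl_s)) < 0)
                    return -1;
            return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT,
                              &cnt, sizeof(cnt));
    }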
675 struct sock *sk = from_timer(sk, t, sk_timer); in tcp_keepalive_timer() local
676 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_keepalive_timer()
677 struct tcp_sock *tp = tcp_sk(sk); in tcp_keepalive_timer()
681 bh_lock_sock(sk); in tcp_keepalive_timer()
682 if (sock_owned_by_user(sk)) { in tcp_keepalive_timer()
684 inet_csk_reset_keepalive_timer (sk, HZ/20); in tcp_keepalive_timer()
688 if (sk->sk_state == TCP_LISTEN) { in tcp_keepalive_timer()
694 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { in tcp_keepalive_timer()
696 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; in tcp_keepalive_timer()
699 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_keepalive_timer()
703 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_keepalive_timer()
707 if (!sock_flag(sk, SOCK_KEEPOPEN) || in tcp_keepalive_timer()
708 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) in tcp_keepalive_timer()
714 if (tp->packets_out || !tcp_write_queue_empty(sk)) in tcp_keepalive_timer()
728 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_keepalive_timer()
729 tcp_write_err(sk); in tcp_keepalive_timer()
732 if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) { in tcp_keepalive_timer()
746 sk_mem_reclaim(sk); in tcp_keepalive_timer()
749 inet_csk_reset_keepalive_timer (sk, elapsed); in tcp_keepalive_timer()
753 tcp_done(sk); in tcp_keepalive_timer()
756 bh_unlock_sock(sk); in tcp_keepalive_timer()
757 sock_put(sk); in tcp_keepalive_timer()
763 struct sock *sk = (struct sock *)tp; in tcp_compressed_ack_kick() local
765 bh_lock_sock(sk); in tcp_compressed_ack_kick()
766 if (!sock_owned_by_user(sk)) { in tcp_compressed_ack_kick()
773 tcp_send_ack(sk); in tcp_compressed_ack_kick()
777 &sk->sk_tsq_flags)) in tcp_compressed_ack_kick()
778 sock_hold(sk); in tcp_compressed_ack_kick()
780 bh_unlock_sock(sk); in tcp_compressed_ack_kick()
782 sock_put(sk); in tcp_compressed_ack_kick()
787 void tcp_init_xmit_timers(struct sock *sk) in tcp_init_xmit_timers() argument
789 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer, in tcp_init_xmit_timers()
791 hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC, in tcp_init_xmit_timers()
793 tcp_sk(sk)->pacing_timer.function = tcp_pace_kick; in tcp_init_xmit_timers()
795 hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC, in tcp_init_xmit_timers()
797 tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick; in tcp_init_xmit_timers()
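The pacing and compressed-ACK timers set up here are hrtimers rather than classic socket timers. A kernel-style sketch of the same initialization pattern, assuming a module context; my_timer and my_kick are illustrative names, not kernel symbols:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer my_timer;

    /* Expiry callback: do the work, then stay one-shot. */
    static enum hrtimer_restart my_kick(struct hrtimer *t)
    {
            return HRTIMER_NORESTART;
    }

    static void my_timer_setup(void)
    {
            /* Monotonic clock, soft-IRQ expiry pinned to the arming CPU,
             * matching the mode TCP uses for its pacing timer. */
            hrtimer_init(&my_timer, CLOCK_MONOTONIC,
                         HRTIMER_MODE_ABS_PINNED_SOFT);
            my_timer.function = my_kick;
    }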