Lines Matching refs:sk
61 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
65 static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) in tcp_event_new_data_sent() argument
67 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_new_data_sent()
68 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent()
73 __skb_unlink(skb, &sk->sk_write_queue); in tcp_event_new_data_sent()
74 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); in tcp_event_new_data_sent()
81 tcp_rearm_rto(sk); in tcp_event_new_data_sent()
83 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, in tcp_event_new_data_sent()
85 tcp_check_space(sk); in tcp_event_new_data_sent()
95 static inline __u32 tcp_acceptable_seq(const struct sock *sk) in tcp_acceptable_seq() argument
97 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq()
121 static __u16 tcp_advertise_mss(struct sock *sk) in tcp_advertise_mss() argument
123 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss()
124 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_advertise_mss()
142 void tcp_cwnd_restart(struct sock *sk, s32 delta) in tcp_cwnd_restart() argument
144 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart()
145 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_restart()
148 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); in tcp_cwnd_restart()
150 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
153 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
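The tcp_cwnd_restart() entries above implement the restart-after-idle rule: for every retransmission timeout that elapsed while the connection sent nothing, the congestion window is halved, but never below the restart (initial) window. A minimal userspace sketch of that arithmetic, assuming illustrative names and a fixed RTO (not the kernel code itself):

#include <stdio.h>

static unsigned int cwnd_after_idle(unsigned int cwnd, unsigned int restart_cwnd,
                                    int idle_us, int rto_us)
{
        int delta = idle_us;

        /* Halve cwnd once per RTO of idle time, as in the while loop above. */
        while ((delta -= rto_us) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        if (cwnd < restart_cwnd)
                cwnd = restart_cwnd;
        return cwnd;
}

int main(void)
{
        /* cwnd 40, restart window 10, idle for three 200 ms RTOs -> 10 */
        printf("%u\n", cwnd_after_idle(40, 10, 3 * 200000, 200000));
        return 0;
}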
162 struct sock *sk) in tcp_event_data_sent() argument
164 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_sent()
168 tcp_ca_event(sk, CA_EVENT_TX_START); in tcp_event_data_sent()
176 inet_csk_enter_pingpong_mode(sk); in tcp_event_data_sent()
180 static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt) in tcp_event_ack_sent() argument
182 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_ack_sent()
185 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, in tcp_event_ack_sent()
189 __sock_put(sk); in tcp_event_ack_sent()
194 tcp_dec_quickack_mode(sk); in tcp_event_ack_sent()
195 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); in tcp_event_ack_sent()
205 void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, in tcp_select_initial_window() argument
229 if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows) in tcp_select_initial_window()
240 space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); in tcp_select_initial_window()
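The tcp_select_initial_window() entries above choose the initial receive window and the window-scale shift advertised in the SYN. A hedged sketch of the scale selection, assuming the usual rule that the smallest shift is picked which lets the clamped buffer space fit the 16-bit window field (RFC 7323 caps the shift at 14); illustrative userspace C, not the kernel implementation:

#include <stdio.h>

#define TCP_MAX_WSCALE 14

static int pick_rcv_wscale(unsigned int space, unsigned int window_clamp)
{
        int wscale = 0;

        if (space > window_clamp)
                space = window_clamp;
        while (space > 65535U && wscale < TCP_MAX_WSCALE) {
                space >>= 1;
                wscale++;
        }
        return wscale;
}

int main(void)
{
        /* 6 MiB of receive space needs a shift of 7 to fit in 16 bits */
        printf("%d\n", pick_rcv_wscale(6 * 1024 * 1024, 16 * 1024 * 1024));
        return 0;
}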
256 static u16 tcp_select_window(struct sock *sk) in tcp_select_window() argument
258 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window()
261 u32 new_win = __tcp_select_window(sk); in tcp_select_window()
273 NET_INC_STATS(sock_net(sk), in tcp_select_window()
284 sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows) in tcp_select_window()
296 NET_INC_STATS(sock_net(sk), in tcp_select_window()
299 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV); in tcp_select_window()
306 static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_synack() argument
308 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack()
313 else if (tcp_ca_needs_ecn(sk) || in tcp_ecn_send_synack()
314 tcp_bpf_ca_needs_ecn(sk)) in tcp_ecn_send_synack()
315 INET_ECN_xmit(sk); in tcp_ecn_send_synack()
319 static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_syn() argument
321 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn()
322 bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk); in tcp_ecn_send_syn()
323 bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || in tcp_ecn_send_syn()
324 tcp_ca_needs_ecn(sk) || bpf_needs_ecn; in tcp_ecn_send_syn()
327 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_ecn_send_syn()
338 if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn) in tcp_ecn_send_syn()
339 INET_ECN_xmit(sk); in tcp_ecn_send_syn()
343 static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_clear_syn() argument
345 if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback) in tcp_ecn_clear_syn()
362 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, in tcp_ecn_send() argument
365 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send()
371 INET_ECN_xmit(sk); in tcp_ecn_send()
377 } else if (!tcp_ca_needs_ecn(sk)) { in tcp_ecn_send()
379 INET_ECN_dontxmit(sk); in tcp_ecn_send()
467 static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb, in bpf_skops_hdr_opt_len() argument
477 if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), in bpf_skops_hdr_opt_len()
503 sock_ops.sk = (struct sock *)req; in bpf_skops_hdr_opt_len()
506 sock_owned_by_me(sk); in bpf_skops_hdr_opt_len()
509 sock_ops.sk = sk; in bpf_skops_hdr_opt_len()
518 err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk); in bpf_skops_hdr_opt_len()
530 static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb, in bpf_skops_write_hdr_opt() argument
548 sock_ops.sk = (struct sock *)req; in bpf_skops_write_hdr_opt()
551 sock_owned_by_me(sk); in bpf_skops_write_hdr_opt()
554 sock_ops.sk = sk; in bpf_skops_write_hdr_opt()
562 err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk); in bpf_skops_write_hdr_opt()
574 static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb, in bpf_skops_hdr_opt_len() argument
583 static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb, in bpf_skops_write_hdr_opt() argument
756 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, in tcp_syn_options() argument
760 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options()
768 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_syn_options()
785 opts->mss = tcp_advertise_mss(sk); in tcp_syn_options()
788 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) { in tcp_syn_options()
794 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) { in tcp_syn_options()
799 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) { in tcp_syn_options()
822 if (sk_is_mptcp(sk)) { in tcp_syn_options()
825 if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) { in tcp_syn_options()
831 bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining); in tcp_syn_options()
837 static unsigned int tcp_synack_options(const struct sock *sk, in tcp_synack_options() argument
899 smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining); in tcp_synack_options()
901 bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb, in tcp_synack_options()
910 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, in tcp_established_options() argument
914 struct tcp_sock *tp = tcp_sk(sk); in tcp_established_options()
924 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_established_options()
945 if (sk_is_mptcp(sk)) { in tcp_established_options()
949 if (mptcp_established_options(sk, skb, &opt_size, remaining, in tcp_established_options()
976 bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining); in tcp_established_options()
1005 static void tcp_tsq_write(struct sock *sk) in tcp_tsq_write() argument
1007 if ((1 << sk->sk_state) & in tcp_tsq_write()
1010 struct tcp_sock *tp = tcp_sk(sk); in tcp_tsq_write()
1015 tcp_xmit_retransmit_queue(sk); in tcp_tsq_write()
1018 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, in tcp_tsq_write()
1023 static void tcp_tsq_handler(struct sock *sk) in tcp_tsq_handler() argument
1025 bh_lock_sock(sk); in tcp_tsq_handler()
1026 if (!sock_owned_by_user(sk)) in tcp_tsq_handler()
1027 tcp_tsq_write(sk); in tcp_tsq_handler()
1028 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) in tcp_tsq_handler()
1029 sock_hold(sk); in tcp_tsq_handler()
1030 bh_unlock_sock(sk); in tcp_tsq_handler()
1045 struct sock *sk; in tcp_tasklet_func() local
1055 sk = (struct sock *)tp; in tcp_tasklet_func()
1057 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); in tcp_tasklet_func()
1059 tcp_tsq_handler(sk); in tcp_tasklet_func()
1060 sk_free(sk); in tcp_tasklet_func()
1075 void tcp_release_cb(struct sock *sk) in tcp_release_cb() argument
1081 flags = sk->sk_tsq_flags; in tcp_release_cb()
1085 } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags); in tcp_release_cb()
1088 tcp_tsq_write(sk); in tcp_release_cb()
1089 __sock_put(sk); in tcp_release_cb()
1100 sock_release_ownership(sk); in tcp_release_cb()
1103 tcp_write_timer_handler(sk); in tcp_release_cb()
1104 __sock_put(sk); in tcp_release_cb()
1107 tcp_delack_timer_handler(sk); in tcp_release_cb()
1108 __sock_put(sk); in tcp_release_cb()
1111 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
1112 __sock_put(sk); in tcp_release_cb()
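The tcp_release_cb() entries above follow a deferred-work pattern: work requested while the socket was owned by user context is recorded as flag bits, and on release the flags word is atomically swapped out before each handler runs. A small sketch of that pattern using C11 atomics in place of the kernel's cmpxchg(); flag names and handlers are illustrative:

#include <stdatomic.h>
#include <stdio.h>

#define DEFERRED_TX    (1u << 0)
#define DEFERRED_TIMER (1u << 1)

static _Atomic unsigned int tsq_flags;

static void release_deferred(void)
{
        unsigned int flags, nflags;

        do {
                flags = atomic_load(&tsq_flags);
                nflags = flags & ~(DEFERRED_TX | DEFERRED_TIMER); /* keep unrelated bits */
        } while (!atomic_compare_exchange_weak(&tsq_flags, &flags, nflags));

        if (flags & DEFERRED_TX)
                puts("flush transmit queue");
        if (flags & DEFERRED_TIMER)
                puts("run deferred timer handler");
}

int main(void)
{
        atomic_fetch_or(&tsq_flags, DEFERRED_TX);
        release_deferred();
        return 0;
}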
1138 struct sock *sk = skb->sk; in tcp_wfree() local
1139 struct tcp_sock *tp = tcp_sk(sk); in tcp_wfree()
1145 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); in tcp_wfree()
1154 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) in tcp_wfree()
1157 for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) { in tcp_wfree()
1165 nval = cmpxchg(&sk->sk_tsq_flags, oval, nval); in tcp_wfree()
1180 sk_free(sk); in tcp_wfree()
1189 struct sock *sk = (struct sock *)tp; in tcp_pace_kick() local
1191 tcp_tsq_handler(sk); in tcp_pace_kick()
1192 sock_put(sk); in tcp_pace_kick()
1197 static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, in tcp_update_skb_after_send() argument
1200 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_skb_after_send()
1202 if (sk->sk_pacing_status != SK_PACING_NONE) { in tcp_update_skb_after_send()
1203 unsigned long rate = sk->sk_pacing_rate; in tcp_update_skb_after_send()
1221 INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)…
1222 INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl…
1223 INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
1236 static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, in __tcp_transmit_skb() argument
1239 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_transmit_skb()
1252 tp = tcp_sk(sk); in __tcp_transmit_skb()
1276 inet = inet_sk(sk); in __tcp_transmit_skb()
1281 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); in __tcp_transmit_skb()
1283 tcp_options_size = tcp_established_options(sk, skb, &opts, in __tcp_transmit_skb()
1305 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); in __tcp_transmit_skb()
1318 skb->sk = sk; in __tcp_transmit_skb()
1320 skb_set_hash_from_sk(skb, sk); in __tcp_transmit_skb()
1321 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in __tcp_transmit_skb()
1323 skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm)); in __tcp_transmit_skb()
1349 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in __tcp_transmit_skb()
1351 th->window = htons(tcp_select_window(sk)); in __tcp_transmit_skb()
1352 tcp_ecn_send(sk, skb, th, tcp_header_size); in __tcp_transmit_skb()
1362 sk_nocaps_add(sk, NETIF_F_GSO_MASK); in __tcp_transmit_skb()
1364 md5, sk, skb); in __tcp_transmit_skb()
1369 bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts); in __tcp_transmit_skb()
1373 sk, skb); in __tcp_transmit_skb()
1376 tcp_event_ack_sent(sk, rcv_nxt); in __tcp_transmit_skb()
1379 tcp_event_data_sent(tp, sk); in __tcp_transmit_skb()
1385 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, in __tcp_transmit_skb()
1403 sk, skb, &inet->cork.fl); in __tcp_transmit_skb()
1406 tcp_enter_cwr(sk); in __tcp_transmit_skb()
1410 tcp_update_skb_after_send(sk, oskb, prior_wstamp); in __tcp_transmit_skb()
1411 tcp_rate_skb_sent(sk, oskb); in __tcp_transmit_skb()
1416 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, in tcp_transmit_skb() argument
1419 return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, in tcp_transmit_skb()
1420 tcp_sk(sk)->rcv_nxt); in tcp_transmit_skb()
1428 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_queue_skb() argument
1430 struct tcp_sock *tp = tcp_sk(sk); in tcp_queue_skb()
1435 tcp_add_write_queue_tail(sk, skb); in tcp_queue_skb()
1436 sk_wmem_queued_add(sk, skb->truesize); in tcp_queue_skb()
1437 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1458 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) in tcp_adjust_pcount() argument
1460 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_pcount()
1515 struct sock *sk, in tcp_insert_write_queue_after() argument
1519 __skb_queue_after(&sk->sk_write_queue, skb, buff); in tcp_insert_write_queue_after()
1521 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_insert_write_queue_after()
1529 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, in tcp_fragment() argument
1533 struct tcp_sock *tp = tcp_sk(sk); in tcp_fragment()
1552 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE); in tcp_fragment()
1553 if (unlikely((sk->sk_wmem_queued >> 1) > limit && in tcp_fragment()
1555 skb != tcp_rtx_queue_head(sk) && in tcp_fragment()
1556 skb != tcp_rtx_queue_tail(sk))) { in tcp_fragment()
1557 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); in tcp_fragment()
1565 buff = sk_stream_alloc_skb(sk, nsize, gfp, true); in tcp_fragment()
1570 sk_wmem_queued_add(sk, buff->truesize); in tcp_fragment()
1571 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1612 tcp_adjust_pcount(sk, skb, diff); in tcp_fragment()
1617 tcp_insert_write_queue_after(skb, buff, sk, tcp_queue); in tcp_fragment()
1666 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) in tcp_trim_head() argument
1680 sk_wmem_queued_add(sk, -delta_truesize); in tcp_trim_head()
1681 sk_mem_uncharge(sk, delta_truesize); in tcp_trim_head()
1692 static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) in __tcp_mtu_to_mss() argument
1694 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_mtu_to_mss()
1695 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_mtu_to_mss()
1705 const struct dst_entry *dst = __sk_dst_get(sk); in __tcp_mtu_to_mss()
1720 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss)); in __tcp_mtu_to_mss()
1725 int tcp_mtu_to_mss(struct sock *sk, int pmtu) in tcp_mtu_to_mss() argument
1728 return __tcp_mtu_to_mss(sk, pmtu) - in tcp_mtu_to_mss()
1729 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); in tcp_mtu_to_mss()
1734 int tcp_mss_to_mtu(struct sock *sk, int mss) in tcp_mss_to_mtu() argument
1736 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_to_mtu()
1737 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mss_to_mtu()
1747 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_mss_to_mtu()
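The __tcp_mtu_to_mss()/tcp_mtu_to_mss() and tcp_mss_to_mtu() entries above convert between the path MTU and the MSS by subtracting (or adding back) header space; the real functions also account for IP options, extension headers, and TCP option bytes. A minimal sketch assuming plain IPv4 with no options:

#include <stdio.h>

#define IPV4_HDR_LEN 20
#define TCP_HDR_LEN  20

static int mtu_to_mss(int pmtu) { return pmtu - IPV4_HDR_LEN - TCP_HDR_LEN; }
static int mss_to_mtu(int mss)  { return mss  + IPV4_HDR_LEN + TCP_HDR_LEN; }

int main(void)
{
        printf("%d\n", mtu_to_mss(1500)); /* 1460 */
        printf("%d\n", mss_to_mtu(1460)); /* 1500 */
        return 0;
}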
1757 void tcp_mtup_init(struct sock *sk) in tcp_mtup_init() argument
1759 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_init()
1760 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_init()
1761 struct net *net = sock_net(sk); in tcp_mtup_init()
1766 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); in tcp_mtup_init()
1795 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) in tcp_sync_mss() argument
1797 struct tcp_sock *tp = tcp_sk(sk); in tcp_sync_mss()
1798 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_sync_mss()
1804 mss_now = tcp_mtu_to_mss(sk, pmtu); in tcp_sync_mss()
1810 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1820 unsigned int tcp_current_mss(struct sock *sk) in tcp_current_mss() argument
1822 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_mss()
1823 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_current_mss()
1833 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
1834 mss_now = tcp_sync_mss(sk, mtu); in tcp_current_mss()
1837 header_len = tcp_established_options(sk, NULL, &opts, &md5) + in tcp_current_mss()
1855 static void tcp_cwnd_application_limited(struct sock *sk) in tcp_cwnd_application_limited() argument
1857 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_application_limited()
1859 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && in tcp_cwnd_application_limited()
1860 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_cwnd_application_limited()
1862 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_application_limited()
1865 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1873 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) in tcp_cwnd_validate() argument
1875 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_cwnd_validate()
1876 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_validate()
1894 if (tcp_is_cwnd_limited(sk)) { in tcp_cwnd_validate()
1903 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) && in tcp_cwnd_validate()
1904 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && in tcp_cwnd_validate()
1906 tcp_cwnd_application_limited(sk); in tcp_cwnd_validate()
1915 if (tcp_write_queue_empty(sk) && sk->sk_socket && in tcp_cwnd_validate()
1916 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && in tcp_cwnd_validate()
1917 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) in tcp_cwnd_validate()
1918 tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED); in tcp_cwnd_validate()
1962 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, in tcp_tso_autosize() argument
1968 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift), in tcp_tso_autosize()
1969 sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); in tcp_tso_autosize()
1984 static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) in tcp_tso_segs() argument
1986 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_tso_segs()
1990 ca_ops->min_tso_segs(sk) : in tcp_tso_segs()
1991 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); in tcp_tso_segs()
1993 tso_segs = tcp_tso_autosize(sk, mss_now, min_tso); in tcp_tso_segs()
1994 return min_t(u32, tso_segs, sk->sk_gso_max_segs); in tcp_tso_segs()
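The tcp_tso_autosize()/tcp_tso_segs() entries above size TSO bursts from the pacing rate: roughly the bytes the pacer would send in about 1 ms (rate >> pacing shift, default shift 10), converted to segments and clamped between a minimum and the GSO segment limit. A userspace sketch under those assumptions, with illustrative constants:

#include <stdio.h>

static unsigned int tso_segs(unsigned long pacing_rate_bytes_per_sec, int pacing_shift,
                             unsigned int mss, unsigned int min_tso,
                             unsigned int gso_max_segs)
{
        unsigned long bytes = pacing_rate_bytes_per_sec >> pacing_shift;
        unsigned int segs = bytes / mss;

        if (segs < min_tso)
                segs = min_tso;
        if (segs > gso_max_segs)
                segs = gso_max_segs;
        return segs;
}

int main(void)
{
        /* ~1 Gb/s (125 MB/s), shift 10 -> ~122 KB burst -> 83 segments of 1460 bytes */
        printf("%u\n", tso_segs(125000000UL, 10, 1460, 2, 65535));
        return 0;
}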
1998 static unsigned int tcp_mss_split_point(const struct sock *sk, in tcp_mss_split_point() argument
2004 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_split_point()
2010 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) in tcp_mss_split_point()
2115 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, in tso_fragment() argument
2124 return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, in tso_fragment()
2127 buff = sk_stream_alloc_skb(sk, 0, gfp, true); in tso_fragment()
2132 sk_wmem_queued_add(sk, buff->truesize); in tso_fragment()
2133 sk_mem_charge(sk, buff->truesize); in tso_fragment()
2162 tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE); in tso_fragment()
2172 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, in tcp_tso_should_defer() argument
2177 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_tso_should_defer()
2179 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_should_defer()
2213 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
2216 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); in tcp_tso_should_defer()
2237 head = tcp_rtx_queue_head(sk); in tcp_tso_should_defer()
2274 static inline void tcp_mtu_check_reprobe(struct sock *sk) in tcp_mtu_check_reprobe() argument
2276 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_check_reprobe()
2277 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_check_reprobe()
2278 struct net *net = sock_net(sk); in tcp_mtu_check_reprobe()
2285 int mss = tcp_current_mss(sk); in tcp_mtu_check_reprobe()
2292 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
2299 static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) in tcp_can_coalesce_send_queue_head() argument
2303 skb = tcp_send_head(sk); in tcp_can_coalesce_send_queue_head()
2304 tcp_for_write_queue_from_safe(skb, next, sk) { in tcp_can_coalesce_send_queue_head()
2326 static int tcp_mtu_probe(struct sock *sk) in tcp_mtu_probe() argument
2328 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_probe()
2329 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_probe()
2331 struct net *net = sock_net(sk); in tcp_mtu_probe()
2345 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || in tcp_mtu_probe()
2354 mss_now = tcp_current_mss(sk); in tcp_mtu_probe()
2355 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
2363 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
2368 tcp_mtu_check_reprobe(sk); in tcp_mtu_probe()
2389 if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) in tcp_mtu_probe()
2393 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); in tcp_mtu_probe()
2396 sk_wmem_queued_add(sk, nskb->truesize); in tcp_mtu_probe()
2397 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
2399 skb = tcp_send_head(sk); in tcp_mtu_probe()
2409 tcp_insert_write_queue_before(nskb, skb, sk); in tcp_mtu_probe()
2410 tcp_highest_sack_replace(sk, skb, nskb); in tcp_mtu_probe()
2413 tcp_for_write_queue_from_safe(skb, next, sk) { in tcp_mtu_probe()
2426 tcp_unlink_write_queue(skb, sk); in tcp_mtu_probe()
2427 sk_wmem_free_skb(sk, skb); in tcp_mtu_probe()
2450 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { in tcp_mtu_probe()
2454 tcp_event_new_data_sent(sk, nskb); in tcp_mtu_probe()
2456 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
2466 static bool tcp_pacing_check(struct sock *sk) in tcp_pacing_check() argument
2468 struct tcp_sock *tp = tcp_sk(sk); in tcp_pacing_check()
2470 if (!tcp_needs_internal_pacing(sk)) in tcp_pacing_check()
2480 sock_hold(sk); in tcp_pacing_check()
2485 static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk) in tcp_rtx_queue_empty_or_single_skb() argument
2487 const struct rb_node *node = sk->tcp_rtx_queue.rb_node; in tcp_rtx_queue_empty_or_single_skb()
2508 static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, in tcp_small_queue_check() argument
2515 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift)); in tcp_small_queue_check()
2516 if (sk->sk_pacing_status == SK_PACING_NONE) in tcp_small_queue_check()
2518 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); in tcp_small_queue_check()
2522 tcp_sk(sk)->tcp_tx_delay) { in tcp_small_queue_check()
2523 u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay; in tcp_small_queue_check()
2533 if (refcount_read(&sk->sk_wmem_alloc) > limit) { in tcp_small_queue_check()
2539 if (tcp_rtx_queue_empty_or_single_skb(sk)) in tcp_small_queue_check()
2542 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in tcp_small_queue_check()
2548 if (refcount_read(&sk->sk_wmem_alloc) > limit) in tcp_small_queue_check()
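The tcp_small_queue_check() entries above implement TCP Small Queues: a socket may keep only about two packets or roughly 1 ms worth of the pacing rate queued below TCP (qdisc/NIC), optionally capped by tcp_limit_output_bytes, and is throttled once sk_wmem_alloc exceeds that budget. A simplified sketch of the limit computation, with assumed parameter names and without the shift factor and tx-delay adjustments applied in the entries above:

#include <stdbool.h>
#include <stdio.h>

static bool should_throttle(unsigned long wmem_alloc, unsigned long skb_truesize,
                            unsigned long pacing_rate_bytes_per_sec, int pacing_shift,
                            unsigned long limit_output_bytes)
{
        unsigned long limit = pacing_rate_bytes_per_sec >> pacing_shift;

        if (limit < 2 * skb_truesize)
                limit = 2 * skb_truesize;
        if (limit > limit_output_bytes)
                limit = limit_output_bytes;
        return wmem_alloc > limit;
}

int main(void)
{
        /* 300 KB already queued below TCP at ~1 Gb/s pacing -> throttle (prints 1) */
        printf("%d\n", should_throttle(300000, 2048, 125000000UL, 10, 1048576));
        return 0;
}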
2565 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type) in tcp_chrono_start() argument
2567 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_start()
2578 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type) in tcp_chrono_stop() argument
2580 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_stop()
2590 if (tcp_rtx_and_write_queues_empty(sk)) in tcp_chrono_stop()
2610 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, in tcp_write_xmit() argument
2613 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_xmit()
2626 result = tcp_mtu_probe(sk); in tcp_write_xmit()
2634 max_segs = tcp_tso_segs(sk, mss_now); in tcp_write_xmit()
2635 while ((skb = tcp_send_head(sk))) { in tcp_write_xmit()
2646 if (tcp_pacing_check(sk)) in tcp_write_xmit()
2668 (tcp_skb_is_last(sk, skb) ? in tcp_write_xmit()
2673 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, in tcp_write_xmit()
2680 limit = tcp_mss_split_point(sk, skb, mss_now, in tcp_write_xmit()
2687 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) in tcp_write_xmit()
2690 if (tcp_small_queue_check(sk, skb, 0)) in tcp_write_xmit()
2701 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) in tcp_write_xmit()
2708 tcp_event_new_data_sent(sk, skb); in tcp_write_xmit()
2718 tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED); in tcp_write_xmit()
2720 tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); in tcp_write_xmit()
2724 tcp_cwnd_validate(sk, is_cwnd_limited); in tcp_write_xmit()
2727 if (tcp_in_cwnd_reduction(sk)) in tcp_write_xmit()
2732 tcp_schedule_loss_probe(sk, false); in tcp_write_xmit()
2735 return !tp->packets_out && !tcp_write_queue_empty(sk); in tcp_write_xmit()
2738 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) in tcp_schedule_loss_probe() argument
2740 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_schedule_loss_probe()
2741 struct tcp_sock *tp = tcp_sk(sk); in tcp_schedule_loss_probe()
2751 early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans); in tcp_schedule_loss_probe()
2768 timeout_us += tcp_rto_min_us(sk); in tcp_schedule_loss_probe()
2778 jiffies_to_usecs(inet_csk(sk)->icsk_rto) : in tcp_schedule_loss_probe()
2779 tcp_rto_delta_us(sk); /* How far in future is RTO? */ in tcp_schedule_loss_probe()
2783 tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX); in tcp_schedule_loss_probe()
2791 static bool skb_still_in_host_queue(const struct sock *sk, in skb_still_in_host_queue() argument
2794 if (unlikely(skb_fclone_busy(sk, skb))) { in skb_still_in_host_queue()
2795 NET_INC_STATS(sock_net(sk), in skb_still_in_host_queue()
2805 void tcp_send_loss_probe(struct sock *sk) in tcp_send_loss_probe() argument
2807 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_loss_probe()
2810 int mss = tcp_current_mss(sk); in tcp_send_loss_probe()
2817 skb = tcp_send_head(sk); in tcp_send_loss_probe()
2820 tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); in tcp_send_loss_probe()
2825 skb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_loss_probe()
2829 tp->packets_out, sk->sk_state, tp->snd_cwnd, mss); in tcp_send_loss_probe()
2830 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2834 if (skb_still_in_host_queue(sk, skb)) in tcp_send_loss_probe()
2842 if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, in tcp_send_loss_probe()
2852 if (__tcp_retransmit_skb(sk, skb, 1)) in tcp_send_loss_probe()
2861 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); in tcp_send_loss_probe()
2863 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2865 tcp_rearm_rto(sk); in tcp_send_loss_probe()
2872 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, in __tcp_push_pending_frames() argument
2879 if (unlikely(sk->sk_state == TCP_CLOSE)) in __tcp_push_pending_frames()
2882 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, in __tcp_push_pending_frames()
2883 sk_gfp_mask(sk, GFP_ATOMIC))) in __tcp_push_pending_frames()
2884 tcp_check_probe_timer(sk); in __tcp_push_pending_frames()
2890 void tcp_push_one(struct sock *sk, unsigned int mss_now) in tcp_push_one() argument
2892 struct sk_buff *skb = tcp_send_head(sk); in tcp_push_one()
2896 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
2951 u32 __tcp_select_window(struct sock *sk) in __tcp_select_window() argument
2953 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_select_window()
2954 struct tcp_sock *tp = tcp_sk(sk); in __tcp_select_window()
2962 int free_space = tcp_space(sk); in __tcp_select_window()
2963 int allowed_space = tcp_full_space(sk); in __tcp_select_window()
2966 if (sk_is_mptcp(sk)) in __tcp_select_window()
2967 mptcp_space(sk, &free_space, &allowed_space); in __tcp_select_window()
2979 if (tcp_under_memory_pressure(sk)) in __tcp_select_window()
3049 static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) in tcp_collapse_retrans() argument
3051 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_retrans()
3066 tcp_highest_sack_replace(sk, next_skb, skb); in tcp_collapse_retrans()
3085 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); in tcp_collapse_retrans()
3089 tcp_rtx_queue_unlink_and_free(next_skb, sk); in tcp_collapse_retrans()
3094 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) in tcp_can_collapse() argument
3110 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, in tcp_retrans_try_collapse() argument
3113 struct tcp_sock *tp = tcp_sk(sk); in tcp_retrans_try_collapse()
3117 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)) in tcp_retrans_try_collapse()
3123 if (!tcp_can_collapse(sk, skb)) in tcp_retrans_try_collapse()
3142 if (!tcp_collapse_retrans(sk, to)) in tcp_retrans_try_collapse()
3151 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) in __tcp_retransmit_skb() argument
3153 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_retransmit_skb()
3154 struct tcp_sock *tp = tcp_sk(sk); in __tcp_retransmit_skb()
3166 if (refcount_read(&sk->sk_wmem_alloc) > in __tcp_retransmit_skb()
3167 min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), in __tcp_retransmit_skb()
3168 sk->sk_sndbuf)) in __tcp_retransmit_skb()
3171 if (skb_still_in_host_queue(sk, skb)) in __tcp_retransmit_skb()
3185 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
3189 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in __tcp_retransmit_skb()
3192 cur_mss = tcp_current_mss(sk); in __tcp_retransmit_skb()
3213 if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len, in __tcp_retransmit_skb()
3224 tcp_adjust_pcount(sk, skb, diff); in __tcp_retransmit_skb()
3227 tcp_retrans_try_collapse(sk, skb, avail_wnd); in __tcp_retransmit_skb()
3232 tcp_ecn_clear_syn(sk, skb); in __tcp_retransmit_skb()
3236 TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); in __tcp_retransmit_skb()
3238 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); in __tcp_retransmit_skb()
3254 err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC); in __tcp_retransmit_skb()
3261 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); in __tcp_retransmit_skb()
3262 tcp_rate_skb_sent(sk, skb); in __tcp_retransmit_skb()
3265 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in __tcp_retransmit_skb()
3274 tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB, in __tcp_retransmit_skb()
3278 trace_tcp_retransmit_skb(sk, skb); in __tcp_retransmit_skb()
3280 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs); in __tcp_retransmit_skb()
3285 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) in tcp_retransmit_skb() argument
3287 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_skb()
3288 int err = __tcp_retransmit_skb(sk, skb, segs); in tcp_retransmit_skb()
3315 void tcp_xmit_retransmit_queue(struct sock *sk) in tcp_xmit_retransmit_queue() argument
3317 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_xmit_retransmit_queue()
3319 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_retransmit_queue()
3327 rtx_head = tcp_rtx_queue_head(sk); in tcp_xmit_retransmit_queue()
3329 max_segs = tcp_tso_segs(sk, tcp_current_mss(sk)); in tcp_xmit_retransmit_queue()
3334 if (tcp_pacing_check(sk)) in tcp_xmit_retransmit_queue()
3367 if (tcp_small_queue_check(sk, skb, 1)) in tcp_xmit_retransmit_queue()
3370 if (tcp_retransmit_skb(sk, skb, segs)) in tcp_xmit_retransmit_queue()
3373 NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb)); in tcp_xmit_retransmit_queue()
3375 if (tcp_in_cwnd_reduction(sk)) in tcp_xmit_retransmit_queue()
3384 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_xmit_retransmit_queue()
3385 inet_csk(sk)->icsk_rto, in tcp_xmit_retransmit_queue()
3396 void sk_forced_mem_schedule(struct sock *sk, int size) in sk_forced_mem_schedule() argument
3400 delta = size - sk->sk_forward_alloc; in sk_forced_mem_schedule()
3404 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; in sk_forced_mem_schedule()
3405 sk_memory_allocated_add(sk, amt); in sk_forced_mem_schedule()
3407 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in sk_forced_mem_schedule()
3408 mem_cgroup_charge_skmem(sk->sk_memcg, amt); in sk_forced_mem_schedule()
3414 void tcp_send_fin(struct sock *sk) in tcp_send_fin() argument
3416 struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk); in tcp_send_fin()
3417 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_fin()
3425 if (!tskb && tcp_under_memory_pressure(sk)) in tcp_send_fin()
3426 tskb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_fin()
3443 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); in tcp_send_fin()
3449 sk_forced_mem_schedule(sk, skb->truesize); in tcp_send_fin()
3453 tcp_queue_skb(sk, skb); in tcp_send_fin()
3455 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); in tcp_send_fin()
3463 void tcp_send_active_reset(struct sock *sk, gfp_t priority) in tcp_send_active_reset() argument
3467 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); in tcp_send_active_reset()
3472 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); in tcp_send_active_reset()
3478 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), in tcp_send_active_reset()
3480 tcp_mstamp_refresh(tcp_sk(sk)); in tcp_send_active_reset()
3482 if (tcp_transmit_skb(sk, skb, 0, priority)) in tcp_send_active_reset()
3483 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); in tcp_send_active_reset()
3488 trace_tcp_send_reset(sk, NULL); in tcp_send_active_reset()
3497 int tcp_send_synack(struct sock *sk) in tcp_send_synack() argument
3501 skb = tcp_rtx_queue_head(sk); in tcp_send_synack()
3516 tcp_highest_sack_replace(sk, skb, nskb); in tcp_send_synack()
3517 tcp_rtx_queue_unlink_and_free(skb, sk); in tcp_send_synack()
3519 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); in tcp_send_synack()
3520 sk_wmem_queued_add(sk, nskb->truesize); in tcp_send_synack()
3521 sk_mem_charge(sk, nskb->truesize); in tcp_send_synack()
3526 tcp_ecn_send_synack(sk, skb); in tcp_send_synack()
3528 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_send_synack()
3541 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, in tcp_make_synack() argument
3548 const struct tcp_sock *tp = tcp_sk(sk); in tcp_make_synack()
3579 skb_set_owner_w(skb, (struct sock *)sk); in tcp_make_synack()
3601 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); in tcp_make_synack()
3606 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, in tcp_make_synack()
3630 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); in tcp_make_synack()
3640 bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb, in tcp_make_synack()
3650 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) in tcp_ca_dst_init() argument
3652 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_dst_init()
3670 static void tcp_connect_init(struct sock *sk) in tcp_connect_init() argument
3672 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_connect_init()
3673 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_init()
3681 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps)) in tcp_connect_init()
3685 if (tp->af_specific->md5_lookup(sk, sk)) in tcp_connect_init()
3693 tcp_mtup_init(sk); in tcp_connect_init()
3694 tcp_sync_mss(sk, dst_mtu(dst)); in tcp_connect_init()
3696 tcp_ca_dst_init(sk, dst); in tcp_connect_init()
3702 tcp_initialize_rcv_mss(sk); in tcp_connect_init()
3705 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_connect_init()
3706 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3707 tp->window_clamp = tcp_full_space(sk); in tcp_connect_init()
3709 rcv_wnd = tcp_rwnd_init_bpf(sk); in tcp_connect_init()
3713 tcp_select_initial_window(sk, tcp_full_space(sk), in tcp_connect_init()
3717 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling), in tcp_connect_init()
3724 sk->sk_err = 0; in tcp_connect_init()
3725 sock_reset_flag(sk, SOCK_DONE); in tcp_connect_init()
3728 tcp_write_queue_purge(sk); in tcp_connect_init()
3741 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); in tcp_connect_init()
3742 inet_csk(sk)->icsk_retransmits = 0; in tcp_connect_init()
3746 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_connect_queue_skb() argument
3748 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_queue_skb()
3753 sk_wmem_queued_add(sk, skb->truesize); in tcp_connect_queue_skb()
3754 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3766 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) in tcp_send_syn_data() argument
3768 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_syn_data()
3769 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_syn_data()
3775 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) in tcp_send_syn_data()
3784 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_send_syn_data()
3786 space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - in tcp_send_syn_data()
3794 syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false); in tcp_send_syn_data()
3818 tcp_connect_queue_skb(sk, syn_data); in tcp_send_syn_data()
3820 tcp_chrono_start(sk, TCP_CHRONO_BUSY); in tcp_send_syn_data()
3822 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); in tcp_send_syn_data()
3835 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); in tcp_send_syn_data()
3836 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); in tcp_send_syn_data()
3841 __skb_queue_tail(&sk->sk_write_queue, syn_data); in tcp_send_syn_data()
3848 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); in tcp_send_syn_data()
3857 int tcp_connect(struct sock *sk) in tcp_connect() argument
3859 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect()
3863 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL); in tcp_connect()
3865 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in tcp_connect()
3868 tcp_connect_init(sk); in tcp_connect()
3871 tcp_finish_connect(sk, NULL); in tcp_connect()
3875 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true); in tcp_connect()
3882 tcp_connect_queue_skb(sk, buff); in tcp_connect()
3883 tcp_ecn_send_syn(sk, buff); in tcp_connect()
3884 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_connect()
3887 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
3888 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
3897 buff = tcp_send_head(sk); in tcp_connect()
3902 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); in tcp_connect()
3905 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_connect()
3906 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); in tcp_connect()
3915 void tcp_send_delayed_ack(struct sock *sk) in tcp_send_delayed_ack() argument
3917 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_delayed_ack()
3922 const struct tcp_sock *tp = tcp_sk(sk); in tcp_send_delayed_ack()
3925 if (inet_csk_in_pingpong_mode(sk) || in tcp_send_delayed_ack()
3946 ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max); in tcp_send_delayed_ack()
3955 tcp_send_ack(sk); in tcp_send_delayed_ack()
3964 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
3968 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) in __tcp_send_ack() argument
3973 if (sk->sk_state == TCP_CLOSE) in __tcp_send_ack()
3981 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); in __tcp_send_ack()
3983 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_send_ack()
3989 inet_csk_schedule_ack(sk); in __tcp_send_ack()
3991 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX); in __tcp_send_ack()
3997 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); in __tcp_send_ack()
4006 __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); in __tcp_send_ack()
4010 void tcp_send_ack(struct sock *sk) in tcp_send_ack() argument
4012 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); in tcp_send_ack()
4026 static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) in tcp_xmit_probe_skb() argument
4028 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_probe_skb()
4033 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); in tcp_xmit_probe_skb()
4044 NET_INC_STATS(sock_net(sk), mib); in tcp_xmit_probe_skb()
4045 return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0); in tcp_xmit_probe_skb()
4049 void tcp_send_window_probe(struct sock *sk) in tcp_send_window_probe() argument
4051 if (sk->sk_state == TCP_ESTABLISHED) { in tcp_send_window_probe()
4052 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; in tcp_send_window_probe()
4053 tcp_mstamp_refresh(tcp_sk(sk)); in tcp_send_window_probe()
4054 tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE); in tcp_send_window_probe()
4059 int tcp_write_wakeup(struct sock *sk, int mib) in tcp_write_wakeup() argument
4061 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_wakeup()
4064 if (sk->sk_state == TCP_CLOSE) in tcp_write_wakeup()
4067 skb = tcp_send_head(sk); in tcp_write_wakeup()
4070 unsigned int mss = tcp_current_mss(sk); in tcp_write_wakeup()
4084 if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, in tcp_write_wakeup()
4091 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_write_wakeup()
4093 tcp_event_new_data_sent(sk, skb); in tcp_write_wakeup()
4097 tcp_xmit_probe_skb(sk, 1, mib); in tcp_write_wakeup()
4098 return tcp_xmit_probe_skb(sk, 0, mib); in tcp_write_wakeup()
4105 void tcp_send_probe0(struct sock *sk) in tcp_send_probe0() argument
4107 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_probe0()
4108 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_probe0()
4109 struct net *net = sock_net(sk); in tcp_send_probe0()
4113 err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE); in tcp_send_probe0()
4115 if (tp->packets_out || tcp_write_queue_empty(sk)) { in tcp_send_probe0()
4127 timeout = tcp_probe0_when(sk, TCP_RTO_MAX); in tcp_send_probe0()
4135 timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout); in tcp_send_probe0()
4136 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX); in tcp_send_probe0()
4139 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) in tcp_rtx_synack() argument
4146 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL, in tcp_rtx_synack()
4149 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); in tcp_rtx_synack()
4150 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); in tcp_rtx_synack()
4151 if (unlikely(tcp_passive_fastopen(sk))) in tcp_rtx_synack()
4152 tcp_sk(sk)->total_retrans++; in tcp_rtx_synack()
4153 trace_tcp_retransmit_synack(sk, req); in tcp_rtx_synack()