Lines matching refs: sk (identifier cross-reference over net/ipv4/tcp_output.c; each entry shows the source line number, the matching line, and the enclosing function)
60 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
64 static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) in tcp_event_new_data_sent() argument
66 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_new_data_sent()
67 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent()
72 __skb_unlink(skb, &sk->sk_write_queue); in tcp_event_new_data_sent()
73 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); in tcp_event_new_data_sent()
80 tcp_rearm_rto(sk); in tcp_event_new_data_sent()
82 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, in tcp_event_new_data_sent()
93 static inline __u32 tcp_acceptable_seq(const struct sock *sk) in tcp_acceptable_seq() argument
95 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq()
119 static __u16 tcp_advertise_mss(struct sock *sk) in tcp_advertise_mss() argument
121 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss()
122 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_advertise_mss()
140 void tcp_cwnd_restart(struct sock *sk, s32 delta) in tcp_cwnd_restart() argument
142 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart()
143 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_restart()
146 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); in tcp_cwnd_restart()
148 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
151 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
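The tcp_cwnd_restart() matches above implement the after-idle decay of the congestion window: halve cwnd once for every retransmission timeout that elapsed while the connection was idle, never dropping below the restart (initial) window. A minimal C sketch of that arithmetic, with kernel types and jiffies abstracted into explicit parameters (names here are illustrative, not kernel API):

    #include <stdint.h>

    /* Halve cwnd once per RTO of idle time; mirrors the while loop
     * matched at line 151. idle is signed because the loop drives it
     * negative. Time units only need to match between idle and rto. */
    static uint32_t cwnd_after_idle(uint32_t cwnd, uint32_t restart_cwnd,
                                    int64_t idle, int64_t rto)
    {
        if (restart_cwnd > cwnd)
            restart_cwnd = cwnd;
        while ((idle -= rto) > 0 && cwnd > restart_cwnd)
            cwnd >>= 1;
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
    }

The real function additionally raises CA_EVENT_CWND_RESTART and refreshes snd_ssthresh, as the matched lines show.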
160 struct sock *sk) in tcp_event_data_sent() argument
162 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_sent()
166 tcp_ca_event(sk, CA_EVENT_TX_START); in tcp_event_data_sent()
175 inet_csk_inc_pingpong_cnt(sk); in tcp_event_data_sent()
181 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, in tcp_event_ack_sent() argument
184 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_ack_sent()
187 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, in tcp_event_ack_sent()
191 __sock_put(sk); in tcp_event_ack_sent()
196 tcp_dec_quickack_mode(sk, pkts); in tcp_event_ack_sent()
197 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); in tcp_event_ack_sent()
207 void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, in tcp_select_initial_window() argument
231 if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows) in tcp_select_initial_window()
242 space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); in tcp_select_initial_window()
258 static u16 tcp_select_window(struct sock *sk) in tcp_select_window() argument
260 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window()
263 u32 new_win = __tcp_select_window(sk); in tcp_select_window()
275 NET_INC_STATS(sock_net(sk), in tcp_select_window()
286 sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows) in tcp_select_window()
298 NET_INC_STATS(sock_net(sk), in tcp_select_window()
301 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV); in tcp_select_window()
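tcp_select_window() (matches above) decides the window value written into each outgoing header. A simplified sketch of its core rule, assuming illustrative names; the kernel version additionally aligns a kept window to the scale granularity and bumps the to/from-zero-window MIB counters seen above:

    #include <stdint.h>

    /* Never shrink an already-advertised window (strict RFC 793
     * senders rely on this), and emit it in scaled header units. */
    static uint16_t advertised_window(uint32_t new_win, uint32_t cur_win,
                                      unsigned int rcv_wscale)
    {
        if (new_win < cur_win)
            new_win = cur_win;
        return (uint16_t)(new_win >> rcv_wscale);
    }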
308 static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_synack() argument
310 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack()
315 else if (tcp_ca_needs_ecn(sk) || in tcp_ecn_send_synack()
316 tcp_bpf_ca_needs_ecn(sk)) in tcp_ecn_send_synack()
317 INET_ECN_xmit(sk); in tcp_ecn_send_synack()
321 static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_syn() argument
323 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn()
324 bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk); in tcp_ecn_send_syn()
325 bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || in tcp_ecn_send_syn()
326 tcp_ca_needs_ecn(sk) || bpf_needs_ecn; in tcp_ecn_send_syn()
329 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_ecn_send_syn()
340 if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn) in tcp_ecn_send_syn()
341 INET_ECN_xmit(sk); in tcp_ecn_send_syn()
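The tcp_ecn_send_syn() matches show the three inputs to the ECN-on-SYN decision. A condensed sketch of that predicate (the per-route RTAX_FEATURE_ECN override behind the dst lookup at line 329 is omitted for brevity):

    #include <stdbool.h>

    /* Offer ECN on the SYN when the sysctl enables it outright (== 1)
     * or when the congestion control, built-in or BPF-selected, wants
     * ECN signals. */
    static bool syn_offers_ecn(int sysctl_tcp_ecn, bool ca_needs_ecn,
                               bool bpf_needs_ecn)
    {
        return sysctl_tcp_ecn == 1 || ca_needs_ecn || bpf_needs_ecn;
    }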
345 static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_clear_syn() argument
347 if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback) in tcp_ecn_clear_syn()
364 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, in tcp_ecn_send() argument
367 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send()
373 INET_ECN_xmit(sk); in tcp_ecn_send()
379 } else if (!tcp_ca_needs_ecn(sk)) { in tcp_ecn_send()
381 INET_ECN_dontxmit(sk); in tcp_ecn_send()
590 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, in tcp_syn_options() argument
594 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options()
602 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_syn_options()
619 opts->mss = tcp_advertise_mss(sk); in tcp_syn_options()
622 if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) { in tcp_syn_options()
628 if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) { in tcp_syn_options()
633 if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) { in tcp_syn_options()
660 static unsigned int tcp_synack_options(const struct sock *sk, in tcp_synack_options() argument
717 smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining); in tcp_synack_options()
725 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, in tcp_established_options() argument
729 struct tcp_sock *tp = tcp_sk(sk); in tcp_established_options()
739 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_established_options()
790 static void tcp_tsq_write(struct sock *sk) in tcp_tsq_write() argument
792 if ((1 << sk->sk_state) & in tcp_tsq_write()
795 struct tcp_sock *tp = tcp_sk(sk); in tcp_tsq_write()
800 tcp_xmit_retransmit_queue(sk); in tcp_tsq_write()
803 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, in tcp_tsq_write()
808 static void tcp_tsq_handler(struct sock *sk) in tcp_tsq_handler() argument
810 bh_lock_sock(sk); in tcp_tsq_handler()
811 if (!sock_owned_by_user(sk)) in tcp_tsq_handler()
812 tcp_tsq_write(sk); in tcp_tsq_handler()
813 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) in tcp_tsq_handler()
814 sock_hold(sk); in tcp_tsq_handler()
815 bh_unlock_sock(sk); in tcp_tsq_handler()
830 struct sock *sk; in tcp_tasklet_func() local
840 sk = (struct sock *)tp; in tcp_tasklet_func()
842 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); in tcp_tasklet_func()
844 tcp_tsq_handler(sk); in tcp_tasklet_func()
845 sk_free(sk); in tcp_tasklet_func()
860 void tcp_release_cb(struct sock *sk) in tcp_release_cb() argument
866 flags = sk->sk_tsq_flags; in tcp_release_cb()
870 } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags); in tcp_release_cb()
873 tcp_tsq_write(sk); in tcp_release_cb()
874 __sock_put(sk); in tcp_release_cb()
885 sock_release_ownership(sk); in tcp_release_cb()
888 tcp_write_timer_handler(sk); in tcp_release_cb()
889 __sock_put(sk); in tcp_release_cb()
892 tcp_delack_timer_handler(sk); in tcp_release_cb()
893 __sock_put(sk); in tcp_release_cb()
896 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
897 __sock_put(sk); in tcp_release_cb()
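The tcp_tsq_handler()/tcp_release_cb() matches above are the two halves of TSQ's deferral handshake: softirq context tries to transmit, and if the user currently owns the socket it leaves a flag plus a reference; releasing the socket lock then consumes the flags and runs the deferred work. A self-contained C11 model of the pattern with illustrative names (the kernel tracks several deferred events, not just one bit):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TSQ_DEFERRED (1u << 0)

    struct mini_sock {
        atomic_uint tsq_flags;    /* models sk->sk_tsq_flags     */
        atomic_int  refcnt;       /* models sock_hold/__sock_put */
        bool        owned_by_user;
    };

    static void write_work(struct mini_sock *sk) { (void)sk; /* xmit */ }

    /* Softirq side (tcp_tsq_handler): do the work now if the socket is
     * free; otherwise record the deferred bit and pin the socket so it
     * survives until release_cb() runs. */
    static void tsq_handler(struct mini_sock *sk)
    {
        if (!sk->owned_by_user)
            write_work(sk);
        else if (!(atomic_fetch_or(&sk->tsq_flags, TSQ_DEFERRED)
                   & TSQ_DEFERRED))
            atomic_fetch_add(&sk->refcnt, 1);
    }

    /* Lock-release side (tcp_release_cb): atomically consume the
     * deferred bits, run the work, drop the pinning reference. */
    static void release_cb(struct mini_sock *sk)
    {
        unsigned int flags = atomic_load(&sk->tsq_flags), nflags;

        do {
            if (!(flags & TSQ_DEFERRED))
                return;
            nflags = flags & ~TSQ_DEFERRED;
        } while (!atomic_compare_exchange_weak(&sk->tsq_flags,
                                               &flags, nflags));

        write_work(sk);
        atomic_fetch_sub(&sk->refcnt, 1);
    }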
923 struct sock *sk = skb->sk; in tcp_wfree() local
924 struct tcp_sock *tp = tcp_sk(sk); in tcp_wfree()
930 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); in tcp_wfree()
939 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) in tcp_wfree()
942 for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) { in tcp_wfree()
950 nval = cmpxchg(&sk->sk_tsq_flags, oval, nval); in tcp_wfree()
965 sk_free(sk); in tcp_wfree()
974 struct sock *sk = (struct sock *)tp; in tcp_pace_kick() local
976 tcp_tsq_handler(sk); in tcp_pace_kick()
977 sock_put(sk); in tcp_pace_kick()
982 static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, in tcp_update_skb_after_send() argument
985 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_skb_after_send()
987 if (sk->sk_pacing_status != SK_PACING_NONE) { in tcp_update_skb_after_send()
988 unsigned long rate = sk->sk_pacing_rate; in tcp_update_skb_after_send()
1017 static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, in __tcp_transmit_skb() argument
1020 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_transmit_skb()
1033 tp = tcp_sk(sk); in __tcp_transmit_skb()
1053 inet = inet_sk(sk); in __tcp_transmit_skb()
1058 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); in __tcp_transmit_skb()
1060 tcp_options_size = tcp_established_options(sk, skb, &opts, in __tcp_transmit_skb()
1082 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); in __tcp_transmit_skb()
1095 skb->sk = sk; in __tcp_transmit_skb()
1097 skb_set_hash_from_sk(skb, sk); in __tcp_transmit_skb()
1098 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in __tcp_transmit_skb()
1100 skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm); in __tcp_transmit_skb()
1126 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in __tcp_transmit_skb()
1128 th->window = htons(tcp_select_window(sk)); in __tcp_transmit_skb()
1129 tcp_ecn_send(sk, skb, th, tcp_header_size); in __tcp_transmit_skb()
1139 sk_nocaps_add(sk, NETIF_F_GSO_MASK); in __tcp_transmit_skb()
1141 md5, sk, skb); in __tcp_transmit_skb()
1145 icsk->icsk_af_ops->send_check(sk, skb); in __tcp_transmit_skb()
1148 tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); in __tcp_transmit_skb()
1151 tcp_event_data_sent(tp, sk); in __tcp_transmit_skb()
1157 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, in __tcp_transmit_skb()
1173 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in __tcp_transmit_skb()
1176 tcp_enter_cwr(sk); in __tcp_transmit_skb()
1180 tcp_update_skb_after_send(sk, oskb, prior_wstamp); in __tcp_transmit_skb()
1181 tcp_rate_skb_sent(sk, oskb); in __tcp_transmit_skb()
1186 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, in tcp_transmit_skb() argument
1189 return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, in tcp_transmit_skb()
1190 tcp_sk(sk)->rcv_nxt); in tcp_transmit_skb()
1198 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_queue_skb() argument
1200 struct tcp_sock *tp = tcp_sk(sk); in tcp_queue_skb()
1205 tcp_add_write_queue_tail(sk, skb); in tcp_queue_skb()
1206 sk_wmem_queued_add(sk, skb->truesize); in tcp_queue_skb()
1207 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1228 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) in tcp_adjust_pcount() argument
1230 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_pcount()
1285 struct sock *sk, in tcp_insert_write_queue_after() argument
1289 __skb_queue_after(&sk->sk_write_queue, skb, buff); in tcp_insert_write_queue_after()
1291 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_insert_write_queue_after()
1299 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, in tcp_fragment() argument
1303 struct tcp_sock *tp = tcp_sk(sk); in tcp_fragment()
1322 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE); in tcp_fragment()
1323 if (unlikely((sk->sk_wmem_queued >> 1) > limit && in tcp_fragment()
1325 skb != tcp_rtx_queue_head(sk) && in tcp_fragment()
1326 skb != tcp_rtx_queue_tail(sk))) { in tcp_fragment()
1327 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); in tcp_fragment()
1335 buff = sk_stream_alloc_skb(sk, nsize, gfp, true); in tcp_fragment()
1340 sk_wmem_queued_add(sk, buff->truesize); in tcp_fragment()
1341 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1382 tcp_adjust_pcount(sk, skb, diff); in tcp_fragment()
1387 tcp_insert_write_queue_after(skb, buff, sk, tcp_queue); in tcp_fragment()
1436 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) in tcp_trim_head() argument
1450 sk_wmem_queued_add(sk, -delta_truesize); in tcp_trim_head()
1451 sk_mem_uncharge(sk, delta_truesize); in tcp_trim_head()
1452 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); in tcp_trim_head()
1463 static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) in __tcp_mtu_to_mss() argument
1465 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_mtu_to_mss()
1466 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_mtu_to_mss()
1476 const struct dst_entry *dst = __sk_dst_get(sk); in __tcp_mtu_to_mss()
1490 mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss); in __tcp_mtu_to_mss()
1495 int tcp_mtu_to_mss(struct sock *sk, int pmtu) in tcp_mtu_to_mss() argument
1498 return __tcp_mtu_to_mss(sk, pmtu) - in tcp_mtu_to_mss()
1499 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); in tcp_mtu_to_mss()
1503 int tcp_mss_to_mtu(struct sock *sk, int mss) in tcp_mss_to_mtu() argument
1505 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_to_mtu()
1506 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mss_to_mtu()
1516 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_mss_to_mtu()
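tcp_mtu_to_mss() and tcp_mss_to_mtu() (matches above) are inverse conversions between path MTU and usable payload per segment. Specialised to plain IPv4 with 20-byte base headers they reduce to the sketch below; the kernel versions use the af-specific network header length plus the socket's current TCP option length, and clamp to sysctl_tcp_min_snd_mss:

    /* MTU -> MSS: strip IP and TCP headers, clamp to a sane minimum. */
    static int mtu_to_mss(int pmtu, int min_snd_mss)
    {
        int mss = pmtu - 20 /* IPv4 */ - 20 /* TCP */;
        return mss > min_snd_mss ? mss : min_snd_mss;
    }

    /* MSS -> MTU: the exact inverse. */
    static int mss_to_mtu(int mss)
    {
        return mss + 20 + 20;
    }

For example, mtu_to_mss(1500, 48) yields the familiar 1460-byte MSS of Ethernet, and mss_to_mtu(1460) recovers 1500.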
1526 void tcp_mtup_init(struct sock *sk) in tcp_mtup_init() argument
1528 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_init()
1529 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_init()
1530 struct net *net = sock_net(sk); in tcp_mtup_init()
1535 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss); in tcp_mtup_init()
1564 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) in tcp_sync_mss() argument
1566 struct tcp_sock *tp = tcp_sk(sk); in tcp_sync_mss()
1567 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_sync_mss()
1573 mss_now = tcp_mtu_to_mss(sk, pmtu); in tcp_sync_mss()
1579 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1589 unsigned int tcp_current_mss(struct sock *sk) in tcp_current_mss() argument
1591 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_mss()
1592 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_current_mss()
1602 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
1603 mss_now = tcp_sync_mss(sk, mtu); in tcp_current_mss()
1606 header_len = tcp_established_options(sk, NULL, &opts, &md5) + in tcp_current_mss()
1624 static void tcp_cwnd_application_limited(struct sock *sk) in tcp_cwnd_application_limited() argument
1626 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_application_limited()
1628 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && in tcp_cwnd_application_limited()
1629 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_cwnd_application_limited()
1631 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_application_limited()
1634 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1642 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) in tcp_cwnd_validate() argument
1644 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_cwnd_validate()
1645 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_validate()
1657 if (tcp_is_cwnd_limited(sk)) { in tcp_cwnd_validate()
1666 if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle && in tcp_cwnd_validate()
1667 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && in tcp_cwnd_validate()
1669 tcp_cwnd_application_limited(sk); in tcp_cwnd_validate()
1678 if (tcp_write_queue_empty(sk) && sk->sk_socket && in tcp_cwnd_validate()
1679 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && in tcp_cwnd_validate()
1680 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) in tcp_cwnd_validate()
1681 tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED); in tcp_cwnd_validate()
1725 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, in tcp_tso_autosize() argument
1731 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift), in tcp_tso_autosize()
1732 sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); in tcp_tso_autosize()
1747 static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) in tcp_tso_segs() argument
1749 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_tso_segs()
1753 ca_ops->min_tso_segs(sk) : in tcp_tso_segs()
1754 sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs; in tcp_tso_segs()
1756 tso_segs = tcp_tso_autosize(sk, mss_now, min_tso); in tcp_tso_segs()
1757 return min_t(u32, tso_segs, sk->sk_gso_max_segs); in tcp_tso_segs()
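tcp_tso_autosize() and tcp_tso_segs() (matches above) size TSO bursts from the pacing rate: a burst should carry roughly pacing_rate >> pacing_shift bytes, which with the default shift of 10 is about one millisecond of data. A compact sketch with illustrative names:

    #include <stdint.h>

    /* Segments per TSO burst: ~1 ms of bytes at the pacing rate,
     * bounded below by the congestion control's minimum and above by
     * the device's gso_max_segs (the gso_max_size byte cap of the real
     * code is omitted here). */
    static uint32_t tso_segs(uint64_t pacing_rate, unsigned int pacing_shift,
                             uint32_t mss, uint32_t min_tso,
                             uint32_t gso_max_segs)
    {
        uint32_t segs = (uint32_t)((pacing_rate >> pacing_shift) / mss);

        if (segs < min_tso)
            segs = min_tso;
        if (segs > gso_max_segs)
            segs = gso_max_segs;
        return segs;
    }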
1761 static unsigned int tcp_mss_split_point(const struct sock *sk, in tcp_mss_split_point() argument
1767 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_split_point()
1773 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) in tcp_mss_split_point()
1878 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, in tso_fragment() argument
1887 return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, in tso_fragment()
1890 buff = sk_stream_alloc_skb(sk, 0, gfp, true); in tso_fragment()
1895 sk_wmem_queued_add(sk, buff->truesize); in tso_fragment()
1896 sk_mem_charge(sk, buff->truesize); in tso_fragment()
1925 tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE); in tso_fragment()
1935 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, in tcp_tso_should_defer() argument
1940 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_tso_should_defer()
1942 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_should_defer()
1976 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
1979 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); in tcp_tso_should_defer()
2000 head = tcp_rtx_queue_head(sk); in tcp_tso_should_defer()
2037 static inline void tcp_mtu_check_reprobe(struct sock *sk) in tcp_mtu_check_reprobe() argument
2039 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_check_reprobe()
2040 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_check_reprobe()
2041 struct net *net = sock_net(sk); in tcp_mtu_check_reprobe()
2048 int mss = tcp_current_mss(sk); in tcp_mtu_check_reprobe()
2055 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
2062 static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) in tcp_can_coalesce_send_queue_head() argument
2066 skb = tcp_send_head(sk); in tcp_can_coalesce_send_queue_head()
2067 tcp_for_write_queue_from_safe(skb, next, sk) { in tcp_can_coalesce_send_queue_head()
2089 static int tcp_mtu_probe(struct sock *sk) in tcp_mtu_probe() argument
2091 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_probe()
2092 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_probe()
2094 struct net *net = sock_net(sk); in tcp_mtu_probe()
2108 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || in tcp_mtu_probe()
2117 mss_now = tcp_current_mss(sk); in tcp_mtu_probe()
2118 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
2126 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
2131 tcp_mtu_check_reprobe(sk); in tcp_mtu_probe()
2152 if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) in tcp_mtu_probe()
2156 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); in tcp_mtu_probe()
2159 sk_wmem_queued_add(sk, nskb->truesize); in tcp_mtu_probe()
2160 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
2162 skb = tcp_send_head(sk); in tcp_mtu_probe()
2172 tcp_insert_write_queue_before(nskb, skb, sk); in tcp_mtu_probe()
2173 tcp_highest_sack_replace(sk, skb, nskb); in tcp_mtu_probe()
2176 tcp_for_write_queue_from_safe(skb, next, sk) { in tcp_mtu_probe()
2189 tcp_unlink_write_queue(skb, sk); in tcp_mtu_probe()
2190 sk_wmem_free_skb(sk, skb); in tcp_mtu_probe()
2213 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { in tcp_mtu_probe()
2217 tcp_event_new_data_sent(sk, nskb); in tcp_mtu_probe()
2219 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
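Within tcp_mtu_probe() (matches above), the probe size computed at line 2118 is a binary-search step: take the midpoint of the current MTU search interval and convert it to a payload length. A sketch of just that step, again assuming plain IPv4 headers where the kernel calls tcp_mtu_to_mss():

    /* One MTU-probing step: payload size for the midpoint MTU. */
    static int mtu_probe_len(int search_low, int search_high)
    {
        int mid_mtu = (search_high + search_low) >> 1;
        return mid_mtu - 20 /* IPv4 */ - 20 /* TCP */;
    }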
2229 static bool tcp_pacing_check(struct sock *sk) in tcp_pacing_check() argument
2231 struct tcp_sock *tp = tcp_sk(sk); in tcp_pacing_check()
2233 if (!tcp_needs_internal_pacing(sk)) in tcp_pacing_check()
2243 sock_hold(sk); in tcp_pacing_check()
2259 static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, in tcp_small_queue_check() argument
2266 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift)); in tcp_small_queue_check()
2267 if (sk->sk_pacing_status == SK_PACING_NONE) in tcp_small_queue_check()
2269 sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes); in tcp_small_queue_check()
2273 tcp_sk(sk)->tcp_tx_delay) { in tcp_small_queue_check()
2274 u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay; in tcp_small_queue_check()
2284 if (refcount_read(&sk->sk_wmem_alloc) > limit) { in tcp_small_queue_check()
2290 if (tcp_rtx_queue_empty(sk)) in tcp_small_queue_check()
2293 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in tcp_small_queue_check()
2299 if (refcount_read(&sk->sk_wmem_alloc) > limit) in tcp_small_queue_check()
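tcp_small_queue_check() (matches above) is the TSQ byte budget: at most about one millisecond of data at the pacing rate, or two full skbs, may sit in qdisc and device queues; when no pacing is active the tcp_limit_output_bytes sysctl caps the budget. A sketch of the test with illustrative names (the tcp_tx_delay extra-bytes credit at lines 2273-2274 is omitted):

    #include <stdbool.h>
    #include <stdint.h>

    /* true = throttle: set TSQ_THROTTLED and stop sending until
     * tcp_wfree() releases queued bytes. */
    static bool small_queue_throttle(uint32_t wmem_alloc,
                                     uint32_t skb_truesize,
                                     uint64_t pacing_rate,
                                     unsigned int pacing_shift,
                                     uint64_t limit_output_bytes)
    {
        uint64_t limit = pacing_rate >> pacing_shift;

        if (limit < 2ull * skb_truesize)
            limit = 2ull * skb_truesize;
        if (limit > limit_output_bytes)
            limit = limit_output_bytes;
        return wmem_alloc > limit;
    }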
2316 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type) in tcp_chrono_start() argument
2318 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_start()
2329 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type) in tcp_chrono_stop() argument
2331 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_stop()
2341 if (tcp_rtx_and_write_queues_empty(sk)) in tcp_chrono_stop()
2361 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, in tcp_write_xmit() argument
2364 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_xmit()
2377 result = tcp_mtu_probe(sk); in tcp_write_xmit()
2385 max_segs = tcp_tso_segs(sk, mss_now); in tcp_write_xmit()
2386 while ((skb = tcp_send_head(sk))) { in tcp_write_xmit()
2397 if (tcp_pacing_check(sk)) in tcp_write_xmit()
2419 (tcp_skb_is_last(sk, skb) ? in tcp_write_xmit()
2424 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, in tcp_write_xmit()
2431 limit = tcp_mss_split_point(sk, skb, mss_now, in tcp_write_xmit()
2438 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) in tcp_write_xmit()
2441 if (tcp_small_queue_check(sk, skb, 0)) in tcp_write_xmit()
2452 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) in tcp_write_xmit()
2459 tcp_event_new_data_sent(sk, skb); in tcp_write_xmit()
2469 tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED); in tcp_write_xmit()
2471 tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); in tcp_write_xmit()
2474 if (tcp_in_cwnd_reduction(sk)) in tcp_write_xmit()
2479 tcp_schedule_loss_probe(sk, false); in tcp_write_xmit()
2481 tcp_cwnd_validate(sk, is_cwnd_limited); in tcp_write_xmit()
2484 return !tp->packets_out && !tcp_write_queue_empty(sk); in tcp_write_xmit()
2487 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) in tcp_schedule_loss_probe() argument
2489 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_schedule_loss_probe()
2490 struct tcp_sock *tp = tcp_sk(sk); in tcp_schedule_loss_probe()
2500 early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans; in tcp_schedule_loss_probe()
2526 jiffies_to_usecs(inet_csk(sk)->icsk_rto) : in tcp_schedule_loss_probe()
2527 tcp_rto_delta_us(sk); /* How far in future is RTO? */ in tcp_schedule_loss_probe()
2531 tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, in tcp_schedule_loss_probe()
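tcp_schedule_loss_probe() (matches above) arms the tail loss probe timer. The timeout is roughly two SRTTs, padded for a delayed ACK when only one packet is outstanding, with a one-second fallback before any RTT sample exists. A sketch in microseconds, constants assuming HZ=1000 (the kernel works in jiffies and also never schedules the probe past the pending RTO, per line 2527):

    #include <stdint.h>

    /* srtt8_us is SRTT scaled by 8, as tp->srtt_us stores it, so
     * srtt8_us >> 2 equals 2 * SRTT. */
    static uint32_t tlp_timeout_us(uint32_t srtt8_us, uint32_t packets_out)
    {
        const uint32_t rto_min_us     = 200000; /* TCP_RTO_MIN      */
        const uint32_t timeout_min_us = 2000;   /* TCP_TIMEOUT_MIN  */

        if (!srtt8_us)
            return 1000000;                     /* TCP_TIMEOUT_INIT */
        return (srtt8_us >> 2) +
               (packets_out == 1 ? rto_min_us : timeout_min_us);
    }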
2540 static bool skb_still_in_host_queue(const struct sock *sk, in skb_still_in_host_queue() argument
2543 if (unlikely(skb_fclone_busy(sk, skb))) { in skb_still_in_host_queue()
2544 NET_INC_STATS(sock_net(sk), in skb_still_in_host_queue()
2554 void tcp_send_loss_probe(struct sock *sk) in tcp_send_loss_probe() argument
2556 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_loss_probe()
2559 int mss = tcp_current_mss(sk); in tcp_send_loss_probe()
2561 skb = tcp_send_head(sk); in tcp_send_loss_probe()
2564 tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); in tcp_send_loss_probe()
2569 skb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_loss_probe()
2573 tp->packets_out, sk->sk_state, tp->snd_cwnd, mss); in tcp_send_loss_probe()
2574 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2582 if (skb_still_in_host_queue(sk, skb)) in tcp_send_loss_probe()
2590 if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, in tcp_send_loss_probe()
2600 if (__tcp_retransmit_skb(sk, skb, 1)) in tcp_send_loss_probe()
2607 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); in tcp_send_loss_probe()
2609 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2611 tcp_rearm_rto(sk); in tcp_send_loss_probe()
2618 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, in __tcp_push_pending_frames() argument
2625 if (unlikely(sk->sk_state == TCP_CLOSE)) in __tcp_push_pending_frames()
2628 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, in __tcp_push_pending_frames()
2629 sk_gfp_mask(sk, GFP_ATOMIC))) in __tcp_push_pending_frames()
2630 tcp_check_probe_timer(sk); in __tcp_push_pending_frames()
2636 void tcp_push_one(struct sock *sk, unsigned int mss_now) in tcp_push_one() argument
2638 struct sk_buff *skb = tcp_send_head(sk); in tcp_push_one()
2642 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
2697 u32 __tcp_select_window(struct sock *sk) in __tcp_select_window() argument
2699 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_select_window()
2700 struct tcp_sock *tp = tcp_sk(sk); in __tcp_select_window()
2708 int free_space = tcp_space(sk); in __tcp_select_window()
2709 int allowed_space = tcp_full_space(sk); in __tcp_select_window()
2721 if (tcp_under_memory_pressure(sk)) in __tcp_select_window()
2791 static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) in tcp_collapse_retrans() argument
2793 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_retrans()
2808 tcp_highest_sack_replace(sk, next_skb, skb); in tcp_collapse_retrans()
2827 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); in tcp_collapse_retrans()
2831 tcp_rtx_queue_unlink_and_free(next_skb, sk); in tcp_collapse_retrans()
2836 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) in tcp_can_collapse() argument
2852 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, in tcp_retrans_try_collapse() argument
2855 struct tcp_sock *tp = tcp_sk(sk); in tcp_retrans_try_collapse()
2859 if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse) in tcp_retrans_try_collapse()
2865 if (!tcp_can_collapse(sk, skb)) in tcp_retrans_try_collapse()
2884 if (!tcp_collapse_retrans(sk, to)) in tcp_retrans_try_collapse()
2893 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) in __tcp_retransmit_skb() argument
2895 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_retransmit_skb()
2896 struct tcp_sock *tp = tcp_sk(sk); in __tcp_retransmit_skb()
2908 if (refcount_read(&sk->sk_wmem_alloc) > in __tcp_retransmit_skb()
2909 min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), in __tcp_retransmit_skb()
2910 sk->sk_sndbuf)) in __tcp_retransmit_skb()
2913 if (skb_still_in_host_queue(sk, skb)) in __tcp_retransmit_skb()
2921 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
2925 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in __tcp_retransmit_skb()
2928 cur_mss = tcp_current_mss(sk); in __tcp_retransmit_skb()
2941 if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len, in __tcp_retransmit_skb()
2952 tcp_adjust_pcount(sk, skb, diff); in __tcp_retransmit_skb()
2954 tcp_retrans_try_collapse(sk, skb, cur_mss); in __tcp_retransmit_skb()
2959 tcp_ecn_clear_syn(sk, skb); in __tcp_retransmit_skb()
2963 TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); in __tcp_retransmit_skb()
2965 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); in __tcp_retransmit_skb()
2979 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : in __tcp_retransmit_skb()
2984 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); in __tcp_retransmit_skb()
2985 tcp_rate_skb_sent(sk, skb); in __tcp_retransmit_skb()
2988 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in __tcp_retransmit_skb()
2997 tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB, in __tcp_retransmit_skb()
3001 trace_tcp_retransmit_skb(sk, skb); in __tcp_retransmit_skb()
3003 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs); in __tcp_retransmit_skb()
3008 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) in tcp_retransmit_skb() argument
3010 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_skb()
3011 int err = __tcp_retransmit_skb(sk, skb, segs); in tcp_retransmit_skb()
3038 void tcp_xmit_retransmit_queue(struct sock *sk) in tcp_xmit_retransmit_queue() argument
3040 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_xmit_retransmit_queue()
3042 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_retransmit_queue()
3049 rtx_head = tcp_rtx_queue_head(sk); in tcp_xmit_retransmit_queue()
3051 max_segs = tcp_tso_segs(sk, tcp_current_mss(sk)); in tcp_xmit_retransmit_queue()
3056 if (tcp_pacing_check(sk)) in tcp_xmit_retransmit_queue()
3089 if (tcp_small_queue_check(sk, skb, 1)) in tcp_xmit_retransmit_queue()
3092 if (tcp_retransmit_skb(sk, skb, segs)) in tcp_xmit_retransmit_queue()
3095 NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb)); in tcp_xmit_retransmit_queue()
3097 if (tcp_in_cwnd_reduction(sk)) in tcp_xmit_retransmit_queue()
3102 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_xmit_retransmit_queue()
3103 inet_csk(sk)->icsk_rto, in tcp_xmit_retransmit_queue()
3116 void sk_forced_mem_schedule(struct sock *sk, int size) in sk_forced_mem_schedule() argument
3120 if (size <= sk->sk_forward_alloc) in sk_forced_mem_schedule()
3123 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; in sk_forced_mem_schedule()
3124 sk_memory_allocated_add(sk, amt); in sk_forced_mem_schedule()
3126 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in sk_forced_mem_schedule()
3127 mem_cgroup_charge_skmem(sk->sk_memcg, amt); in sk_forced_mem_schedule()
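sk_forced_mem_schedule() (matches above) charges socket memory without the possibility of failure, because it backs the FIN that tcp_send_fin() must emit even under memory pressure. The accounting granularity is one SK_MEM_QUANTUM (a page); a sketch of the quantum arithmetic:

    /* How many whole quanta to charge; 0 if the reservation already
     * covers the request. MEM_QUANTUM stands in for SK_MEM_QUANTUM. */
    #define MEM_QUANTUM 4096

    static int forced_mem_quanta(int size, int forward_alloc)
    {
        if (size <= forward_alloc)
            return 0;
        return (size + MEM_QUANTUM - 1) / MEM_QUANTUM;
    }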
3133 void tcp_send_fin(struct sock *sk) in tcp_send_fin() argument
3135 struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); in tcp_send_fin()
3136 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_fin()
3143 if (!tskb && tcp_under_memory_pressure(sk)) in tcp_send_fin()
3144 tskb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_fin()
3150 if (tcp_write_queue_empty(sk)) { in tcp_send_fin()
3161 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); in tcp_send_fin()
3167 sk_forced_mem_schedule(sk, skb->truesize); in tcp_send_fin()
3171 tcp_queue_skb(sk, skb); in tcp_send_fin()
3173 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); in tcp_send_fin()
3181 void tcp_send_active_reset(struct sock *sk, gfp_t priority) in tcp_send_active_reset() argument
3185 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); in tcp_send_active_reset()
3190 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); in tcp_send_active_reset()
3196 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), in tcp_send_active_reset()
3198 tcp_mstamp_refresh(tcp_sk(sk)); in tcp_send_active_reset()
3200 if (tcp_transmit_skb(sk, skb, 0, priority)) in tcp_send_active_reset()
3201 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); in tcp_send_active_reset()
3206 trace_tcp_send_reset(sk, NULL); in tcp_send_active_reset()
3215 int tcp_send_synack(struct sock *sk) in tcp_send_synack() argument
3219 skb = tcp_rtx_queue_head(sk); in tcp_send_synack()
3234 tcp_highest_sack_replace(sk, skb, nskb); in tcp_send_synack()
3235 tcp_rtx_queue_unlink_and_free(skb, sk); in tcp_send_synack()
3237 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); in tcp_send_synack()
3238 sk_wmem_queued_add(sk, nskb->truesize); in tcp_send_synack()
3239 sk_mem_charge(sk, nskb->truesize); in tcp_send_synack()
3244 tcp_ecn_send_synack(sk, skb); in tcp_send_synack()
3246 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_send_synack()
3258 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, in tcp_make_synack() argument
3264 const struct tcp_sock *tp = tcp_sk(sk); in tcp_make_synack()
3295 skb_set_owner_w(skb, (struct sock *)sk); in tcp_make_synack()
3317 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); in tcp_make_synack()
3320 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, in tcp_make_synack()
3343 __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); in tcp_make_synack()
3360 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) in tcp_ca_dst_init() argument
3362 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_dst_init()
3380 static void tcp_connect_init(struct sock *sk) in tcp_connect_init() argument
3382 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_connect_init()
3383 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_init()
3391 if (sock_net(sk)->ipv4.sysctl_tcp_timestamps) in tcp_connect_init()
3395 if (tp->af_specific->md5_lookup(sk, sk)) in tcp_connect_init()
3403 tcp_mtup_init(sk); in tcp_connect_init()
3404 tcp_sync_mss(sk, dst_mtu(dst)); in tcp_connect_init()
3406 tcp_ca_dst_init(sk, dst); in tcp_connect_init()
3412 tcp_initialize_rcv_mss(sk); in tcp_connect_init()
3415 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_connect_init()
3416 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3417 tp->window_clamp = tcp_full_space(sk); in tcp_connect_init()
3419 rcv_wnd = tcp_rwnd_init_bpf(sk); in tcp_connect_init()
3423 tcp_select_initial_window(sk, tcp_full_space(sk), in tcp_connect_init()
3427 sock_net(sk)->ipv4.sysctl_tcp_window_scaling, in tcp_connect_init()
3434 sk->sk_err = 0; in tcp_connect_init()
3435 sock_reset_flag(sk, SOCK_DONE); in tcp_connect_init()
3438 tcp_write_queue_purge(sk); in tcp_connect_init()
3451 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); in tcp_connect_init()
3452 inet_csk(sk)->icsk_retransmits = 0; in tcp_connect_init()
3456 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_connect_queue_skb() argument
3458 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_queue_skb()
3463 sk_wmem_queued_add(sk, skb->truesize); in tcp_connect_queue_skb()
3464 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3476 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) in tcp_send_syn_data() argument
3478 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_syn_data()
3484 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) in tcp_send_syn_data()
3493 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - in tcp_send_syn_data()
3501 syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false); in tcp_send_syn_data()
3525 tcp_connect_queue_skb(sk, syn_data); in tcp_send_syn_data()
3527 tcp_chrono_start(sk, TCP_CHRONO_BUSY); in tcp_send_syn_data()
3529 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); in tcp_send_syn_data()
3542 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); in tcp_send_syn_data()
3543 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); in tcp_send_syn_data()
3548 __skb_queue_tail(&sk->sk_write_queue, syn_data); in tcp_send_syn_data()
3555 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); in tcp_send_syn_data()
3564 int tcp_connect(struct sock *sk) in tcp_connect() argument
3566 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect()
3570 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL); in tcp_connect()
3572 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in tcp_connect()
3575 tcp_connect_init(sk); in tcp_connect()
3578 tcp_finish_connect(sk, NULL); in tcp_connect()
3582 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true); in tcp_connect()
3589 tcp_connect_queue_skb(sk, buff); in tcp_connect()
3590 tcp_ecn_send_syn(sk, buff); in tcp_connect()
3591 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_connect()
3594 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
3595 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
3604 buff = tcp_send_head(sk); in tcp_connect()
3609 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); in tcp_connect()
3612 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_connect()
3613 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); in tcp_connect()
3622 void tcp_send_delayed_ack(struct sock *sk) in tcp_send_delayed_ack() argument
3624 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_delayed_ack()
3629 const struct tcp_sock *tp = tcp_sk(sk); in tcp_send_delayed_ack()
3632 if (inet_csk_in_pingpong_mode(sk) || in tcp_send_delayed_ack()
3663 tcp_send_ack(sk); in tcp_send_delayed_ack()
3672 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
3676 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) in __tcp_send_ack() argument
3681 if (sk->sk_state == TCP_CLOSE) in __tcp_send_ack()
3689 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); in __tcp_send_ack()
3691 inet_csk_schedule_ack(sk); in __tcp_send_ack()
3692 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; in __tcp_send_ack()
3693 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in __tcp_send_ack()
3700 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); in __tcp_send_ack()
3709 __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); in __tcp_send_ack()
3713 void tcp_send_ack(struct sock *sk) in tcp_send_ack() argument
3715 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); in tcp_send_ack()
3729 static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) in tcp_xmit_probe_skb() argument
3731 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_probe_skb()
3736 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); in tcp_xmit_probe_skb()
3747 NET_INC_STATS(sock_net(sk), mib); in tcp_xmit_probe_skb()
3748 return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0); in tcp_xmit_probe_skb()
3752 void tcp_send_window_probe(struct sock *sk) in tcp_send_window_probe() argument
3754 if (sk->sk_state == TCP_ESTABLISHED) { in tcp_send_window_probe()
3755 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; in tcp_send_window_probe()
3756 tcp_mstamp_refresh(tcp_sk(sk)); in tcp_send_window_probe()
3757 tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE); in tcp_send_window_probe()
3762 int tcp_write_wakeup(struct sock *sk, int mib) in tcp_write_wakeup() argument
3764 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_wakeup()
3767 if (sk->sk_state == TCP_CLOSE) in tcp_write_wakeup()
3770 skb = tcp_send_head(sk); in tcp_write_wakeup()
3773 unsigned int mss = tcp_current_mss(sk); in tcp_write_wakeup()
3787 if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, in tcp_write_wakeup()
3794 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_write_wakeup()
3796 tcp_event_new_data_sent(sk, skb); in tcp_write_wakeup()
3800 tcp_xmit_probe_skb(sk, 1, mib); in tcp_write_wakeup()
3801 return tcp_xmit_probe_skb(sk, 0, mib); in tcp_write_wakeup()
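tcp_write_wakeup() (matches above) either pushes one real segment, if unsent data fits in the offered window, or falls back to a bare probe. The window test is the kernel's wraparound-safe before() comparison; sketched with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the head-of-queue segment starts before the right edge
     * of the send window (snd_una + snd_wnd); the signed difference
     * makes the sequence comparison robust to 32-bit wraparound. */
    static bool can_push_new_data(bool have_unsent, uint32_t seg_seq,
                                  uint32_t snd_una, uint32_t snd_wnd)
    {
        uint32_t wnd_end = snd_una + snd_wnd;

        return have_unsent && (int32_t)(seg_seq - wnd_end) < 0;
    }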
3808 void tcp_send_probe0(struct sock *sk) in tcp_send_probe0() argument
3810 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_probe0()
3811 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_probe0()
3812 struct net *net = sock_net(sk); in tcp_send_probe0()
3816 err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE); in tcp_send_probe0()
3818 if (tp->packets_out || tcp_write_queue_empty(sk)) { in tcp_send_probe0()
3829 timeout = tcp_probe0_when(sk, TCP_RTO_MAX); in tcp_send_probe0()
3836 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL); in tcp_send_probe0()
3839 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) in tcp_rtx_synack() argument
3846 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL); in tcp_rtx_synack()
3848 __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); in tcp_rtx_synack()
3849 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); in tcp_rtx_synack()
3850 if (unlikely(tcp_passive_fastopen(sk))) in tcp_rtx_synack()
3851 tcp_sk(sk)->total_retrans++; in tcp_rtx_synack()
3852 trace_tcp_retransmit_synack(sk, req); in tcp_rtx_synack()