
Searched refs:tcp_sk (Results 1 – 25 of 46) sorted by relevance
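For reference, tcp_sk() is the accessor every hit below relies on: it converts the generic struct sock pointer handed to TCP code into the TCP-specific struct tcp_sock. A minimal sketch of the long-standing definition from include/linux/tcp.h, assuming a tree where it is still a plain cast (newer trees derive the pointer with a container_of-style helper instead):

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	/* struct tcp_sock embeds struct sock as its first member (via
	 * inet_connection_sock/inet_sock), so the cast is valid. */
	return (struct tcp_sock *)sk;
}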


/net/ipv4/
tcp_timer.c
32 start_ts = tcp_sk(sk)->retrans_stamp; in tcp_clamp_rto_to_user_timeout()
35 elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts; in tcp_clamp_rto_to_user_timeout()
104 struct tcp_sock *tp = tcp_sk(sk); in tcp_out_of_resources()
218 start_ts = tcp_sk(sk)->retrans_stamp; in retransmits_timed_out()
227 return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0; in retransmits_timed_out()
234 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_timeout()
317 tcp_mstamp_refresh(tcp_sk(sk)); in tcp_delack_timer_handler()
360 struct tcp_sock *tp = tcp_sk(sk); in tcp_probe_timer()
410 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_synack_timer()
443 const struct tcp_sock *tp = tcp_sk(sk); in tcp_rtx_probe0_timed_out()
[all …]
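The tcp_timer.c hits share one idiom: elapsed time is measured on the 32-bit millisecond clock returned by tcp_time_stamp(tcp_sk(sk)) and compared with signed arithmetic so the test stays correct across counter wrap (see line 227 above). A minimal sketch of that pattern; the function name is hypothetical:

#include <net/tcp.h>

static bool example_timed_out(const struct sock *sk, u32 start_ts, u32 timeout_ms)
{
	u32 now = tcp_time_stamp(tcp_sk(sk));

	/* Casting the difference to s32 keeps the comparison valid even
	 * if the 32-bit timestamp has wrapped since start_ts was taken. */
	return (s32)(now - start_ts - timeout_ms) >= 0;
}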
tcp_bbr.c
244 unsigned int mss = tcp_sk(sk)->mss_cache; in bbr_rate_bytes_per_sec()
266 struct tcp_sock *tp = tcp_sk(sk); in bbr_init_pacing_rate_from_rtt()
285 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_pacing_rate()
303 struct tcp_sock *tp = tcp_sk(sk); in bbr_tso_segs_goal()
320 struct tcp_sock *tp = tcp_sk(sk); in bbr_save_cwnd()
331 struct tcp_sock *tp = tcp_sk(sk); in bbr_cwnd_event()
436 struct tcp_sock *tp = tcp_sk(sk); in bbr_packets_in_net_at_edt()
480 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_cwnd_to_recover_or_restore()
519 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_cwnd()
554 struct tcp_sock *tp = tcp_sk(sk); in bbr_is_next_cycle_phase()
[all …]
tcp_input.c
146 bool unknown_opt = tcp_sk(sk)->rx_opt.saw_unknown && in bpf_skops_parse_hdr()
147 BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), in bpf_skops_parse_hdr()
149 bool parse_all_opt = BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), in bpf_skops_parse_hdr()
242 tcp_sk(sk)->advmss); in tcp_measure_rcv_mss()
279 len -= tcp_sk(sk)->tcp_header_len; in tcp_measure_rcv_mss()
295 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
335 tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR; in tcp_ecn_accept_cwr()
353 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce()
385 if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK) in tcp_ecn_check_ce()
415 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand()
[all …]
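The tcp_input.c hit at line 295 shows how the quick-ACK budget is sized from receiver state reached through tcp_sk(): roughly one quick ACK per two full-sized segments that fit in the current receive window. A hedged sketch of that calculation; the function name is hypothetical:

#include <net/tcp.h>

static unsigned int example_quickack_budget(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	/* rcv_wnd comes from the TCP socket, rcv_mss from the generic
	 * connection-socket ACK state. */
	return tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
}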
tcp_output.c
68 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent()
97 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq()
123 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss()
144 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart()
182 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_ack_sent()
258 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window()
308 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack()
321 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn()
365 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send()
478 if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), in bpf_skops_hdr_opt_len()
[all …]
tcp_recovery.c
12 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_reo_wnd()
65 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_detect_loss()
102 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_mark_lost()
156 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_reo_timeout()
194 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_update_reo_wnd()
225 struct tcp_sock *tp = tcp_sk(sk); in tcp_newreno_mark_lost()
tcp_dctcp.c
77 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_init()
105 struct tcp_sock *tp = tcp_sk(sk); in dctcp_ssthresh()
113 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_update_alpha()
147 struct tcp_sock *tp = tcp_sk(sk); in dctcp_react_to_loss()
185 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_get_info()
212 struct tcp_sock *tp = tcp_sk(sk); in dctcp_cwnd_undo()
tcp_westwood.c
73 w->snd_una = tcp_sk(sk)->snd_una; in tcp_westwood_init()
127 w->snd_una = tcp_sk(sk)->snd_una; in westwood_update_window()
165 const struct tcp_sock *tp = tcp_sk(sk); in westwood_fast_bw()
182 const struct tcp_sock *tp = tcp_sk(sk); in westwood_acked_count()
219 const struct tcp_sock *tp = tcp_sk(sk); in tcp_westwood_bw_rttmin()
242 struct tcp_sock *tp = tcp_sk(sk); in tcp_westwood_event()
tcp_cubic.c
113 return tcp_sk(sk)->tcp_mstamp; in bictcp_clock_us()
118 struct tcp_sock *tp = tcp_sk(sk); in bictcp_hystart_reset()
137 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; in cubictcp_init()
147 delta = now - tcp_sk(sk)->lsndtime; in cubictcp_cwnd_event()
324 struct tcp_sock *tp = tcp_sk(sk); in cubictcp_cong_avoid()
341 const struct tcp_sock *tp = tcp_sk(sk); in cubictcp_recalc_ssthresh()
386 struct tcp_sock *tp = tcp_sk(sk); in hystart_update()
448 const struct tcp_sock *tp = tcp_sk(sk); in cubictcp_acked()
tcp_cdg.c
143 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_hystart_update()
244 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_backoff()
265 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_cong_avoid()
302 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_acked()
331 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_ssthresh()
348 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_cwnd_event()
376 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_init()
tcp_fastopen.c
177 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_add_skb()
266 tp = tcp_sk(child); in tcp_fastopen_create_child()
344 tcp_sk(sk)->fastopen_no_cookie || in tcp_fastopen_no_cookie()
440 tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE; in tcp_fastopen_cookie_check()
454 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_defer_connect()
549 tcp_sk(sk)->syn_fastopen_ch = 1; in tcp_fastopen_active_should_disable()
561 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_active_disable_ofo_check()
588 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_active_detect_blackhole()
tcp_rate.c
42 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_sent()
82 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_delivered()
118 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_gen()
190 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_check_app_limited()
tcp_highspeed.c
102 struct tcp_sock *tp = tcp_sk(sk); in hstcp_init()
114 struct tcp_sock *tp = tcp_sk(sk); in hstcp_cong_avoid()
153 const struct tcp_sock *tp = tcp_sk(sk); in hstcp_ssthresh()
tcp.c
421 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock()
506 const struct tcp_sock *tp = tcp_sk(sk); in tcp_poll()
608 struct tcp_sock *tp = tcp_sk(sk); in tcp_ioctl()
665 struct tcp_sock *tp = tcp_sk(sk); in tcp_skb_entail()
710 struct tcp_sock *tp = tcp_sk(sk); in tcp_push()
921 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_size_goal()
975 struct tcp_sock *tp = tcp_sk(sk); in tcp_build_frag()
1036 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_sendpages()
1165 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_fastopen()
1211 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_locked()
[all …]
tcp_illinois.c
59 struct tcp_sock *tp = tcp_sk(sk); in rtt_reset()
224 struct tcp_sock *tp = tcp_sk(sk); in update_params()
262 struct tcp_sock *tp = tcp_sk(sk); in tcp_illinois_cong_avoid()
297 struct tcp_sock *tp = tcp_sk(sk); in tcp_illinois_ssthresh()
tcp_hybla.c
39 tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC), in hybla_recalc_param()
48 struct tcp_sock *tp = tcp_sk(sk); in hybla_init()
92 struct tcp_sock *tp = tcp_sk(sk); in hybla_cong_avoid()
tcp_dctcp.h
6 struct tcp_sock *tp = tcp_sk(sk); in dctcp_ece_ack_cwr()
35 *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt; in dctcp_ece_ack_update()
tcp_scalable.c
20 struct tcp_sock *tp = tcp_sk(sk); in tcp_scalable_cong_avoid()
36 const struct tcp_sock *tp = tcp_sk(sk); in tcp_scalable_ssthresh()
tcp_yeah.c
43 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_init()
60 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_cong_avoid()
189 const struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_ssthresh()
tcp_bic.c
77 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; in bictcp_init()
142 struct tcp_sock *tp = tcp_sk(sk); in bictcp_cong_avoid()
163 const struct tcp_sock *tp = tcp_sk(sk); in bictcp_recalc_ssthresh()
tcp_diag.c
27 const struct tcp_sock *tp = tcp_sk(sk); in tcp_diag_get_info()
123 md5sig = rcu_dereference(tcp_sk(sk)->md5sig_info); in tcp_diag_get_aux()
156 md5sig = rcu_dereference(tcp_sk(sk)->md5sig_info); in tcp_diag_get_aux_size()
tcp_nv.c
126 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_reset()
183 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_cong_avoid()
211 const struct tcp_sock *tp = tcp_sk(sk); in tcpnv_recalc_ssthresh()
243 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_acked()
tcp_lp.c
139 struct tcp_sock *tp = tcp_sk(sk); in tcp_lp_remote_hz_estimator()
193 struct tcp_sock *tp = tcp_sk(sk); in tcp_lp_owd_calculator()
273 struct tcp_sock *tp = tcp_sk(sk); in tcp_lp_pkts_acked()
tcp_cong.c
181 tcp_sk(sk)->prior_ssthresh = 0; in tcp_init_congestion_control()
438 struct tcp_sock *tp = tcp_sk(sk); in tcp_reno_cong_avoid()
457 const struct tcp_sock *tp = tcp_sk(sk); in tcp_reno_ssthresh()
465 const struct tcp_sock *tp = tcp_sk(sk); in tcp_reno_undo_cwnd()
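The congestion-control modules above (BBR, CUBIC, DCTCP, Westwood, the Reno helpers in tcp_cong.c, and the rest) all follow the same shape: every ops callback receives the generic struct sock and immediately derives the TCP state with tcp_sk(sk). A hedged sketch of a Reno-style callback pair; the example_* names are hypothetical, and snd_cwnd is accessed directly as in older trees (newer ones go through an accessor):

#include <net/tcp.h>

static u32 example_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Halve the congestion window on loss, but keep at least 2 segments. */
	return max(tp->snd_cwnd >> 1U, 2U);
}

static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* Exponential growth below ssthresh, additive increase above it. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}

In a real module these callbacks would be collected in a struct tcp_congestion_ops and registered with tcp_register_congestion_control().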
/net/mptcp/
protocol.c
477 const struct tcp_sock *tp = tcp_sk(ssk); in mptcp_subflow_could_cleanup()
579 tp = tcp_sk(ssk); in __mptcp_move_skbs_from_subflow()
1353 tcp_mark_push(tcp_sk(ssk), skb); in mptcp_sendmsg_frag()
1360 tcp_mark_push(tcp_sk(ssk), skb); in mptcp_sendmsg_frag()
1410 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); in mptcp_sendmsg_frag()
1470 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp); in mptcp_subflow_active()
1524 if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd) in mptcp_subflow_get_send()
1547 tcp_sk(msk->last_snd)->snd_wnd); in mptcp_subflow_get_send()
1557 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); in mptcp_push_release()
1702 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, in __mptcp_subflow_push_pending()
[all …]
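In the MPTCP hits, tcp_sk() is applied to the subflow's underlying TCP socket (ssk) rather than to the MPTCP socket itself, for example to check the subflow's send window before pushing data or to advance write_seq. A minimal sketch of that access pattern; the helper name is hypothetical:

#include <net/tcp.h>

static bool example_subflow_has_send_window(const struct sock *ssk)
{
	/* ssk is the TCP socket backing one MPTCP subflow. */
	return tcp_sk(ssk)->snd_wnd != 0;
}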
/net/ipv6/
tcp_ipv6.c
152 struct tcp_sock *tp = tcp_sk(sk); in tcp_v6_connect()
359 mtu = READ_ONCE(tcp_sk(sk)->mtu_info); in tcp_v6_mtu_reduced()
364 if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache) in tcp_v6_mtu_reduced()
425 tp = tcp_sk(sk); in tcp_v6_err()
901 struct sock *ctl_sk = net->ipv6.tcp_sk; in tcp_v6_send_response()
1170 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, in tcp_v6_reqsk_send_ack()
1272 newtp = tcp_sk(newsk); in tcp_v6_syn_recv_sock()
1339 newtp = tcp_sk(newsk); in tcp_v6_syn_recv_sock()
1397 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst)); in tcp_v6_syn_recv_sock()
1579 tp = tcp_sk(sk); in tcp_v6_do_rcv()
[all …]
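The tcp_ipv6.c hits around line 359 show the path-MTU handling pattern: the MTU reported by the ICMPv6 error path is read from mtu_info, and a reaction is only worthwhile if that MTU actually yields an MSS below the cached one. A hedged sketch of the check; the function name is hypothetical:

#include <net/tcp.h>

static bool example_pmtu_shrinks_mss(struct sock *sk)
{
	u32 mtu = READ_ONCE(tcp_sk(sk)->mtu_info);

	/* Only act if the new path MTU produces a smaller MSS than the
	 * one currently cached on the socket. */
	return tcp_mtu_to_mss(sk, mtu) < tcp_sk(sk)->mss_cache;
}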
