Home
last modified time | relevance | path

Searched refs:tcp_sock (Results 1 – 25 of 40) sorted by relevance

1 2

/net/ipv4/
tcp_input.c 326 static void tcp_ecn_queue_cwr(struct tcp_sock *tp) in tcp_ecn_queue_cwr()
346 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) in tcp_ecn_withdraw_cwr()
353 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce()
389 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_synack()
395 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_syn()
401 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_ecn_echo()
415 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand()
474 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window()
511 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window()
543 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space()
[all …]
tcp_recovery.c 12 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_reo_wnd()
37 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd) in tcp_rack_skb_timeout()
65 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_detect_loss()
102 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_mark_lost()
123 void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, in tcp_rack_advance()
156 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_reo_timeout()
194 struct tcp_sock *tp = tcp_sk(sk); in tcp_rack_update_reo_wnd()
225 struct tcp_sock *tp = tcp_sk(sk); in tcp_newreno_mark_lost()
bpf_tcp_ca.c 125 case offsetof(struct tcp_sock, snd_cwnd): in bpf_tcp_ca_btf_struct_access()
126 end = offsetofend(struct tcp_sock, snd_cwnd); in bpf_tcp_ca_btf_struct_access()
128 case offsetof(struct tcp_sock, snd_cwnd_cnt): in bpf_tcp_ca_btf_struct_access()
129 end = offsetofend(struct tcp_sock, snd_cwnd_cnt); in bpf_tcp_ca_btf_struct_access()
131 case offsetof(struct tcp_sock, snd_ssthresh): in bpf_tcp_ca_btf_struct_access()
132 end = offsetofend(struct tcp_sock, snd_ssthresh); in bpf_tcp_ca_btf_struct_access()
134 case offsetof(struct tcp_sock, ecn_flags): in bpf_tcp_ca_btf_struct_access()
135 end = offsetofend(struct tcp_sock, ecn_flags); in bpf_tcp_ca_btf_struct_access()
152 BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt) in BPF_CALL_2() argument
tcp_output.c 53 void tcp_mstamp_refresh(struct tcp_sock *tp) in tcp_mstamp_refresh()
68 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent()
97 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq()
123 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss()
144 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart()
161 static void tcp_event_data_sent(struct tcp_sock *tp, in tcp_event_data_sent()
182 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_ack_sent()
258 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window()
308 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack()
321 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn()
[all …]
tcp_dctcp.c 67 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) in dctcp_reset()
77 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_init()
105 struct tcp_sock *tp = tcp_sk(sk); in dctcp_ssthresh()
113 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_update_alpha()
147 struct tcp_sock *tp = tcp_sk(sk); in dctcp_react_to_loss()
185 const struct tcp_sock *tp = tcp_sk(sk); in dctcp_get_info()
212 struct tcp_sock *tp = tcp_sk(sk); in dctcp_cwnd_undo()
tcp_bbr.c 266 struct tcp_sock *tp = tcp_sk(sk); in bbr_init_pacing_rate_from_rtt()
285 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_pacing_rate()
303 struct tcp_sock *tp = tcp_sk(sk); in bbr_tso_segs_goal()
320 struct tcp_sock *tp = tcp_sk(sk); in bbr_save_cwnd()
331 struct tcp_sock *tp = tcp_sk(sk); in bbr_cwnd_event()
436 struct tcp_sock *tp = tcp_sk(sk); in bbr_packets_in_net_at_edt()
480 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_cwnd_to_recover_or_restore()
519 struct tcp_sock *tp = tcp_sk(sk); in bbr_set_cwnd()
554 struct tcp_sock *tp = tcp_sk(sk); in bbr_is_next_cycle_phase()
590 struct tcp_sock *tp = tcp_sk(sk); in bbr_advance_cycle_phase()
[all …]
tcp_cdg.c 143 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_hystart_update()
244 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_backoff()
265 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_cong_avoid()
302 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_acked()
331 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_ssthresh()
348 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_cwnd_event()
376 struct tcp_sock *tp = tcp_sk(sk); in tcp_cdg_init()
tcp_rate.c 42 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_sent()
82 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_skb_delivered()
118 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_gen()
190 struct tcp_sock *tp = tcp_sk(sk); in tcp_rate_check_app_limited()
tcp_timer.c 104 struct tcp_sock *tp = tcp_sk(sk); in tcp_out_of_resources()
234 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_timeout()
360 struct tcp_sock *tp = tcp_sk(sk); in tcp_probe_timer()
410 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_synack_timer()
443 const struct tcp_sock *tp = tcp_sk(sk); in tcp_rtx_probe0_timed_out()
470 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_timer()
699 struct tcp_sock *tp = tcp_sk(sk); in tcp_keepalive_timer()
784 struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer); in tcp_compressed_ack_kick()
tcp_highspeed.c 102 struct tcp_sock *tp = tcp_sk(sk); in hstcp_init()
114 struct tcp_sock *tp = tcp_sk(sk); in hstcp_cong_avoid()
153 const struct tcp_sock *tp = tcp_sk(sk); in hstcp_ssthresh()
tcp_westwood.c 165 const struct tcp_sock *tp = tcp_sk(sk); in westwood_fast_bw()
182 const struct tcp_sock *tp = tcp_sk(sk); in westwood_acked_count()
219 const struct tcp_sock *tp = tcp_sk(sk); in tcp_westwood_bw_rttmin()
242 struct tcp_sock *tp = tcp_sk(sk); in tcp_westwood_event()
tcp_illinois.c 59 struct tcp_sock *tp = tcp_sk(sk); in rtt_reset()
224 struct tcp_sock *tp = tcp_sk(sk); in update_params()
262 struct tcp_sock *tp = tcp_sk(sk); in tcp_illinois_cong_avoid()
297 struct tcp_sock *tp = tcp_sk(sk); in tcp_illinois_ssthresh()
tcp_cubic.c 118 struct tcp_sock *tp = tcp_sk(sk); in bictcp_hystart_reset()
324 struct tcp_sock *tp = tcp_sk(sk); in cubictcp_cong_avoid()
341 const struct tcp_sock *tp = tcp_sk(sk); in cubictcp_recalc_ssthresh()
386 struct tcp_sock *tp = tcp_sk(sk); in hystart_update()
448 const struct tcp_sock *tp = tcp_sk(sk); in cubictcp_acked()
tcp_cong.c 396 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) in tcp_slow_start()
410 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) in tcp_cong_avoid_ai()
438 struct tcp_sock *tp = tcp_sk(sk); in tcp_reno_cong_avoid()
457 const struct tcp_sock *tp = tcp_sk(sk); in tcp_reno_ssthresh()
465 const struct tcp_sock *tp = tcp_sk(sk); in tcp_reno_undo_cwnd()
tcp_scalable.c 20 struct tcp_sock *tp = tcp_sk(sk); in tcp_scalable_cong_avoid()
36 const struct tcp_sock *tp = tcp_sk(sk); in tcp_scalable_ssthresh()
tcp_yeah.c 43 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_init()
60 struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_cong_avoid()
189 const struct tcp_sock *tp = tcp_sk(sk); in tcp_yeah_ssthresh()
tcp.c 398 static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp) in tcp_compute_delivery_rate()
419 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock()
504 const struct tcp_sock *tp = tcp_sk(sk); in tcp_poll()
606 struct tcp_sock *tp = tcp_sk(sk); in tcp_ioctl()
650 void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) in tcp_mark_push()
656 static inline bool forced_push(const struct tcp_sock *tp) in forced_push()
663 struct tcp_sock *tp = tcp_sk(sk); in tcp_skb_entail()
680 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags) in tcp_mark_urg()
708 struct tcp_sock *tp = tcp_sk(sk); in tcp_push()
919 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_size_goal()
[all …]
tcp_vegas.c 73 const struct tcp_sock *tp = tcp_sk(sk); in vegas_enable()
160 static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) in tcp_vegas_ssthresh()
167 struct tcp_sock *tp = tcp_sk(sk); in tcp_vegas_cong_avoid()
tcp_nv.c 126 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_reset()
183 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_cong_avoid()
211 const struct tcp_sock *tp = tcp_sk(sk); in tcpnv_recalc_ssthresh()
243 struct tcp_sock *tp = tcp_sk(sk); in tcpnv_acked()
tcp_lp.c 139 struct tcp_sock *tp = tcp_sk(sk); in tcp_lp_remote_hz_estimator()
193 struct tcp_sock *tp = tcp_sk(sk); in tcp_lp_owd_calculator()
273 struct tcp_sock *tp = tcp_sk(sk); in tcp_lp_pkts_acked()
tcp_fastopen.c 177 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_add_skb()
248 struct tcp_sock *tp; in tcp_fastopen_create_child()
454 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_defer_connect()
561 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_active_disable_ofo_check()
588 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_active_detect_blackhole()
tcp_minisocks.c 256 const struct tcp_sock *tp = tcp_sk(sk); in tcp_time_wait()
365 const struct tcp_sock *tp = tcp_sk(sk_listener); in tcp_openreq_init_rwin()
400 static void tcp_ecn_openreq_child(struct tcp_sock *tp, in tcp_ecn_openreq_child()
435 static void smc_check_reset_syn_req(struct tcp_sock *oldtp, in smc_check_reset_syn_req()
437 struct tcp_sock *newtp) in smc_check_reset_syn_req()
464 struct tcp_sock *oldtp, *newtp; in tcp_create_openreq_child()
tcp_htcp.c 105 const struct tcp_sock *tp = tcp_sk(sk); in measure_achieved_throughput()
224 const struct tcp_sock *tp = tcp_sk(sk); in htcp_recalc_ssthresh()
233 struct tcp_sock *tp = tcp_sk(sk); in htcp_cong_avoid()
tcp_hybla.c 48 struct tcp_sock *tp = tcp_sk(sk); in hybla_init()
92 struct tcp_sock *tp = tcp_sk(sk); in hybla_cong_avoid()
tcp_veno.c 121 struct tcp_sock *tp = tcp_sk(sk); in tcp_veno_cong_avoid()
197 const struct tcp_sock *tp = tcp_sk(sk); in tcp_veno_ssthresh()

1 2