Lines matching refs: icsk. All matches below fall in the Linux TCP input path (net/ipv4/tcp_input.c); the leading number on each line is the line number in that file.

136 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss() local
137 const unsigned int lss = icsk->icsk_ack.last_seg_size; in tcp_measure_rcv_mss()
140 icsk->icsk_ack.last_seg_size = 0; in tcp_measure_rcv_mss()
146 if (len >= icsk->icsk_ack.rcv_mss) { in tcp_measure_rcv_mss()
147 icsk->icsk_ack.rcv_mss = len; in tcp_measure_rcv_mss()
168 icsk->icsk_ack.last_seg_size = len; in tcp_measure_rcv_mss()
170 icsk->icsk_ack.rcv_mss = len; in tcp_measure_rcv_mss()
174 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) in tcp_measure_rcv_mss()
175 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; in tcp_measure_rcv_mss()
176 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; in tcp_measure_rcv_mss()
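Note: tcp_measure_rcv_mss() infers the peer's effective MSS from the sizes of arriving segments. A minimal user-space sketch of the update rule visible at lines 146-147 and 168-170: the estimate grows whenever a larger segment arrives, a shorter data-bearing segment is remembered, and two equal short segments in a row become the new estimate. Struct and function names here are illustrative, not the kernel's.

    #include <stdio.h>

    struct rcv_mss_est {
        unsigned int rcv_mss;        /* current receiver-MSS estimate */
        unsigned int last_seg_size;  /* previous short segment length */
    };

    static void measure_rcv_mss(struct rcv_mss_est *e, unsigned int len)
    {
        unsigned int lss = e->last_seg_size;

        e->last_seg_size = 0;
        if (len >= e->rcv_mss) {
            e->rcv_mss = len;        /* estimate only grows here */
        } else {
            e->last_seg_size = len;  /* remember the short segment */
            if (len == lss)
                e->rcv_mss = len;    /* two in a row: treat as new MSS */
        }
    }

    int main(void)
    {
        struct rcv_mss_est e = { 536, 0 };
        unsigned int lens[] = { 1448, 1200, 1200 };
        for (int i = 0; i < 3; i++) {
            measure_rcv_mss(&e, lens[i]);
            printf("len=%u -> rcv_mss=%u\n", lens[i], e.rcv_mss);
        }
        return 0;
    }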
182 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack() local
183 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
188 if (quickacks > icsk->icsk_ack.quick) in tcp_incr_quickack()
189 icsk->icsk_ack.quick = quickacks; in tcp_incr_quickack()
194 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode() local
197 icsk->icsk_ack.pingpong = 0; in tcp_enter_quickack_mode()
198 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_enter_quickack_mode()
208 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode() local
212 (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong); in tcp_in_quickack_mode()
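Note: the quickack budget computed at line 183 is the number of immediate ACKs needed to cover half the receive window, and quickack mode (line 212) holds while that budget is unspent and the connection is not in interactive (pingpong) mode. A sketch of that arithmetic; the floor of 2 is in the kernel function but not among the matched lines:

    static unsigned int quickack_budget(unsigned int rcv_wnd,
                                        unsigned int rcv_mss)
    {
        unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

        if (quickacks == 0)
            quickacks = 2;           /* kernel floors the budget */
        return quickacks;
    }

    static int in_quickack_mode(unsigned int quick, int pingpong)
    {
        return quick && !pingpong;   /* mirrors the test at line 212 */
    }

    /* Example: rcv_wnd = 65535, rcv_mss = 1448
     *          -> 65535 / 2896 = 22 quick ACKs. */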
454 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window() local
456 icsk->icsk_ack.quick = 0; in tcp_clamp_window()
644 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_recv() local
655 if (!icsk->icsk_ack.ato) { in tcp_event_data_recv()
660 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_event_data_recv()
662 int m = now - icsk->icsk_ack.lrcvtime; in tcp_event_data_recv()
666 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; in tcp_event_data_recv()
667 } else if (m < icsk->icsk_ack.ato) { in tcp_event_data_recv()
668 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; in tcp_event_data_recv()
669 if (icsk->icsk_ack.ato > icsk->icsk_rto) in tcp_event_data_recv()
670 icsk->icsk_ack.ato = icsk->icsk_rto; in tcp_event_data_recv()
671 } else if (m > icsk->icsk_rto) { in tcp_event_data_recv()
679 icsk->icsk_ack.lrcvtime = now; in tcp_event_data_recv()
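Note: tcp_event_data_recv() maintains ato, the delayed-ACK timeout, as a decaying average of the data inter-arrival gap m. The first segment simply seeds ato with TCP_ATO_MIN (lines 655-660); afterwards the adaptation at lines 662-671 applies, and a gap larger than the RTO instead falls back to quickack mode. A sketch of those branches:

    static unsigned int update_ato(unsigned int ato, unsigned int m,
                                   unsigned int ato_min, unsigned int rto)
    {
        if (m <= ato_min / 2) {
            ato = (ato >> 1) + ato_min / 2;  /* dense stream: decay toward min */
        } else if (m < ato) {
            ato = (ato >> 1) + m;            /* blend in the new gap */
            if (ato > rto)
                ato = rto;                   /* never delay an ACK past the RTO */
        }
        /* m > rto is handled separately: re-enter quickack mode (line 671). */
        return ato;
    }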
1904 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_loss() local
1907 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; in tcp_enter_loss()
1911 if (icsk->icsk_ca_state <= TCP_CA_Disorder || in tcp_enter_loss()
1913 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { in tcp_enter_loss()
1915 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
1955 if (icsk->icsk_ca_state <= TCP_CA_Disorder && in tcp_enter_loss()
1968 (new_recovery || icsk->icsk_retransmits) && in tcp_enter_loss()
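Note: tcp_enter_loss() cuts ssthresh through the CA module's ssthresh hook (line 1915) only on a fresh loss episode, not when an RTO fires inside an already-running one. A sketch of the guard at lines 1911-1913; the clause at line 1912 does not mention icsk and is absent from the matches, so it is omitted here:

    static int is_fresh_loss_episode(int ca_state, unsigned int retransmits)
    {
        /* CA state ordering assumed: Open < Disorder < CWR < Recovery < Loss */
        enum { CA_OPEN, CA_DISORDER, CA_CWR, CA_RECOVERY, CA_LOSS };

        return ca_state <= CA_DISORDER ||
               (ca_state == CA_LOSS && retransmits == 0);
    }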
2387 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_undo_cwnd_reduction() local
2389 if (icsk->icsk_ca_ops->undo_cwnd) in tcp_undo_cwnd_reduction()
2390 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
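Note: icsk_ca_ops is a pluggable vtable of congestion-control hooks. Mandatory hooks are called directly (cong_avoid at line 3031), optional ones are NULL-checked first (undo_cwnd here, pkts_acked at lines 3282-3283, in_ack_event at lines 3558-3559). A sketch of the pattern; struct, member names, and the fallback shown are illustrative:

    struct ca_ops {
        unsigned int (*ssthresh)(void *sk);                 /* mandatory */
        void (*cong_avoid)(void *sk, unsigned int acked);   /* mandatory */
        unsigned int (*undo_cwnd)(void *sk);                /* optional  */
    };

    static unsigned int undo_cwnd(const struct ca_ops *ops, void *sk,
                                  unsigned int fallback_cwnd)
    {
        if (ops->undo_cwnd)
            return ops->undo_cwnd(sk);  /* module-specific undo (line 2390) */
        return fallback_cwnd;           /* illustrative default only */
    }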
2592 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_failed() local
2594 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; in tcp_mtup_probe_failed()
2595 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_probe_failed()
2602 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_success() local
2608 icsk->icsk_mtup.probe_size; in tcp_mtup_probe_success()
2613 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; in tcp_mtup_probe_success()
2614 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_probe_success()
2615 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtup_probe_success()
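Note: PMTU probing keeps a [search_low, search_high] interval. A failed probe caps search_high just below the probed size (line 2594); a successful probe raises search_low to it (line 2613), after which line 2615 re-syncs the MSS. The interval therefore converges like a binary search. A sketch under that reading; the midpoint selection is a simplification, not the kernel's exact probe-size logic:

    struct mtu_probe {
        int search_low;   /* largest size known to work */
        int search_high;  /* smallest size known (or assumed) to fail, minus 1 */
    };

    static void probe_failed(struct mtu_probe *p, int probe_size)
    {
        p->search_high = probe_size - 1;  /* path cannot carry probe_size */
    }

    static void probe_succeeded(struct mtu_probe *p, int probe_size)
    {
        p->search_low = probe_size;       /* path carries at least this */
    }

    static int next_probe_size(const struct mtu_probe *p)
    {
        return (p->search_low + p->search_high) / 2;  /* illustrative midpoint */
    }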
2625 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_simple_retransmit() local
2659 if (icsk->icsk_ca_state != TCP_CA_Loss) { in tcp_simple_retransmit()
2792 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastretrans_alert() local
2817 if (icsk->icsk_ca_state == TCP_CA_Open) { in tcp_fastretrans_alert()
2821 switch (icsk->icsk_ca_state) { in tcp_fastretrans_alert()
2847 switch (icsk->icsk_ca_state) { in tcp_fastretrans_alert()
2866 if (icsk->icsk_ca_state != TCP_CA_Open && in tcp_fastretrans_alert()
2878 if (icsk->icsk_ca_state <= TCP_CA_Disorder) in tcp_fastretrans_alert()
2887 if (icsk->icsk_ca_state < TCP_CA_CWR && in tcp_fastretrans_alert()
2888 icsk->icsk_mtup.probe_size && in tcp_fastretrans_alert()
3029 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_avoid() local
3031 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); in tcp_cong_avoid()
3040 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rearm_rto() local
3054 if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || in tcp_rearm_rto()
3055 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { in tcp_rearm_rto()
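Note: the connection keeps a single retransmission-timer slot whose meaning is tagged by icsk_pending, which is why both tcp_rearm_rto() (lines 3054-3055) and tcp_ack() (lines 3601-3602, 3688) inspect it before touching the timer. A sketch of the idea; enum values and names are illustrative:

    enum pending_timer {
        TIME_NONE,
        TIME_RETRANS,        /* classic RTO */
        TIME_EARLY_RETRANS,  /* early-retransmit delay */
        TIME_LOSS_PROBE,     /* tail loss probe */
    };

    static int must_rearm_as_rto(enum pending_timer pending)
    {
        /* An early-retransmit or loss-probe timer is replaced by a
         * plain RTO once the special handling no longer applies. */
        return pending == TIME_EARLY_RETRANS || pending == TIME_LOSS_PROBE;
    }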
3132 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clean_rtx_queue() local
3243 if (unlikely(icsk->icsk_mtup.probe_size && in tcp_clean_rtx_queue()
3282 if (icsk->icsk_ca_ops->pkts_acked) in tcp_clean_rtx_queue()
3283 icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us); in tcp_clean_rtx_queue()
3290 icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3293 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3298 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3303 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3314 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack_probe() local
3319 icsk->icsk_backoff = 0; in tcp_ack_probe()
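Note: zero-window probes back off exponentially while unanswered; an ACK that opens the window clears icsk_backoff (line 3319) so the next probe, if any, starts from the base timeout again. A sketch of the backoff shape; the cap constant is illustrative:

    static unsigned int probe_timeout(unsigned int base_rto,
                                      unsigned int backoff,
                                      unsigned int max_timeout)
    {
        unsigned int when = base_rto << backoff;  /* exponential backoff */

        return when > max_timeout ? max_timeout : when;
    }

    /* ACK with an open window: backoff = 0, next probe waits base_rto. */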
3556 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_ack_event() local
3558 if (icsk->icsk_ca_ops->in_ack_event) in tcp_in_ack_event()
3559 icsk->icsk_ca_ops->in_ack_event(sk, flags); in tcp_in_ack_event()
3565 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack() local
3601 if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || in tcp_ack()
3602 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) in tcp_ack()
3607 icsk->icsk_retransmits = 0; in tcp_ack()
3659 icsk->icsk_probes_out = 0; in tcp_ack()
3688 if (icsk->icsk_pending == ICSK_TIME_RETRANS) in tcp_ack()
5590 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_finish_connect() local
5593 icsk->icsk_ack.lrcvtime = tcp_time_stamp; in tcp_finish_connect()
5596 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
5601 icsk->icsk_af_ops->rebuild_header(sk); in tcp_finish_connect()
5681 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_synsent_state_process() local
5776 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5798 icsk->icsk_accept_queue.rskq_defer_accept || in tcp_rcv_synsent_state_process()
5799 icsk->icsk_ack.pingpong) { in tcp_rcv_synsent_state_process()
5868 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
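Note: after the SYN-ACK is processed, the final ACK of the handshake may be delayed when defer-accept or interactive (pingpong) mode suggests it can piggyback on data about to be sent. A sketch of the matched clauses at lines 5798-5799; other clauses of the kernel's condition do not mention icsk and are absent here:

    static int can_delay_final_ack(int defer_accept, int pingpong)
    {
        return defer_accept || pingpong;
    }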
5914 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_state_process() local
5936 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) in tcp_rcv_state_process()
6012 icsk->icsk_af_ops->rebuild_header(sk); in tcp_rcv_state_process()
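Note: icsk_af_ops (lines 5596, 5601, 5936, 6012) is a per-address-family vtable: one pointer on the socket dispatches connection-level operations to the IPv4 or IPv6 implementation. A sketch of the indirection; struct and member names are illustrative:

    struct af_ops {
        int  (*conn_request)(void *sk, void *skb);        /* handle incoming SYN */
        int  (*rebuild_header)(void *sk);                 /* refresh cached route */
        void (*sk_rx_dst_set)(void *sk, const void *skb); /* cache rx route */
    };

    static int handle_syn(const struct af_ops *ops, void *sk, void *skb)
    {
        return ops->conn_request(sk, skb);  /* mirrors line 5936 */
    }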