Lines Matching refs:icsk

74 	struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_event_new_data_sent()  local
82 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || in tcp_event_new_data_sent()
83 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { in tcp_event_new_data_sent()
164 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_sent() local
175 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) in tcp_event_data_sent()
176 icsk->icsk_ack.pingpong = 1; in tcp_event_data_sent()
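
In tcp_event_data_sent() (lines 175-176 above), the connection drops into delayed-ACK "pingpong" mode whenever new data goes out within one ACK timeout (ato) of the last received segment. A minimal userspace sketch of just that test, with made-up millisecond values standing in for the jiffies-based fields:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Stand-ins for icsk_ack.lrcvtime and icsk_ack.ato (ms). */
        uint32_t now = 1000, lrcvtime = 980, ato = 40, pingpong = 0;

        /* Same shape as the check in tcp_event_data_sent(): data sent
         * shortly after data was received looks interactive, so ACKs
         * may be delayed and piggybacked on the next data segment. */
        if ((uint32_t)(now - lrcvtime) < ato)
            pingpong = 1;

        printf("pingpong=%u\n", (unsigned)pingpong);
        return 0;
    }
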
913 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_transmit_skb() local
1011 icsk->icsk_af_ops->send_check(sk, skb); in __tcp_transmit_skb()
1035 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in __tcp_transmit_skb()
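
The two calls above from __tcp_transmit_skb() go through icsk->icsk_af_ops, a per-address-family operations table, so the TCP core never branches on IPv4 vs IPv6 itself. A toy sketch of that indirection; the struct and function names here are invented for illustration:

    #include <stdio.h>

    struct toy_af_ops {
        int net_header_len;
        void (*send_check)(const char *who);
        int  (*queue_xmit)(const char *who);
    };

    static void v4_send_check(const char *who) { printf("%s: IPv4 checksum\n", who); }
    static int  v4_queue_xmit(const char *who) { printf("%s: IPv4 xmit\n", who); return 0; }

    static const struct toy_af_ops toy_ipv4_ops = {
        .net_header_len = 20,
        .send_check     = v4_send_check,
        .queue_xmit     = v4_queue_xmit,
    };

    int main(void)
    {
        /* Chosen once at socket setup, then used blindly on every send. */
        const struct toy_af_ops *af_ops = &toy_ipv4_ops;

        af_ops->send_check("sk");        /* like icsk->icsk_af_ops->send_check() */
        return af_ops->queue_xmit("sk"); /* like icsk->icsk_af_ops->queue_xmit() */
    }
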
1321 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_mtu_to_mss() local
1327 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); in __tcp_mtu_to_mss()
1330 if (icsk->icsk_af_ops->net_frag_header_len) { in __tcp_mtu_to_mss()
1334 mss_now -= icsk->icsk_af_ops->net_frag_header_len; in __tcp_mtu_to_mss()
1342 mss_now -= icsk->icsk_ext_hdr_len; in __tcp_mtu_to_mss()
1362 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mss_to_mtu() local
1367 icsk->icsk_ext_hdr_len + in tcp_mss_to_mtu()
1368 icsk->icsk_af_ops->net_header_len; in tcp_mss_to_mtu()
1371 if (icsk->icsk_af_ops->net_frag_header_len) { in tcp_mss_to_mtu()
1375 mtu += icsk->icsk_af_ops->net_frag_header_len; in tcp_mss_to_mtu()
1384 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_init() local
1387 icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1; in tcp_mtup_init()
1388 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1389 icsk->icsk_af_ops->net_header_len; in tcp_mtup_init()
1390 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss); in tcp_mtup_init()
1391 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_init()
1392 if (icsk->icsk_mtup.enabled) in tcp_mtup_init()
1393 icsk->icsk_mtup.probe_timestamp = tcp_time_stamp; in tcp_mtup_init()
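
tcp_mtup_init() seeds the MTU-probing search window: search_high comes from the peer's advertised mss_clamp plus headers, search_low from the tcp_base_mss sysctl converted to an MTU, and tcp_sync_mss() later clamps search_high down to the learned path MTU. A sketch of that window setup, assuming IPv4 header sizes and example values for mss_clamp and the base-MSS sysctl:

    #include <stdio.h>

    #define NET_HDR_LEN 20   /* assumed IPv4 header */
    #define TCP_HDR_LEN 20
    #define BASE_MSS    1024 /* example stand-in for sysctl_tcp_base_mss */

    static int mss_to_mtu(int mss)
    {
        return mss + NET_HDR_LEN + TCP_HDR_LEN;
    }

    int main(void)
    {
        int mss_clamp = 1460; /* example peer-advertised MSS */

        /* Upper bound: what the peer said it can take, as an MTU.
         * Lower bound: the base MSS we are always willing to fall back to. */
        int search_high = mss_clamp + TCP_HDR_LEN + NET_HDR_LEN;
        int search_low  = mss_to_mtu(BASE_MSS);

        printf("MTU probe window: [%d, %d]\n", search_low, search_high);
        return 0;
    }
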
1422 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_sync_mss() local
1425 if (icsk->icsk_mtup.search_high > pmtu) in tcp_sync_mss()
1426 icsk->icsk_mtup.search_high = pmtu; in tcp_sync_mss()
1432 icsk->icsk_pmtu_cookie = pmtu; in tcp_sync_mss()
1433 if (icsk->icsk_mtup.enabled) in tcp_sync_mss()
1434 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1792 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_tso_should_defer() local
1802 if (icsk->icsk_ca_state >= TCP_CA_Recovery) in tcp_tso_should_defer()
1870 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_check_reprobe() local
1877 delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp; in tcp_mtu_check_reprobe()
1882 icsk->icsk_mtup.probe_size = 0; in tcp_mtu_check_reprobe()
1883 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
1885 icsk->icsk_af_ops->net_header_len; in tcp_mtu_check_reprobe()
1886 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
1889 icsk->icsk_mtup.probe_timestamp = tcp_time_stamp; in tcp_mtu_check_reprobe()
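
tcp_mtu_check_reprobe() widens the search window again once enough time has passed since the last probe, so a path whose MTU has grown can be rediscovered. A simplified sketch of that timestamp check; the tick values and the reprobe interval are made up (the kernel derives the interval from a sysctl):

    #include <stdio.h>

    int main(void)
    {
        /* Abstract "ticks"; names mirror the icsk_mtup fields above. */
        unsigned int now = 5000, probe_timestamp = 1000;
        unsigned int reprobe_interval = 3000; /* assumed example interval */
        int search_low = 1064, search_high = 1100;

        if (now - probe_timestamp >= reprobe_interval) {
            /* Like lines 1882-1889: forget the old ceiling, reset it to
             * mss_clamp plus headers, and restart the interval clock. */
            search_high = 1500;
            probe_timestamp = now;
        }

        printf("window [%d, %d], last reset at %u\n",
               search_low, search_high, probe_timestamp);
        return 0;
    }
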
1905 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_probe() local
1919 if (!icsk->icsk_mtup.enabled || in tcp_mtu_probe()
1920 icsk->icsk_mtup.probe_size || in tcp_mtu_probe()
1931 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
1932 icsk->icsk_mtup.search_low) >> 1); in tcp_mtu_probe()
1934 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; in tcp_mtu_probe()
1939 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
2033 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
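
tcp_mtu_probe() binary-searches the window set up above: each probe uses the MSS of the midpoint (search_high + search_low) / 2, and probing stops once the window is narrower than a small threshold. A simplified userspace walk of that search which pretends every probe is ACKed (the kernel only raises search_low after a probe actually succeeds); the header sizes and the convergence threshold are assumptions:

    #include <stdio.h>

    #define NET_HDR_LEN 20 /* assumed IPv4 */
    #define TCP_HDR_LEN 20

    static int mtu_to_mss(int mtu) { return mtu - NET_HDR_LEN - TCP_HDR_LEN; }
    static int mss_to_mtu(int mss) { return mss + NET_HDR_LEN + TCP_HDR_LEN; }

    int main(void)
    {
        int search_low = 1064, search_high = 1500;

        while (search_high - search_low > 8) { /* 8: assumed stop threshold */
            /* Midpoint of the window, expressed as an MSS, as in line 1931. */
            int probe_size = mtu_to_mss((search_high + search_low) >> 1);

            /* Pretend the probe was ACKed and move the floor up. */
            search_low = mss_to_mtu(probe_size);
            printf("probed mss=%d, window now [%d, %d]\n",
                   probe_size, search_low, search_high);
        }
        return 0;
    }
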
2195 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_schedule_loss_probe() local
2200 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) in tcp_schedule_loss_probe()
2203 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { in tcp_schedule_loss_probe()
2214 if (icsk->icsk_pending != ICSK_TIME_RETRANS) in tcp_schedule_loss_probe()
2423 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_select_window() local
2431 int mss = icsk->icsk_ack.rcv_mss; in __tcp_select_window()
2443 icsk->icsk_ack.quick = 0; in __tcp_select_window()
2614 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_retransmit_skb() local
2619 if (icsk->icsk_mtup.probe_size) { in __tcp_retransmit_skb()
2620 icsk->icsk_mtup.probe_size = 0; in __tcp_retransmit_skb()
2741 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_can_forward_retransmit() local
2745 if (icsk->icsk_ca_state != TCP_CA_Recovery) in tcp_can_forward_retransmit()
2776 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_xmit_retransmit_queue() local
2844 if (icsk->icsk_ca_state != TCP_CA_Loss) in tcp_xmit_retransmit_queue()
3095 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_dst_init() local
3105 module_put(icsk->icsk_ca_ops->owner); in tcp_ca_dst_init()
3106 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); in tcp_ca_dst_init()
3107 icsk->icsk_ca_ops = ca; in tcp_ca_dst_init()
3348 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_delayed_ack() local
3349 int ato = icsk->icsk_ack.ato; in tcp_send_delayed_ack()
3356 if (icsk->icsk_ack.pingpong || in tcp_send_delayed_ack()
3357 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) in tcp_send_delayed_ack()
3381 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { in tcp_send_delayed_ack()
3385 if (icsk->icsk_ack.blocked || in tcp_send_delayed_ack()
3386 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { in tcp_send_delayed_ack()
3391 if (!time_before(timeout, icsk->icsk_ack.timeout)) in tcp_send_delayed_ack()
3392 timeout = icsk->icsk_ack.timeout; in tcp_send_delayed_ack()
3394 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; in tcp_send_delayed_ack()
3395 icsk->icsk_ack.timeout = timeout; in tcp_send_delayed_ack()
3396 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
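
tcp_send_delayed_ack() arms the delayed-ACK timer for roughly one ato in the future, but it never pushes an already-armed timer later, and if the old deadline is about to fire (within ato/4) it sends the ACK immediately instead. A sketch of that timeout arithmetic with invented jiffies values:

    #include <stdio.h>

    int main(void)
    {
        /* Invented jiffies values standing in for the icsk_ack fields. */
        unsigned long jiffies = 10000, ato = 40;
        unsigned long old_timeout = 10030; /* deadline of a timer armed earlier */
        int timer_already_armed = 1;

        unsigned long timeout = jiffies + ato;

        if (timer_already_armed) {
            if (old_timeout <= jiffies + (ato >> 2)) {
                /* Old deadline is imminent: ACK now rather than re-arm. */
                printf("send ACK immediately\n");
                return 0;
            }
            if (old_timeout < timeout) /* never delay an armed timer */
                timeout = old_timeout;
        }

        printf("delayed ACK timer set for %lu (now %lu)\n", timeout, jiffies);
        return 0;
    }
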
3533 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_probe0() local
3542 icsk->icsk_probes_out = 0; in tcp_send_probe0()
3543 icsk->icsk_backoff = 0; in tcp_send_probe0()
3548 if (icsk->icsk_backoff < sysctl_tcp_retries2) in tcp_send_probe0()
3549 icsk->icsk_backoff++; in tcp_send_probe0()
3550 icsk->icsk_probes_out++; in tcp_send_probe0()
3559 if (!icsk->icsk_probes_out) in tcp_send_probe0()
3560 icsk->icsk_probes_out = 1; in tcp_send_probe0()
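
tcp_send_probe0() implements zero-window probing with exponential backoff: each unanswered probe bumps icsk_backoff (capped by tcp_retries2), and the next probe is scheduled after roughly rto << backoff, bounded by the maximum RTO. A sketch of how that wait grows; the RTO, cap, and retries values below are example numbers, not kernel constants:

    #include <stdio.h>

    #define RTO_MS     200    /* assumed base RTO */
    #define RTO_MAX_MS 120000 /* assumed upper bound, like TCP_RTO_MAX */
    #define RETRIES2   15     /* example stand-in for sysctl_tcp_retries2 */

    int main(void)
    {
        unsigned int backoff = 0, probes_out = 0;

        for (int i = 0; i < 12; i++) {
            unsigned int wait = RTO_MS << backoff;

            if (wait > RTO_MAX_MS)
                wait = RTO_MAX_MS;
            printf("probe %u sent, next one in %u ms\n", probes_out, wait);

            /* Mirror of lines 3548-3550: back off, but never past the cap. */
            if (backoff < RETRIES2)
                backoff++;
            probes_out++;
        }
        return 0;
    }
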