Cross-reference hits for the icsk (struct inet_connection_sock) pointer, grouped by directory and file; each entry lists the source line, the code at that line, and the enclosing function. Entries marked "local" or "argument" are declarations of icsk.

/net/dccp/
timer.c
    37  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_write_timeout() local
    41  if (icsk->icsk_retransmits != 0)  in dccp_write_timeout()
    43  retry_until = icsk->icsk_syn_retries ?  in dccp_write_timeout()
    46  if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {  in dccp_write_timeout()
    76  if (icsk->icsk_retransmits >= retry_until) {  in dccp_write_timeout()
    89  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_retransmit_timer() local
   102  if (icsk->icsk_retransmits == 0)  in dccp_retransmit_timer()
   110  if (--icsk->icsk_retransmits == 0)  in dccp_retransmit_timer()
   111  icsk->icsk_retransmits = 1;  in dccp_retransmit_timer()
   113  min(icsk->icsk_rto,  in dccp_retransmit_timer()
   [all …]
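The timer.c hits are the standard connection-timer pattern: count retransmissions in icsk_retransmits, give up once a retry limit (retry_until, derived from icsk_syn_retries or sysctl_dccp_retries1) is exceeded, and otherwise re-arm the timer with an exponentially backed-off RTO clamped to a maximum. A minimal user-space sketch of that arithmetic; the constants are illustrative, not the kernel's:

#include <stdio.h>

#define RTO_INIT_MS  1000   /* illustrative initial RTO                */
#define RTO_MAX_MS  64000   /* illustrative cap, cf. DCCP_RTO_MAX      */
#define RETRY_LIMIT     8   /* cf. retry_until / sysctl_dccp_retries1  */

int main(void)
{
    unsigned int rto = RTO_INIT_MS;
    unsigned int retransmits = 0;

    for (;;) {
        if (retransmits >= RETRY_LIMIT) {
            puts("write timeout: give up on the connection");
            break;
        }
        printf("retransmit #%u, timer re-armed for %u ms\n",
               ++retransmits, rto);
        /* exponential backoff, clamped at the maximum RTO */
        rto = (rto * 2 > RTO_MAX_MS) ? RTO_MAX_MS : rto * 2;
    }
    return 0;
}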
output.c
    49  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_transmit_skb() local
    76  if (icsk->icsk_retransmits == 0)  in dccp_transmit_skb()
   134  icsk->icsk_af_ops->send_check(sk, skb);  in dccp_transmit_skb()
   141  err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);  in dccp_transmit_skb()
   164  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_sync_mss() local
   170  cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +  in dccp_sync_mss()
   190  icsk->icsk_pmtu_cookie = pmtu;  in dccp_sync_mss()
   540  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_connect() local
   567  icsk->icsk_retransmits = 0;  in dccp_connect()
   569  icsk->icsk_rto, DCCP_RTO_MAX);  in dccp_connect()
   [all …]
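dccp_sync_mss() caches the discovered path MTU in icsk_pmtu_cookie and derives the maximum packet size by subtracting the network header (icsk_af_ops->net_header_len), any extension or option headers (icsk_ext_hdr_len), and the transport header from it. A rough user-space sketch of that derivation; the header sizes below are assumptions for illustration, not the kernel's exact accounting:

#include <stdio.h>

/* illustrative header costs; the kernel reads these from
 * icsk_af_ops->net_header_len and icsk_ext_hdr_len */
#define NET_HDR_LEN   20    /* IPv4 header without options          */
#define EXT_HDR_LEN    0    /* IP options / IPv6 extension headers  */
#define XPORT_HDR_LEN 16    /* assumed transport header overhead    */

static unsigned int sync_mps(unsigned int pmtu)
{
    unsigned int cur_mps = pmtu;

    cur_mps -= NET_HDR_LEN + EXT_HDR_LEN + XPORT_HDR_LEN;
    return cur_mps;
}

int main(void)
{
    unsigned int pmtu = 1500;    /* icsk_pmtu_cookie would cache this */

    printf("pmtu %u -> maximum packet size %u\n", pmtu, sync_mps(pmtu));
    return 0;
}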
diag.c
    22  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_get_info() local
    27  info->tcpi_retransmits = icsk->icsk_retransmits;  in dccp_get_info()
    28  info->tcpi_probes = icsk->icsk_probes_out;  in dccp_get_info()
    29  info->tcpi_backoff = icsk->icsk_backoff;  in dccp_get_info()
    30  info->tcpi_pmtu = icsk->icsk_pmtu_cookie;  in dccp_get_info()
input.c
   405  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_rcv_request_sent_state_process() local
   447  dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);  in dccp_rcv_request_sent_state_process()
   476  icsk->icsk_af_ops->rebuild_header(sk);  in dccp_rcv_request_sent_state_process()
   483  if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||  in dccp_rcv_request_sent_state_process()
   484  icsk->icsk_accept_queue.rskq_defer_accept) {  in dccp_rcv_request_sent_state_process()
minisocks.c
    42  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_time_wait() local
    43  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);  in dccp_time_wait()
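The dccp_time_wait() line computes its timeout as (rto << 2) - (rto >> 1), i.e. 4 x RTO minus 0.5 x RTO = 3.5 x RTO, using shifts instead of floating point; tcp_time_wait() in tcp_minisocks.c below uses the same trick. A tiny check of that identity with an arbitrary sample value:

#include <stdio.h>

int main(void)
{
    const unsigned int rto = 200;    /* any RTO value, e.g. in jiffies */
    const unsigned int timeout = (rto << 2) - (rto >> 1);

    /* 4*rto - rto/2 equals 3.5*rto exactly for even rto; for odd rto it
     * lands half a unit above because rto >> 1 rounds down */
    printf("rto=%u -> time-wait timeout=%u (3.5 * rto = %.1f)\n",
           rto, timeout, 3.5 * rto);
    return 0;
}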
proto.c
   186  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_init_sock() local
   188  icsk->icsk_rto = DCCP_TIMEOUT_INIT;  in dccp_init_sock()
   189  icsk->icsk_syn_retries = sysctl_dccp_request_retries;  in dccp_init_sock()
   193  icsk->icsk_sync_mss = dccp_sync_mss;  in dccp_init_sock()
   260  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_disconnect() local
   300  icsk->icsk_backoff = 0;  in dccp_disconnect()
   304  WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);  in dccp_disconnect()
ipv6.c
   779  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_v6_connect() local
   847  u32 exthdrlen = icsk->icsk_ext_hdr_len;  in dccp_v6_connect()
   859  icsk->icsk_af_ops = &dccp_ipv6_mapped;  in dccp_v6_connect()
   864  icsk->icsk_ext_hdr_len = exthdrlen;  in dccp_v6_connect()
   865  icsk->icsk_af_ops = &dccp_ipv6_af_ops;  in dccp_v6_connect()
   904  icsk->icsk_ext_hdr_len = 0;  in dccp_v6_connect()
   906  icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;  in dccp_v6_connect()
/net/ipv4/
tcp_timer.c
   117  static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)  in tcp_mtu_probing() argument
   123  if (!icsk->icsk_mtup.enabled) {  in tcp_mtu_probing()
   124  icsk->icsk_mtup.enabled = 1;  in tcp_mtu_probing()
   125  icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;  in tcp_mtu_probing()
   126  tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);  in tcp_mtu_probing()
   132  mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;  in tcp_mtu_probing()
   136  icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);  in tcp_mtu_probing()
   137  tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);  in tcp_mtu_probing()
   177  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_write_timeout() local
   183  if (icsk->icsk_retransmits) {  in tcp_write_timeout()
   [all …]
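On repeated retransmission timeouts, tcp_mtu_probing() first enables MTU probing and re-syncs the MSS from the cached icsk_pmtu_cookie; on later timeouts it halves the MSS corresponding to the current probe floor (icsk_mtup.search_low), converts it back to an MTU, and syncs again. A rough sketch of that floor-lowering step; mtu_to_mss()/mss_to_mtu() are hypothetical stand-ins for tcp_mtu_to_mss()/tcp_mss_to_mtu(), whose real results depend on the socket's header sizes, and the clamp value is an assumption:

#include <stdio.h>

#define HDR_OVERHEAD 40     /* assumed IP + TCP header cost for this sketch */
#define MSS_FLOOR   512     /* assumed lower bound on the probed MSS        */

static unsigned int mtu_to_mss(unsigned int mtu) { return mtu - HDR_OVERHEAD; }
static unsigned int mss_to_mtu(unsigned int mss) { return mss + HDR_OVERHEAD; }

int main(void)
{
    unsigned int search_low = 1500;    /* current probe floor, as an MTU */
    unsigned int mss;

    /* timeout path: halve the MSS derived from the floor, clamp it,
     * then convert back to an MTU and store it as the new floor */
    mss = mtu_to_mss(search_low) >> 1;
    if (mss < MSS_FLOOR)
        mss = MSS_FLOOR;
    search_low = mss_to_mtu(mss);

    printf("new probe floor: mtu=%u (mss=%u)\n", search_low, mss);
    return 0;
}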
tcp_cong.c
   155  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_assign_congestion_control() local
   161  icsk->icsk_ca_ops = ca;  in tcp_assign_congestion_control()
   175  memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));  in tcp_assign_congestion_control()
   184  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_init_congestion_control() local
   187  if (icsk->icsk_ca_ops->init)  in tcp_init_congestion_control()
   188  icsk->icsk_ca_ops->init(sk);  in tcp_init_congestion_control()
   198  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_reinit_congestion_control() local
   201  icsk->icsk_ca_ops = ca;  in tcp_reinit_congestion_control()
   202  icsk->icsk_ca_setsockopt = 1;  in tcp_reinit_congestion_control()
   211  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_cleanup_congestion_control() local
   [all …]
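tcp_assign_congestion_control() and its neighbours show the ops-table pattern around icsk_ca_ops: the socket keeps a pointer to a struct of congestion-control hooks, per-connection private state (icsk_ca_priv) is zeroed when an algorithm is attached, and the optional init hook runs afterwards. A stripped-down user-space sketch of the same pattern; the struct layout and names here are invented for illustration, not the kernel's tcp_congestion_ops:

#include <stdio.h>
#include <string.h>

struct ca_ops {                       /* toy stand-in for tcp_congestion_ops */
    const char *name;
    void (*init)(void *ca_priv);      /* optional hook                       */
};

struct conn {                         /* toy stand-in for the icsk fields    */
    const struct ca_ops *ca_ops;      /* cf. icsk_ca_ops                     */
    char ca_priv[64];                 /* cf. icsk_ca_priv                    */
};

static void toy_init(void *ca_priv)
{
    (void)ca_priv;
    puts("congestion-control init hook called");
}

static const struct ca_ops toy_reno = { .name = "toy_reno", .init = toy_init };

static void assign_congestion_control(struct conn *c, const struct ca_ops *ops)
{
    c->ca_ops = ops;
    memset(c->ca_priv, 0, sizeof(c->ca_priv));   /* fresh private state */
}

static void init_congestion_control(struct conn *c)
{
    if (c->ca_ops->init)                         /* the hook is optional */
        c->ca_ops->init(c->ca_priv);
}

int main(void)
{
    struct conn c;

    assign_congestion_control(&c, &toy_reno);
    init_congestion_control(&c);
    printf("active algorithm: %s\n", c.ca_ops->name);
    return 0;
}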
inet_connection_sock.c
   267  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_wait_for_connect() local
   289  if (reqsk_queue_empty(&icsk->icsk_accept_queue))  in inet_csk_wait_for_connect()
   294  if (!reqsk_queue_empty(&icsk->icsk_accept_queue))  in inet_csk_wait_for_connect()
   315  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_accept() local
   316  struct request_sock_queue *queue = &icsk->icsk_accept_queue;  in inet_csk_accept()
   384  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_init_xmit_timers() local
   386  setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,  in inet_csk_init_xmit_timers()
   388  setup_timer(&icsk->icsk_delack_timer, delack_handler,  in inet_csk_init_xmit_timers()
   391  icsk->icsk_pending = icsk->icsk_ack.pending = 0;  in inet_csk_init_xmit_timers()
   397  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_clear_xmit_timers() local
   [all …]
tcp_output.c
    74  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_event_new_data_sent() local
    82  if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||  in tcp_event_new_data_sent()
    83  icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {  in tcp_event_new_data_sent()
   164  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_event_data_sent() local
   175  if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)  in tcp_event_data_sent()
   176  icsk->icsk_ack.pingpong = 1;  in tcp_event_data_sent()
   913  const struct inet_connection_sock *icsk = inet_csk(sk);  in __tcp_transmit_skb() local
  1011  icsk->icsk_af_ops->send_check(sk, skb);  in __tcp_transmit_skb()
  1035  err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);  in __tcp_transmit_skb()
  1321  const struct inet_connection_sock *icsk = inet_csk(sk);  in __tcp_mtu_to_mss() local
   [all …]
tcp_input.c
   136  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_measure_rcv_mss() local
   137  const unsigned int lss = icsk->icsk_ack.last_seg_size;  in tcp_measure_rcv_mss()
   140  icsk->icsk_ack.last_seg_size = 0;  in tcp_measure_rcv_mss()
   146  if (len >= icsk->icsk_ack.rcv_mss) {  in tcp_measure_rcv_mss()
   147  icsk->icsk_ack.rcv_mss = len;  in tcp_measure_rcv_mss()
   168  icsk->icsk_ack.last_seg_size = len;  in tcp_measure_rcv_mss()
   170  icsk->icsk_ack.rcv_mss = len;  in tcp_measure_rcv_mss()
   174  if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)  in tcp_measure_rcv_mss()
   175  icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;  in tcp_measure_rcv_mss()
   176  icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;  in tcp_measure_rcv_mss()
   [all …]
tcp.c
   382  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_init_sock() local
   390  icsk->icsk_rto = TCP_TIMEOUT_INIT;  in tcp_init_sock()
   420  icsk->icsk_sync_mss = tcp_sync_mss;  in tcp_init_sock()
  1406  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_cleanup_rbuf() local
  1409  if (icsk->icsk_ack.blocked ||  in tcp_cleanup_rbuf()
  1411  tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||  in tcp_cleanup_rbuf()
  1419  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||  in tcp_cleanup_rbuf()
  1420  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&  in tcp_cleanup_rbuf()
  1421  !icsk->icsk_ack.pingpong)) &&  in tcp_cleanup_rbuf()
  2216  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_disconnect() local
   [all …]
tcp_ipv4.c
   362  struct inet_connection_sock *icsk;  in tcp_v4_err() local
   413  icsk = inet_csk(sk);  in tcp_v4_err()
   462  if (seq != tp->snd_una || !icsk->icsk_retransmits ||  in tcp_v4_err()
   463  !icsk->icsk_backoff || fastopen)  in tcp_v4_err()
   473  icsk->icsk_backoff--;  in tcp_v4_err()
   474  icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :  in tcp_v4_err()
   476  icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);  in tcp_v4_err()
   478  remaining = icsk->icsk_rto -  in tcp_v4_err()
   479  min(icsk->icsk_rto,  in tcp_v4_err()
  1812  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_v4_init_sock()
   [all …]
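The tcp_v4_err() hits show how an ICMP error can unwind one step of retransmission backoff: icsk_backoff is decremented, the base RTO is recomputed from the smoothed RTT (or a fallback), inet_csk_rto_backoff() reapplies the remaining backoff as roughly rto << backoff clamped to TCP_RTO_MAX, and the retransmit timer is re-armed with whatever time is left. A small sketch of that arithmetic with illustrative values:

#include <stdio.h>

#define TCP_RTO_MAX_MS 120000U    /* illustrative cap */

/* cf. inet_csk_rto_backoff(): base RTO shifted by the backoff count, clamped */
static unsigned int rto_backoff(unsigned int rto, unsigned int backoff)
{
    unsigned long long when = (unsigned long long)rto << backoff;

    return when > TCP_RTO_MAX_MS ? TCP_RTO_MAX_MS : (unsigned int)when;
}

int main(void)
{
    unsigned int base_rto = 300;    /* recomputed from srtt in the kernel   */
    unsigned int backoff  = 4;      /* icsk_backoff after several timeouts  */
    unsigned int elapsed  = 1000;   /* ms since the last (re)transmission   */
    unsigned int rto, remaining;

    backoff--;                      /* the ICMP error undoes one step       */
    rto = rto_backoff(base_rto, backoff);
    remaining = rto > elapsed ? rto - elapsed : 0;

    printf("rto=%u ms, re-arm retransmit timer in %u ms\n", rto, remaining);
    return 0;
}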
tcp_htcp.c
    84  const struct inet_connection_sock *icsk = inet_csk(sk);  in measure_rtt() local
    92  if (icsk->icsk_ca_state == TCP_CA_Open) {  in measure_rtt()
   104  const struct inet_connection_sock *icsk = inet_csk(sk);  in measure_achieved_throughput() local
   109  if (icsk->icsk_ca_state == TCP_CA_Open)  in measure_achieved_throughput()
   119  if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder))) {  in measure_achieved_throughput()
inet_diag.c
   110  int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,  in inet_sk_diag_fill() argument
   187  if (!icsk) {  in inet_sk_diag_fill()
   194  if (icsk->icsk_pending == ICSK_TIME_RETRANS ||  in inet_sk_diag_fill()
   195  icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||  in inet_sk_diag_fill()
   196  icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {  in inet_sk_diag_fill()
   198  r->idiag_retrans = icsk->icsk_retransmits;  in inet_sk_diag_fill()
   199  r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);  in inet_sk_diag_fill()
   200  } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {  in inet_sk_diag_fill()
   202  r->idiag_retrans = icsk->icsk_probes_out;  in inet_sk_diag_fill()
   203  r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);  in inet_sk_diag_fill()
   [all …]
tcp_minisocks.c
   270  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_time_wait() local
   282  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);  in tcp_time_wait()
   412  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_ca_openreq_child() local
   422  icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);  in tcp_ca_openreq_child()
   423  icsk->icsk_ca_ops = ca;  in tcp_ca_openreq_child()
   431  (!icsk->icsk_ca_setsockopt ||  in tcp_ca_openreq_child()
   432  !try_module_get(icsk->icsk_ca_ops->owner)))  in tcp_ca_openreq_child()
inet_timewait_sock.c
   105  const struct inet_connection_sock *icsk = inet_csk(sk);  in __inet_twsk_hashdance() local
   116  tw->tw_tb = icsk->icsk_bind_hash;  in __inet_twsk_hashdance()
   117  WARN_ON(!icsk->icsk_bind_hash);  in __inet_twsk_hashdance()
tcp_yeah.c
    61  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_yeah_pkts_acked() local
    64  if (icsk->icsk_ca_state == TCP_CA_Open)  in tcp_yeah_pkts_acked()
tcp_bic.c
   202  const struct inet_connection_sock *icsk = inet_csk(sk);  in bictcp_acked() local
   204  if (icsk->icsk_ca_state == TCP_CA_Open) {  in bictcp_acked()
syncookies.c
   222  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_get_cookie_sock() local
   226  child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,  in tcp_get_cookie_sock()
ip_sockglue.c
   644  struct inet_connection_sock *icsk = inet_csk(sk);  in do_ip_setsockopt() local
   652  icsk->icsk_ext_hdr_len -= old->opt.optlen;  in do_ip_setsockopt()
   654  icsk->icsk_ext_hdr_len += opt->opt.optlen;  in do_ip_setsockopt()
   655  icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);  in do_ip_setsockopt()
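The do_ip_setsockopt() lines show the bookkeeping when IP options change on a connected socket: the old option length is subtracted from icsk_ext_hdr_len, the new length is added, and the protocol's icsk_sync_mss hook is called with the cached icsk_pmtu_cookie so the MSS tracks the new header overhead (ipv6_sockglue.c below does the same for IPv6). A sketch of that delta-update-plus-hook pattern; the types and header sizes are invented for illustration:

#include <stdio.h>

struct conn {                                   /* toy stand-in for the icsk fields */
    unsigned int ext_hdr_len;                   /* cf. icsk_ext_hdr_len             */
    unsigned int pmtu_cookie;                   /* cf. icsk_pmtu_cookie             */
    void (*sync_mss)(struct conn *c, unsigned int pmtu);    /* cf. icsk_sync_mss   */
};

static void toy_sync_mss(struct conn *c, unsigned int pmtu)
{
    /* 40 bytes of base IPv4 + TCP header assumed for this sketch */
    printf("new mss = %u\n", pmtu - 40 - c->ext_hdr_len);
}

static void set_ip_options(struct conn *c, unsigned int old_optlen,
                           unsigned int new_optlen)
{
    c->ext_hdr_len -= old_optlen;               /* drop the old options' cost  */
    c->ext_hdr_len += new_optlen;               /* account for the new options */
    c->sync_mss(c, c->pmtu_cookie);             /* re-derive MSS from the PMTU */
}

int main(void)
{
    struct conn c = { .ext_hdr_len = 0, .pmtu_cookie = 1500,
                      .sync_mss = toy_sync_mss };

    set_ip_options(&c, 0, 12);    /* e.g. enabling a 12-byte IP option */
    return 0;
}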
tcp_metrics.c
   369  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_update_metrics() local
   384  if (icsk->icsk_backoff || !tp->srtt_us) {  in tcp_update_metrics()
   449  icsk->icsk_ca_state == TCP_CA_Open) {  in tcp_update_metrics()
/net/ipv6/
tcp_ipv6.c
   118  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_v6_connect() local
   198  u32 exthdrlen = icsk->icsk_ext_hdr_len;  in tcp_v6_connect()
   210  icsk->icsk_af_ops = &ipv6_mapped;  in tcp_v6_connect()
   219  icsk->icsk_ext_hdr_len = exthdrlen;  in tcp_v6_connect()
   220  icsk->icsk_af_ops = &ipv6_specific;  in tcp_v6_connect()
   272  icsk->icsk_ext_hdr_len = 0;  in tcp_v6_connect()
   274  icsk->icsk_ext_hdr_len = opt->opt_flen +  in tcp_v6_connect()
  1691  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_v6_init_sock() local
  1695  icsk->icsk_af_ops = &ipv6_specific;  in tcp_v6_init_sock()
  1752  const struct inet_connection_sock *icsk = inet_csk(sp);  in get_tcp6_sock() local
   [all …]
ipv6_sockglue.c
   109  struct inet_connection_sock *icsk = inet_csk(sk);  in ipv6_update_options() local
   110  icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;  in ipv6_update_options()
   111  icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);  in ipv6_update_options()
   220  struct inet_connection_sock *icsk = inet_csk(sk);  in do_ipv6_setsockopt() local
   226  icsk->icsk_af_ops = &ipv4_specific;  in do_ipv6_setsockopt()
   229  tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);  in do_ipv6_setsockopt()