/net/dccp/
timer.c
    33  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_write_timeout() (local)
    37  if (icsk->icsk_retransmits != 0)    in dccp_write_timeout()
    39  retry_until = icsk->icsk_syn_retries ?    in dccp_write_timeout()
    42  if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {    in dccp_write_timeout()
    72  if (icsk->icsk_retransmits >= retry_until) {    in dccp_write_timeout()
    85  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_retransmit_timer() (local)
    98  if (icsk->icsk_retransmits == 0)    in dccp_retransmit_timer()
   106  if (--icsk->icsk_retransmits == 0)    in dccp_retransmit_timer()
   107  icsk->icsk_retransmits = 1;    in dccp_retransmit_timer()
   109  min(icsk->icsk_rto,    in dccp_retransmit_timer()
  [all …]
output.c
    46  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_transmit_skb() (local)
    73  if (icsk->icsk_retransmits == 0)    in dccp_transmit_skb()
   131  icsk->icsk_af_ops->send_check(sk, skb);    in dccp_transmit_skb()
   138  err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);    in dccp_transmit_skb()
   163  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_sync_mss() (local)
   169  cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +    in dccp_sync_mss()
   189  icsk->icsk_pmtu_cookie = pmtu;    in dccp_sync_mss()
   546  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_connect() (local)
   573  icsk->icsk_retransmits = 0;    in dccp_connect()
   575  icsk->icsk_rto, DCCP_RTO_MAX);    in dccp_connect()
  [all …]
diag.c
    19  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_get_info() (local)
    24  info->tcpi_retransmits = icsk->icsk_retransmits;    in dccp_get_info()
    25  info->tcpi_probes = icsk->icsk_probes_out;    in dccp_get_info()
    26  info->tcpi_backoff = icsk->icsk_backoff;    in dccp_get_info()
    27  info->tcpi_pmtu = icsk->icsk_pmtu_cookie;    in dccp_get_info()
minisocks.c
    39  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_time_wait() (local)
    40  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);    in dccp_time_wait()
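The shift expression on line 40 is a fixed-point way of writing 3.5 * RTO: (rto << 2) is 4 * rto and (rto >> 1) is rto / 2, so the difference is 3.5 * rto with no floating point. The same expression recurs in tcp_time_wait() in tcp_minisocks.c further down. A stand-alone check of the arithmetic (illustrative only, not kernel code):

    #include <assert.h>

    int main(void)
    {
            unsigned int rto = 200;                        /* example value, in jiffies */
            unsigned int timeo = (rto << 2) - (rto >> 1);  /* 4*rto - rto/2 */

            assert(timeo == 700);                          /* 3.5 * 200 */
            return 0;
    }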
input.c
   401  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_rcv_request_sent_state_process() (local)
   443  dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);    in dccp_rcv_request_sent_state_process()
   472  icsk->icsk_af_ops->rebuild_header(sk);    in dccp_rcv_request_sent_state_process()
   480  icsk->icsk_accept_queue.rskq_defer_accept) {    in dccp_rcv_request_sent_state_process()
proto.c
   192  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_init_sock() (local)
   197  icsk->icsk_rto = DCCP_TIMEOUT_INIT;    in dccp_init_sock()
   198  icsk->icsk_syn_retries = sysctl_dccp_request_retries;    in dccp_init_sock()
   202  icsk->icsk_sync_mss = dccp_sync_mss;    in dccp_init_sock()
   258  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_disconnect() (local)
   296  icsk->icsk_backoff = 0;    in dccp_disconnect()
   300  WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);    in dccp_disconnect()
ipv6.c
   824  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_v6_connect() (local)
   892  u32 exthdrlen = icsk->icsk_ext_hdr_len;    in dccp_v6_connect()
   904  icsk->icsk_af_ops = &dccp_ipv6_mapped;    in dccp_v6_connect()
   909  icsk->icsk_ext_hdr_len = exthdrlen;    in dccp_v6_connect()
   910  icsk->icsk_af_ops = &dccp_ipv6_af_ops;    in dccp_v6_connect()
   952  icsk->icsk_ext_hdr_len = 0;    in dccp_v6_connect()
   954  icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;    in dccp_v6_connect()
/net/ipv4/
tcp_timer.c
    29  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_clamp_rto_to_user_timeout() (local)
    34  user_timeout = READ_ONCE(icsk->icsk_user_timeout);    in tcp_clamp_rto_to_user_timeout()
    36  return icsk->icsk_rto;    in tcp_clamp_rto_to_user_timeout()
    42  return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));    in tcp_clamp_rto_to_user_timeout()
    47  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_clamp_probe0_to_user_timeout() (local)
    51  user_timeout = READ_ONCE(icsk->icsk_user_timeout);    in tcp_clamp_probe0_to_user_timeout()
    52  if (!user_timeout || !icsk->icsk_probes_tstamp)    in tcp_clamp_probe0_to_user_timeout()
    55  elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;    in tcp_clamp_probe0_to_user_timeout()
   159  static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)    in tcp_mtu_probing() (argument)
   168  if (!icsk->icsk_mtup.enabled) {    in tcp_mtu_probing()
  [all …]
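The two clamp helpers above cap the pending timer by whatever remains of the TCP_USER_TIMEOUT budget: when the option is set, neither the next RTO nor the next zero-window probe is allowed to fire later than the point at which the user timeout would expire. A stand-alone approximation of that logic, using a hypothetical helper in milliseconds rather than jiffies (not the kernel function itself):

    static unsigned int clamp_rto_to_user_timeout(unsigned int rto_ms,
                                                  unsigned int user_timeout_ms,
                                                  unsigned int elapsed_ms)
    {
            unsigned int remaining_ms;

            if (user_timeout_ms == 0)               /* TCP_USER_TIMEOUT not set */
                    return rto_ms;
            if (elapsed_ms >= user_timeout_ms)      /* budget already spent */
                    return 1;
            remaining_ms = user_timeout_ms - elapsed_ms;
            return rto_ms < remaining_ms ? rto_ms : remaining_ms;
    }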
tcp_cong.c
    39  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_set_ca_state() (local)
    43  if (icsk->icsk_ca_ops->set_state)    in tcp_set_ca_state()
    44  icsk->icsk_ca_ops->set_state(sk, ca_state);    in tcp_set_ca_state()
    45  icsk->icsk_ca_state = ca_state;    in tcp_set_ca_state()
   222  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_assign_congestion_control() (local)
   229  icsk->icsk_ca_ops = ca;    in tcp_assign_congestion_control()
   232  memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));    in tcp_assign_congestion_control()
   241  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_init_congestion_control() (local)
   244  if (icsk->icsk_ca_ops->init)    in tcp_init_congestion_control()
   245  icsk->icsk_ca_ops->init(sk);    in tcp_init_congestion_control()
  [all …]
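tcp_cong.c only ever reaches the congestion-control algorithm through icsk->icsk_ca_ops, so the references above are the dispatch points for whichever module is attached to the socket. A rough sketch, not taken from this tree, of how a module supplies those callbacks; the field names follow struct tcp_congestion_ops in include/net/tcp.h and should be verified against the kernel version being targeted:

    #include <linux/module.h>
    #include <net/tcp.h>

    static void example_init(struct sock *sk)
    {
            /* per-socket private state lives in icsk->icsk_ca_priv, reached
             * via inet_csk_ca(sk); tcp_assign_congestion_control() zeroes it
             * before init runs (the memset on line 232 above)
             */
    }

    static void example_set_state(struct sock *sk, u8 new_state)
    {
            /* invoked from tcp_set_ca_state(), e.g. on entry to TCP_CA_Loss */
    }

    static struct tcp_congestion_ops example_cong_ops __read_mostly = {
            .name           = "example",
            .owner          = THIS_MODULE,
            .init           = example_init,
            .set_state      = example_set_state,
            /* mandatory hooks; borrow the Reno helpers for the sketch */
            .ssthresh       = tcp_reno_ssthresh,
            .undo_cwnd      = tcp_reno_undo_cwnd,
            .cong_avoid     = tcp_reno_cong_avoid,
    };

    static int __init example_cc_register(void)
    {
            return tcp_register_congestion_control(&example_cong_ops);
    }

    static void __exit example_cc_unregister(void)
    {
            tcp_unregister_congestion_control(&example_cong_ops);
    }

    module_init(example_cc_register);
    module_exit(example_cc_unregister);
    MODULE_LICENSE("GPL");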
tcp_ulp.c
   106  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_update_ulp() (local)
   108  if (icsk->icsk_ulp_ops->update)    in tcp_update_ulp()
   109  icsk->icsk_ulp_ops->update(sk, proto, write_space);    in tcp_update_ulp()
   114  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_cleanup_ulp() (local)
   120  if (!icsk->icsk_ulp_ops)    in tcp_cleanup_ulp()
   123  if (icsk->icsk_ulp_ops->release)    in tcp_cleanup_ulp()
   124  icsk->icsk_ulp_ops->release(sk);    in tcp_cleanup_ulp()
   125  module_put(icsk->icsk_ulp_ops->owner);    in tcp_cleanup_ulp()
   127  icsk->icsk_ulp_ops = NULL;    in tcp_cleanup_ulp()
   132  struct inet_connection_sock *icsk = inet_csk(sk);    in __tcp_set_ulp() (local)
  [all …]
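icsk->icsk_ulp_ops is normally populated because userspace asked for an upper-layer protocol by name through the TCP_ULP socket option; __tcp_set_ulp() above is the kernel side of that request, and tcp_cleanup_ulp() drops the module reference again. A minimal user-space sketch, assuming a connected TCP socket and a kernel with the "tls" ULP available:

    #include <netinet/in.h>
    #include <netinet/tcp.h>        /* TCP_ULP needs reasonably recent headers */
    #include <sys/socket.h>
    #include <stdio.h>

    /* Ask the kernel to attach the "tls" upper-layer protocol to an
     * already-connected TCP socket.
     */
    int attach_tls_ulp(int fd)
    {
            if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")) < 0) {
                    perror("setsockopt(TCP_ULP)");
                    return -1;
            }
            return 0;
    }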
tcp_output.c
    68  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_event_new_data_sent() (local)
    81  if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)    in tcp_event_new_data_sent()
   165  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_event_data_sent() (local)
   176  if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)    in tcp_event_data_sent()
  1254  const struct inet_connection_sock *icsk = inet_csk(sk);    in __tcp_transmit_skb() (local)
  1392  INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,    in __tcp_transmit_skb()
  1423  err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,    in __tcp_transmit_skb()
  1706  const struct inet_connection_sock *icsk = inet_csk(sk);    in __tcp_mtu_to_mss() (local)
  1712  mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);    in __tcp_mtu_to_mss()
  1715  if (icsk->icsk_af_ops->net_frag_header_len) {    in __tcp_mtu_to_mss()
  [all …]
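The subtraction on line 1712 is the heart of the PMTU-to-MSS conversion: take the path MTU and strip the address-family network header (through icsk_af_ops) plus the basic TCP header. Back-of-the-envelope version for IPv4 with no IP or TCP options (illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int pmtu = 1500;       /* typical Ethernet path MTU */
            unsigned int ip_hdr = 20;       /* net_header_len for plain IPv4 */
            unsigned int tcp_hdr = 20;      /* sizeof(struct tcphdr), no options */
            unsigned int mss = pmtu - ip_hdr - tcp_hdr;

            printf("mss = %u\n", mss);      /* 1460 */
            return 0;
    }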
inet_connection_sock.c
   615  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_wait_for_connect() (local)
   637  if (reqsk_queue_empty(&icsk->icsk_accept_queue))    in inet_csk_wait_for_connect()
   642  if (!reqsk_queue_empty(&icsk->icsk_accept_queue))    in inet_csk_wait_for_connect()
   663  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_accept() (local)
   664  struct request_sock_queue *queue = &icsk->icsk_accept_queue;    in inet_csk_accept()
   760  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_init_xmit_timers() (local)
   762  timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);    in inet_csk_init_xmit_timers()
   763  timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);    in inet_csk_init_xmit_timers()
   765  icsk->icsk_pending = icsk->icsk_ack.pending = 0;    in inet_csk_init_xmit_timers()
   771  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_clear_xmit_timers() (local)
  [all …]
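inet_csk_init_xmit_timers() binds the retransmit and delayed-ACK handlers to timers embedded in the icsk. The underlying kernel pattern is timer_setup() at init time plus from_timer() in the callback to recover the enclosing object; a generic sketch with hypothetical names, not the ICSK code itself:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_conn {
            struct timer_list retrans_timer;
            /* ... connection state ... */
    };

    static void my_retrans_handler(struct timer_list *t)
    {
            struct my_conn *conn = from_timer(conn, t, retrans_timer);

            /* retransmit using conn->..., possibly re-arm with mod_timer() */
    }

    static void my_conn_init_timers(struct my_conn *conn)
    {
            timer_setup(&conn->retrans_timer, my_retrans_handler, 0);
            mod_timer(&conn->retrans_timer, jiffies + HZ);  /* fire in ~1 s */
    }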
tcp_input.c
   121  void clean_acked_data_enable(struct inet_connection_sock *icsk,    in clean_acked_data_enable() (argument)
   124  icsk->icsk_clean_acked = cad;    in clean_acked_data_enable()
   129  void clean_acked_data_disable(struct inet_connection_sock *icsk)    in clean_acked_data_disable() (argument)
   132  icsk->icsk_clean_acked = NULL;    in clean_acked_data_disable()
   230  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_measure_rcv_mss() (local)
   231  const unsigned int lss = icsk->icsk_ack.last_seg_size;    in tcp_measure_rcv_mss()
   234  icsk->icsk_ack.last_seg_size = 0;    in tcp_measure_rcv_mss()
   240  if (len >= icsk->icsk_ack.rcv_mss) {    in tcp_measure_rcv_mss()
   245  if (unlikely(len != icsk->icsk_ack.rcv_mss)) {    in tcp_measure_rcv_mss()
   256  icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,    in tcp_measure_rcv_mss()
  [all …]
tcp.c
   415  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_init_sock() (local)
   424  icsk->icsk_rto = TCP_TIMEOUT_INIT;    in tcp_init_sock()
   425  icsk->icsk_rto_min = TCP_RTO_MIN;    in tcp_init_sock()
   426  icsk->icsk_delack_max = TCP_DELACK_MAX;    in tcp_init_sock()
   457  icsk->icsk_sync_mss = tcp_sync_mss;    in tcp_init_sock()
  1457  const struct inet_connection_sock *icsk = inet_csk(sk);    in __tcp_cleanup_rbuf() (local)
  1460  tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||    in __tcp_cleanup_rbuf()
  1468  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||    in __tcp_cleanup_rbuf()
  1469  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&    in __tcp_cleanup_rbuf()
  3001  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_disconnect() (local)
  [all …]
tcp_minisocks.c
   282  const struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_time_wait() (local)
   291  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);    in tcp_time_wait()
   433  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_ca_openreq_child() (local)
   443  icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);    in tcp_ca_openreq_child()
   444  icsk->icsk_ca_ops = ca;    in tcp_ca_openreq_child()
   452  (!icsk->icsk_ca_setsockopt ||    in tcp_ca_openreq_child()
   453  !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))    in tcp_ca_openreq_child()
tcp_htcp.c
    84  const struct inet_connection_sock *icsk = inet_csk(sk);    in measure_rtt() (local)
    92  if (icsk->icsk_ca_state == TCP_CA_Open) {    in measure_rtt()
   104  const struct inet_connection_sock *icsk = inet_csk(sk);    in measure_achieved_throughput() (local)
   109  if (icsk->icsk_ca_state == TCP_CA_Open)    in measure_achieved_throughput()
   119  if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder))) {    in measure_achieved_throughput()
inet_timewait_sock.c
   118  const struct inet_connection_sock *icsk = inet_csk(sk);    in inet_twsk_hashdance() (local)
   134  tw->tw_tb = icsk->icsk_bind_hash;    in inet_twsk_hashdance()
   135  WARN_ON(!icsk->icsk_bind_hash);    in inet_twsk_hashdance()
   138  tw->tw_tb2 = icsk->icsk_bind2_hash;    in inet_twsk_hashdance()
   139  WARN_ON(!icsk->icsk_bind2_hash);    in inet_twsk_hashdance()
tcp_diag.c
   115  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_diag_get_aux() (local)
   135  ulp_ops = icsk->icsk_ulp_ops;    in tcp_diag_get_aux()
   146  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_diag_get_aux_size() (local)
   170  ulp_ops = icsk->icsk_ulp_ops;    in tcp_diag_get_aux_size()
inet_diag.c
   234  int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,    in inet_sk_diag_fill() (argument)
   296  if (!icsk) {    in inet_sk_diag_fill()
   301  if (icsk->icsk_pending == ICSK_TIME_RETRANS ||    in inet_sk_diag_fill()
   302  icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||    in inet_sk_diag_fill()
   303  icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {    in inet_sk_diag_fill()
   305  r->idiag_retrans = icsk->icsk_retransmits;    in inet_sk_diag_fill()
   307  jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);    in inet_sk_diag_fill()
   308  } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {    in inet_sk_diag_fill()
   310  r->idiag_retrans = icsk->icsk_probes_out;    in inet_sk_diag_fill()
   312  jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);    in inet_sk_diag_fill()
  [all …]
tcp_ipv4.c
   433  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_ld_RTO_revert() (local)
   442  if (seq != tp->snd_una || !icsk->icsk_retransmits ||    in tcp_ld_RTO_revert()
   443  !icsk->icsk_backoff)    in tcp_ld_RTO_revert()
   450  icsk->icsk_backoff--;    in tcp_ld_RTO_revert()
   451  icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;    in tcp_ld_RTO_revert()
   452  icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);    in tcp_ld_RTO_revert()
   456  remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);    in tcp_ld_RTO_revert()
  2299  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_v4_init_sock() (local)
  2303  icsk->icsk_af_ops = &ipv4_specific;    in tcp_v4_init_sock()
  2674  const struct inet_connection_sock *icsk = inet_csk(sk);    in get_tcp4_sock() (local)
  [all …]
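icsk_backoff counts how many times the RTO has been doubled, and inet_csk_rto_backoff() shifts the base RTO left by that count before clamping it to a ceiling such as TCP_RTO_MAX; tcp_ld_RTO_revert() above decrements the count (line 450) to unwind one step of that backoff. Stand-alone illustration of the shift-and-clamp with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long rto = 200;         /* base RTO, here in ms */
            unsigned long rto_max = 120000;  /* TCP_RTO_MAX-style ceiling, 120 s */
            unsigned int backoff;

            for (backoff = 0; backoff <= 10; backoff++) {
                    unsigned long when = rto << backoff;    /* exponential backoff */

                    if (when > rto_max)
                            when = rto_max;
                    printf("backoff=%2u -> timeout=%lu ms\n", backoff, when);
            }
            return 0;
    }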
tcp_nv.c
   241  const struct inet_connection_sock *icsk = inet_csk(sk);    in tcpnv_acked() (local)
   255  if (icsk->icsk_ca_state != TCP_CA_Open &&    in tcpnv_acked()
   256  icsk->icsk_ca_state != TCP_CA_Disorder)    in tcpnv_acked()
/net/mptcp/
token_test.c
    34  struct inet_connection_sock *icsk;    in build_icsk() (local)
    36  icsk = kunit_kzalloc(test, sizeof(struct inet_connection_sock),    in build_icsk()
    38  KUNIT_EXPECT_NOT_ERR_OR_NULL(test, icsk);    in build_icsk()
    39  return icsk;    in build_icsk()
    68  struct inet_connection_sock *icsk = build_icsk(test);    in mptcp_token_test_msk_basic() (local)
    74  rcu_assign_pointer(icsk->icsk_ulp_data, ctx);    in mptcp_token_test_msk_basic()
    79  mptcp_token_new_connect((struct sock *)icsk));    in mptcp_token_test_msk_basic()
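build_icsk() leans on two KUnit conveniences visible above: kunit_kzalloc() returns test-scoped memory that the framework frees automatically, and KUNIT_EXPECT_NOT_ERR_OR_NULL() records a failure without aborting the run. A minimal sketch of how such a helper plugs into a suite, with hypothetical test and suite names:

    #include <kunit/test.h>
    #include <net/inet_connection_sock.h>

    static void icsk_alloc_test(struct kunit *test)
    {
            struct inet_connection_sock *icsk;

            /* test-managed allocation, freed automatically when the test ends */
            icsk = kunit_kzalloc(test, sizeof(*icsk), GFP_KERNEL);
            KUNIT_EXPECT_NOT_ERR_OR_NULL(test, icsk);
    }

    static struct kunit_case icsk_example_cases[] = {
            KUNIT_CASE(icsk_alloc_test),
            {}
    };

    static struct kunit_suite icsk_example_suite = {
            .name = "icsk-alloc-example",
            .test_cases = icsk_example_cases,
    };
    kunit_test_suite(icsk_example_suite);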
subflow.c
   736  struct inet_connection_sock *icsk = inet_csk(sk);    in subflow_ulp_fallback() (local)
   739  icsk->icsk_ulp_ops = NULL;    in subflow_ulp_fallback()
   740  rcu_assign_pointer(icsk->icsk_ulp_data, NULL);    in subflow_ulp_fallback()
  1488  struct inet_connection_sock *icsk = inet_csk(sk);    in mptcpv6_handle_mapped() (local)
  1494  subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);    in mptcpv6_handle_mapped()
  1496  if (likely(icsk->icsk_af_ops == target))    in mptcpv6_handle_mapped()
  1499  subflow->icsk_af_ops = icsk->icsk_af_ops;    in mptcpv6_handle_mapped()
  1500  icsk->icsk_af_ops = target;    in mptcpv6_handle_mapped()
  1739  struct inet_connection_sock *icsk = inet_csk(sk);    in subflow_create_ctx() (local)
  1746  rcu_assign_pointer(icsk->icsk_ulp_data, ctx);    in subflow_create_ctx()
  [all …]
/net/ipv6/
tcp_ipv6.c
   146  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_v6_connect() (local)
   228  u32 exthdrlen = icsk->icsk_ext_hdr_len;    in tcp_v6_connect()
   239  WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);    in tcp_v6_connect()
   250  icsk->icsk_ext_hdr_len = exthdrlen;    in tcp_v6_connect()
   252  WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific);    in tcp_v6_connect()
   309  icsk->icsk_ext_hdr_len = 0;    in tcp_v6_connect()
   311  icsk->icsk_ext_hdr_len = opt->opt_flen +    in tcp_v6_connect()
  1945  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_v6_init_sock() (local)
  1949  icsk->icsk_af_ops = &ipv6_specific;    in tcp_v6_init_sock()
  2000  const struct inet_connection_sock *icsk = inet_csk(sp);    in get_tcp6_sock() (local)
  [all …]
/net/tls/
tls_toe.c
    48  struct inet_connection_sock *icsk = inet_csk(sk);    in tls_toe_sk_destruct() (local)
    53  rcu_assign_pointer(icsk->icsk_ulp_data, NULL);    in tls_toe_sk_destruct()