/net/sctp/ |
D | transport.c |
      401  __u32 cwnd, ssthresh, flight_size, pba, pmtu;  in sctp_transport_raise_cwnd() local
      403  cwnd = transport->cwnd;  in sctp_transport_raise_cwnd()
      415  if (cwnd <= ssthresh) {  in sctp_transport_raise_cwnd()
      438  if (flight_size < cwnd)  in sctp_transport_raise_cwnd()
      442  cwnd += pmtu;  in sctp_transport_raise_cwnd()
      444  cwnd += bytes_acked;  in sctp_transport_raise_cwnd()
      448  __func__, transport, bytes_acked, cwnd, ssthresh,  in sctp_transport_raise_cwnd()
      474  if (pba > cwnd && flight_size < cwnd)  in sctp_transport_raise_cwnd()
      475  pba = cwnd;  in sctp_transport_raise_cwnd()
      476  if (pba >= cwnd && flight_size >= cwnd) {  in sctp_transport_raise_cwnd()
      [all …]
|
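The hits above are sctp_transport_raise_cwnd(), SCTP's implementation of RFC 4960 section 7.2, with cwnd counted in bytes (unlike TCP's segment-counted snd_cwnd): while cwnd <= ssthresh, slow start grows the window by at most min(bytes_acked, PMTU) per SACK, and only while the window is actually full; above ssthresh, congestion avoidance accumulates partial_bytes_acked (pba) and adds one PMTU per full cwnd of acked data. A minimal standalone sketch of that logic, using simplified stand-in types rather than the kernel's struct sctp_transport:

#include <stdint.h>

/* Hedged sketch of RFC 4960 cwnd growth; fields are simplified
 * stand-ins for the kernel's struct sctp_transport. */
struct transport_sketch {
	uint32_t cwnd;		/* congestion window, bytes */
	uint32_t ssthresh;	/* slow-start threshold, bytes */
	uint32_t flight_size;	/* bytes currently outstanding */
	uint32_t pba;		/* partial_bytes_acked */
	uint32_t pmtu;		/* path MTU */
};

static void raise_cwnd_sketch(struct transport_sketch *t, uint32_t bytes_acked)
{
	if (t->cwnd <= t->ssthresh) {
		/* Slow start: grow only while the window is being used,
		 * and by at most min(bytes_acked, PMTU) per SACK. */
		if (t->flight_size < t->cwnd)
			return;
		t->cwnd += bytes_acked < t->pmtu ? bytes_acked : t->pmtu;
	} else {
		/* Congestion avoidance: one PMTU per full cwnd acked. */
		t->pba += bytes_acked;
		if (t->pba > t->cwnd && t->flight_size < t->cwnd)
			t->pba = t->cwnd;
		if (t->pba >= t->cwnd && t->flight_size >= t->cwnd) {
			t->pba -= t->cwnd;
			t->cwnd += t->pmtu;
		}
	}
}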
D | output.c |
      694  flight_size >= transport->cwnd)  in sctp_packet_can_append_data()
      824  psize + chunk_len > (packet->transport->cwnd >> 1))  in sctp_packet_will_fit()
|
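Both output.c hits are gates rather than window updates: sctp_packet_can_append_data() stops appending new data once flight_size reaches cwnd, and sctp_packet_will_fit() refuses to bundle a chunk that would grow the packet past half the cwnd. Roughly, as standalone predicates (names are ours; the second inverts the overflow test quoted above, and the real functions check more than just cwnd):

#include <stdbool.h>
#include <stdint.h>

/* Sketches of the two cwnd gates above. */
static bool can_append_data(uint32_t flight_size, uint32_t cwnd)
{
	return flight_size < cwnd;		/* window not yet full */
}

static bool will_fit(uint32_t psize, uint32_t chunk_len, uint32_t cwnd)
{
	/* keep any one packet at or below half the window */
	return psize + chunk_len <= (cwnd >> 1);
}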
D | outqueue.c |
      530  transport->cwnd, transport->ssthresh, transport->flight_size,  in sctp_retransmit_mark()
     1741  transport->cwnd, transport->ssthresh,  in sctp_mark_missing()
|
D | associola.c | 687 peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); in sctp_assoc_add_peer()
|
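The associola.c line seeds a new peer with RFC 4960's initial window, min(4*MTU, max(2*MTU, 4380 bytes)); the socket.c ASCONF path below repeats the same computation. Worked through for a 1500-byte path MTU: min(6000, max(3000, 4380)) = 4380 bytes. As a standalone helper:

#include <stdint.h>

/* RFC 4960 initial cwnd: min(4*MTU, max(2*MTU, 4380 bytes)). */
static uint32_t sctp_initial_cwnd_sketch(uint32_t pathmtu)
{
	uint32_t lo = 2 * pathmtu > 4380 ? 2 * pathmtu : 4380;
	uint32_t hi = 4 * pathmtu;

	return hi < lo ? hi : lo;
}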
D | socket.c |
      645  trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,  in sctp_send_asconf_add_ip()
     5124  info->sctpi_p_cwnd = prim->cwnd;  in sctp_get_sctp_info()
     5334  status.sstat_primary.spinfo_cwnd = transport->cwnd;  in sctp_getsockopt_sctp_status()
     5402  pinfo.spinfo_cwnd = transport->cwnd;  in sctp_getsockopt_peer_addr_info()
|
/net/ipv4/ |
D | tcp_bic.c |
       83  static inline void bictcp_update(struct bictcp *ca, u32 cwnd)  in bictcp_update() argument
       85  if (ca->last_cwnd == cwnd &&  in bictcp_update()
       89  ca->last_cwnd = cwnd;  in bictcp_update()
       96  if (cwnd <= low_window) {  in bictcp_update()
       97  ca->cnt = cwnd;  in bictcp_update()
      102  if (cwnd < ca->last_max_cwnd) {  in bictcp_update()
      103  __u32 dist = (ca->last_max_cwnd - cwnd)  in bictcp_update()
      108  ca->cnt = cwnd / max_increment;  in bictcp_update()
      111  ca->cnt = (cwnd * smooth_part) / BICTCP_B;  in bictcp_update()
      114  ca->cnt = cwnd / dist;  in bictcp_update()
      [all …]
|
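bictcp_update() sizes BIC's growth step via ca->cnt, the number of ACKs required per one-segment cwnd increase, so a smaller cnt means faster growth. The branches above give BIC its shape: plain Reno below low_window, capped linear growth while far below the last maximum, and a binary-search-style approach as cwnd closes in on it. A sketch using the default constants from tcp_bic.c (the max-probing branch taken at or above last_max_cwnd is omitted):

#include <stdint.h>

#define BICTCP_B	4	/* binary search coefficient */
#define LOW_WINDOW	14	/* below this, behave like Reno */
#define MAX_INCREMENT	16	/* cap on growth per RTT, segments */
#define SMOOTH_PART	20	/* slow growth just below last_max_cwnd */

/* ACKs needed per one-segment cwnd increase, for a given cwnd. */
static uint32_t bic_cnt_sketch(uint32_t cwnd, uint32_t last_max_cwnd)
{
	if (cwnd <= LOW_WINDOW)
		return cwnd;			/* Reno: +1 segment per RTT */
	if (cwnd < last_max_cwnd) {
		uint32_t dist = (last_max_cwnd - cwnd) / BICTCP_B;

		if (dist > MAX_INCREMENT)
			return cwnd / MAX_INCREMENT;	/* linear increase */
		if (dist <= 1)
			return (cwnd * SMOOTH_PART) / BICTCP_B;	/* creep */
		return cwnd / dist;		/* binary search increase */
	}
	return cwnd;	/* max probing above last_max_cwnd: not sketched */
}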
D | tcp_cubic.c |
      221  static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)  in bictcp_update() argument
      228  if (ca->last_cwnd == cwnd &&  in bictcp_update()
      239  ca->last_cwnd = cwnd;  in bictcp_update()
      245  ca->tcp_cwnd = cwnd; /* syn with cubic */  in bictcp_update()
      247  if (ca->last_max_cwnd <= cwnd) {  in bictcp_update()
      249  ca->bic_origin_point = cwnd;  in bictcp_update()
      255  * (ca->last_max_cwnd - cwnd));  in bictcp_update()
      293  if (bic_target > cwnd) {  in bictcp_update()
      294  ca->cnt = cwnd / (bic_target - cwnd);  in bictcp_update()
      296  ca->cnt = 100 * cwnd; /* very small increment*/  in bictcp_update()
      [all …]
|
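tcp_cubic.c reuses the bictcp_update() name but replaces BIC's piecewise rules with the cubic curve W(t) = C(t - K)^3 + W_max, where W_max (bic_origin_point) is the window at the last loss. Each ACK it evaluates the curve to get bic_target, then converts that into ca->cnt exactly as in the last hits above: close the gap within about one RTT if below target, otherwise grow only negligibly. That conversion, sketched:

#include <stdint.h>

/* Turn CUBIC's per-ACK target window into ca->cnt, the number of
 * ACKs needed per segment of cwnd growth; names are ours. */
static uint32_t cubic_cnt_sketch(uint32_t cwnd, uint32_t bic_target)
{
	if (bic_target > cwnd)
		return cwnd / (bic_target - cwnd);  /* reach target in ~1 RTT */
	return 100 * cwnd;                          /* very small increment */
}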
D | tcp_bbr.c |
      392  static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)  in bbr_quantization_budget() argument
      397  cwnd += 3 * bbr_tso_segs_goal(sk);  in bbr_quantization_budget()
      400  cwnd = (cwnd + 1) & ~1U;  in bbr_quantization_budget()
      404  cwnd += 2;  in bbr_quantization_budget()
      406  return cwnd;  in bbr_quantization_budget()
      483  u32 cwnd = tp->snd_cwnd;  in bbr_set_cwnd_to_recover_or_restore() local
      490  cwnd = max_t(s32, cwnd - rs->losses, 1);  in bbr_set_cwnd_to_recover_or_restore()
      497  cwnd = tcp_packets_in_flight(tp) + acked;  in bbr_set_cwnd_to_recover_or_restore()
      500  cwnd = max(cwnd, bbr->prior_cwnd);  in bbr_set_cwnd_to_recover_or_restore()
      506  *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);  in bbr_set_cwnd_to_recover_or_restore()
      [all …]
|
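bbr_quantization_budget() pads the cwnd BBR derived from its BDP estimate so that pacing quantization and ACK aggregation cannot starve the pipe: three extra TSO bursts of headroom, then rounding up to an even packet count so delayed ACKs (one per two packets) keep the ACK clock ticking; the kernel adds two more packets only while ProbeBW is probing for bandwidth. A sketch of that arithmetic, with the ProbeBW condition folded into a flag:

#include <stdbool.h>
#include <stdint.h>

static uint32_t quantization_budget_sketch(uint32_t cwnd,
					   uint32_t tso_segs_goal,
					   bool probing_bw)
{
	cwnd += 3 * tso_segs_goal;	/* headroom for full TSO bursts */
	cwnd = (cwnd + 1) & ~1U;	/* round up to even, for delayed ACKs */
	if (probing_bw)
		cwnd += 2;		/* keep inflight above BDP while probing */
	return cwnd;
}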
D | tcp_highspeed.c |
       17  unsigned int cwnd;  member
      130  if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {  in hstcp_cong_avoid()
      131  while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&  in hstcp_cong_avoid()
      134  } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {  in hstcp_cong_avoid()
      135  while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)  in hstcp_cong_avoid()
|
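HighSpeed TCP keeps a precomputed table of (cwnd threshold, AIMD parameters) rows, and hstcp_cong_avoid() walks the index ca->ai up or down as snd_cwnd crosses the thresholds, so large windows get a more aggressive increase and a gentler decrease. The index maintenance above, sketched:

#include <stddef.h>
#include <stdint.h>

struct hstcp_aimd_val { uint32_t cwnd; uint32_t md; };	/* one table row */

/* Track snd_cwnd through the table; ai is the current row index. */
static size_t hstcp_index_sketch(const struct hstcp_aimd_val *tbl, size_t n,
				 size_t ai, uint32_t snd_cwnd)
{
	if (snd_cwnd > tbl[ai].cwnd) {
		while (snd_cwnd > tbl[ai].cwnd && ai < n - 1)
			ai++;
	} else if (ai && snd_cwnd <= tbl[ai - 1].cwnd) {
		while (ai && snd_cwnd <= tbl[ai - 1].cwnd)
			ai--;
	}
	return ai;
}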
D | tcp_cong.c |
      398  u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);  in tcp_slow_start() local
      400  acked -= cwnd - tp->snd_cwnd;  in tcp_slow_start()
      401  tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);  in tcp_slow_start()
|
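tcp_slow_start() grows the window by one segment per acked segment but stops exactly at ssthresh, clamps to snd_cwnd_clamp, and returns the unconsumed ACKs so the caller can continue in congestion avoidance. For example, with cwnd=10, ssthresh=16, acked=8, the window becomes 16 and 2 ACKs are left over. A standalone sketch:

#include <stdint.h>

/* Returns the acked segments left over once ssthresh is reached. */
static uint32_t slow_start_sketch(uint32_t *cwnd, uint32_t ssthresh,
				  uint32_t clamp, uint32_t acked)
{
	uint32_t target = *cwnd + acked < ssthresh ? *cwnd + acked : ssthresh;

	acked -= target - *cwnd;		/* ACKs consumed by slow start */
	*cwnd = target < clamp ? target : clamp;
	return acked;
}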
D | tcp_output.c |
      146  u32 cwnd = tp->snd_cwnd;  in tcp_cwnd_restart() local
      151  restart_cwnd = min(restart_cwnd, cwnd);  in tcp_cwnd_restart()
      153  while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)  in tcp_cwnd_restart()
      154  cwnd >>= 1;  in tcp_cwnd_restart()
      155  tp->snd_cwnd = max(cwnd, restart_cwnd);  in tcp_cwnd_restart()
     2035  u32 in_flight, cwnd, halfcwnd;  in tcp_cwnd_test() local
     2043  cwnd = tp->snd_cwnd;  in tcp_cwnd_test()
     2044  if (in_flight >= cwnd)  in tcp_cwnd_test()
     2050  halfcwnd = max(cwnd >> 1, 1U);  in tcp_cwnd_test()
     2051  return min(halfcwnd, cwnd - in_flight);  in tcp_cwnd_test()
|
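tcp_cwnd_restart() decays an idle connection's window by half for each RTO that passed without a send, never dropping below the restart window, so a long-idle bulk sender cannot dump a stale full window into the network; tcp_cwnd_test() then grants at most cwnd - in_flight packets, throttled to half a window near the end of a write. The decay loop, sketched:

#include <stdint.h>

/* Halve cwnd once per RTO of idle time, floored at restart_cwnd. */
static uint32_t cwnd_restart_sketch(uint32_t cwnd, uint32_t restart_cwnd,
				    int64_t idle, uint32_t rto)
{
	if (restart_cwnd > cwnd)
		restart_cwnd = cwnd;
	while ((idle -= rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}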
D | Kconfig |
      565  coalescence. In addition, it will decrease its cwnd multiplicatively
      568  Note that in general congestion avoidance (cwnd decreased when # packets
      569  queued grows) cannot coexist with congestion control (cwnd decreased only
|
D | tcp_input.c |
      967  __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);  in tcp_init_cwnd() local
      969  if (!cwnd)  in tcp_init_cwnd()
      970  cwnd = TCP_INIT_CWND;  in tcp_init_cwnd()
      971  return min_t(__u32, cwnd, tp->snd_cwnd_clamp);  in tcp_init_cwnd()
|
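tcp_init_cwnd() prefers a per-route initcwnd metric when one is configured (ip route ... initcwnd N), falls back to TCP_INIT_CWND (10 segments, per RFC 6928), and honours the socket's clamp:

#include <stdint.h>

#define TCP_INIT_CWND 10	/* RFC 6928 default initial window */

static uint32_t init_cwnd_sketch(uint32_t route_initcwnd, uint32_t clamp)
{
	uint32_t cwnd = route_initcwnd ? route_initcwnd : TCP_INIT_CWND;

	return cwnd < clamp ? cwnd : clamp;
}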
/net/batman-adv/ |
D | tp_meter.c |
      147  if (tp_vars->cwnd <= tp_vars->ss_threshold) {  in batadv_tp_update_cwnd()
      149  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);  in batadv_tp_update_cwnd()
      156  ((mss * mss) << 6) / (tp_vars->cwnd << 3));  in batadv_tp_update_cwnd()
      162  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);  in batadv_tp_update_cwnd()
      416  tp_vars->cwnd, tp_vars->ss_threshold);  in batadv_tp_sender_end()
      507  tp_vars->ss_threshold = tp_vars->cwnd >> 1;  in batadv_tp_sender_timeout()
      513  tp_vars->other_end, tp_vars->cwnd, tp_vars->ss_threshold,  in batadv_tp_sender_timeout()
      516  tp_vars->cwnd = BATADV_TP_PLEN * 3;  in batadv_tp_sender_timeout()
      634  u32 rtt, recv_ack, cwnd;  in batadv_tp_recv_ack() local
      698  tp_vars->ss_threshold = tp_vars->cwnd >> 1;  in batadv_tp_recv_ack()
      [all …]
|
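batman-adv's throughput meter borrows TCP's window dynamics for its measurement sessions, with cwnd in bytes: slow start adds one MSS per ACK, while congestion avoidance accumulates fixed-point credit (3 fractional bits) of roughly mss*mss/cwnd per ACK and bumps the window by one MSS once a full MSS of credit has built up; a timeout halves ss_threshold and resets cwnd to three payloads. A rough sketch of the update path, with simplified stand-in types:

#include <stdint.h>

struct tp_sketch {
	uint32_t cwnd;		/* bytes */
	uint32_t ss_threshold;	/* bytes */
	uint32_t dec_cwnd;	/* growth credit, units of 1/8 byte */
};

static void tp_update_cwnd_sketch(struct tp_sketch *tp, uint32_t mss)
{
	if (tp->cwnd <= tp->ss_threshold) {	/* slow start */
		tp->dec_cwnd = 0;
		tp->cwnd += mss;
		return;
	}
	/* congestion avoidance: at least 1/8 per ACK (RFC 5681, 3.1) */
	uint32_t inc = ((mss * mss) << 6) / (tp->cwnd << 3);

	tp->dec_cwnd += inc > (1U << 3) ? inc : (1U << 3);
	if (tp->dec_cwnd < (mss << 3))
		return;				/* not a full MSS of credit yet */
	tp->cwnd += mss;
	tp->dec_cwnd = 0;
}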
D | types.h | 1434 u32 cwnd; member
|
/net/rxrpc/ |
D | input.c |
       45  unsigned int cwnd = call->cong_cwnd;  in rxrpc_congestion_management() local
       55  cwnd = 1;  in rxrpc_congestion_management()
       56  if (cwnd >= call->cong_ssthresh &&  in rxrpc_congestion_management()
       70  summary->cwnd = call->cong_cwnd;  in rxrpc_congestion_management()
       80  cwnd += 1;  in rxrpc_congestion_management()
       81  if (cwnd >= call->cong_ssthresh) {  in rxrpc_congestion_management()
      102  if (cumulative_acks >= cwnd)  in rxrpc_congestion_management()
      103  cwnd++;  in rxrpc_congestion_management()
      126  cwnd = call->cong_ssthresh + 3;  in rxrpc_congestion_management()
      135  cwnd += 1;  in rxrpc_congestion_management()
      [all …]
|
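rxrpc counts cwnd in packets (hence the u8 member below) and follows the familiar state machine: collapse toward 1 on a retransmission timeout, add one packet per ACK during slow start, add one packet per window's worth of cumulative ACKs during congestion avoidance, and resume at ssthresh + 3 when fast retransmission ends. Sketching just the two growth rules, heavily simplified from the mode machine above (names are ours):

/* One ACK's effect on a packet-counted window. */
static unsigned int rxrpc_grow_sketch(unsigned int cwnd,
				      unsigned int ssthresh,
				      unsigned int cumulative_acks)
{
	if (cwnd < ssthresh)
		return cwnd + 1;	/* slow start */
	if (cumulative_acks >= cwnd)
		return cwnd + 1;	/* congestion avoidance */
	return cwnd;
}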
D | ar-internal.h | 694 u8 cwnd; member
|
/net/sunrpc/ |
D | xprt.c |
      536  unsigned long cwnd = xprt->cwnd;  in xprt_adjust_cwnd() local
      538  if (result >= 0 && cwnd <= xprt->cong) {  in xprt_adjust_cwnd()
      541  cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;  in xprt_adjust_cwnd()
      542  if (cwnd > RPC_MAXCWND(xprt))  in xprt_adjust_cwnd()
      543  cwnd = RPC_MAXCWND(xprt);  in xprt_adjust_cwnd()
      546  cwnd >>= 1;  in xprt_adjust_cwnd()
      547  if (cwnd < RPC_CWNDSCALE)  in xprt_adjust_cwnd()
      548  cwnd = RPC_CWNDSCALE;  in xprt_adjust_cwnd()
      551  xprt->cong, xprt->cwnd, cwnd);  in xprt_adjust_cwnd()
      552  xprt->cwnd = cwnd;  in xprt_adjust_cwnd()
      [all …]
|
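xprt_adjust_cwnd() is classic Van Jacobson AIMD in fixed point: RPC_CWNDSCALE (1 << RPC_CWNDSHIFT) stands for one request slot, a successful reply grows the window by roughly one slot per window of replies (the kernel only does this when the transport is at its congestion limit, elided here), and a timeout halves it, bounded to [RPC_CWNDSCALE, RPC_MAXCWND]. Sketched with the real constants from include/linux/sunrpc/xprt.h:

#define RPC_CWNDSHIFT	(8U)
#define RPC_CWNDSCALE	(1U << RPC_CWNDSHIFT)

/* result >= 0 means a reply arrived; negative means a timeout.
 * Assumes cwnd >= RPC_CWNDSCALE on entry (the clamp below keeps it so). */
static unsigned long adjust_cwnd_sketch(unsigned long cwnd,
					unsigned long maxcwnd, int result)
{
	if (result >= 0) {
		/* additive increase: ~one slot per window, rounded */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > maxcwnd)
			cwnd = maxcwnd;
	} else {
		cwnd >>= 1;		/* multiplicative decrease */
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	return cwnd;
}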
/net/dccp/ccids/ |
D | ccid2.c |
      217  u32 cwnd = hc->tx_cwnd, restart_cwnd,  in ccid2_cwnd_restart() local
      221  hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));  in ccid2_cwnd_restart()
      224  restart_cwnd = min(cwnd, iwnd);  in ccid2_cwnd_restart()
      226  while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)  in ccid2_cwnd_restart()
      227  cwnd >>= 1;  in ccid2_cwnd_restart()
      228  hc->tx_cwnd = max(cwnd, restart_cwnd);  in ccid2_cwnd_restart()
|
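ccid2_cwnd_restart() mirrors TCP's restart-after-idle with one addition: before decaying, it raises the slow-start threshold to at least three quarters of the old window, computed as (cwnd >> 1) + (cwnd >> 2). Sketched:

#include <stdint.h>

static uint32_t ccid2_restart_sketch(uint32_t cwnd, uint32_t iwnd,
				     uint32_t *ssthresh, int64_t delta,
				     uint32_t rto)
{
	uint32_t three_q = (cwnd >> 1) + (cwnd >> 2);	/* 3/4 of cwnd */
	uint32_t restart_cwnd = cwnd < iwnd ? cwnd : iwnd;

	if (*ssthresh < three_q)
		*ssthresh = three_q;
	while ((delta -= rto) >= 0 && cwnd > restart_cwnd)
		cwnd >>= 1;			/* one halving per RTO idle */
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}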
/net/sunrpc/xprtrdma/ |
D | svc_rdma_backchannel.c |
       50  xprt->cwnd = credits << RPC_CWNDSHIFT;  in svc_rdma_handle_bc_reply()
      197  xprt->cwnd = RPC_CWNDSHIFT;  in xprt_rdma_bc_close()
|
D | rpc_rdma.c | 1008 xprt->cwnd = grant << RPC_CWNDSHIFT; in __rpcrdma_update_cwnd_locked()
|
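RPC-over-RDMA does no loss-driven probing at all: the peer advertises a credit grant, and the hits above simply map that grant onto the RPC layer's fixed-point congestion window, one credit per RPC_CWNDSCALE slot:

#define RPC_CWNDSHIFT	(8U)	/* from include/linux/sunrpc/xprt.h */

/* Credit-based flow control: the grant maps directly onto the window. */
static unsigned long credits_to_cwnd_sketch(unsigned long credits)
{
	return credits << RPC_CWNDSHIFT;
}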