Lines matching references to r (the &mut Recovery handle) in the CUBIC congestion-control code and its tests:
145 fn collapse_cwnd(r: &mut Recovery) { in collapse_cwnd()
146 let cubic = &mut r.cubic_state; in collapse_cwnd()
148 r.congestion_recovery_start_time = None; in collapse_cwnd()
150 cubic.w_max = r.congestion_window as f64; in collapse_cwnd()
153 r.ssthresh = (r.congestion_window as f64 * BETA_CUBIC) as usize; in collapse_cwnd()
154 r.ssthresh = cmp::max( in collapse_cwnd()
155 r.ssthresh, in collapse_cwnd()
156 r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS, in collapse_cwnd()
161 reno::collapse_cwnd(r); in collapse_cwnd()
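In this fragment collapse_cwnd() records the current window as w_max, re-derives ssthresh with the CUBIC reduction factor, and then defers the actual window reset to reno::collapse_cwnd(). A minimal sketch of that reduction step, assuming BETA_CUBIC = 0.7 (RFC 8312) and that recovery::MINIMUM_WINDOW_PACKETS is the usual two-packet QUIC floor:

const BETA_CUBIC: f64 = 0.7;
const MINIMUM_WINDOW_PACKETS: usize = 2; // assumed value of recovery::MINIMUM_WINDOW_PACKETS

// Multiplicative decrease of ssthresh, floored at the minimum window.
fn reduced_ssthresh(congestion_window: usize, max_datagram_size: usize) -> usize {
    let ssthresh = (congestion_window as f64 * BETA_CUBIC) as usize;
    ssthresh.max(max_datagram_size * MINIMUM_WINDOW_PACKETS)
}

For example, a 12_000-byte window with 1_200-byte datagrams reduces to 8_400 bytes, comfortably above the 2_400-byte floor.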
164 fn on_packet_sent(r: &mut Recovery, sent_bytes: usize, now: Instant) { in on_packet_sent()
167 let cubic = &mut r.cubic_state; in on_packet_sent()
170 if r.bytes_in_flight == 0 { in on_packet_sent()
175 if let Some(recovery_start_time) = r.congestion_recovery_start_time { in on_packet_sent()
177 r.congestion_recovery_start_time = in on_packet_sent()
186 reno::on_packet_sent(r, sent_bytes, now); in on_packet_sent()
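The on_packet_sent() lines above cover the restart-after-idle case: when bytes_in_flight is zero and a recovery epoch is active, the epoch start is pushed forward so the cubic curve earns no growth for idle time, after which the byte accounting is delegated to reno::on_packet_sent(). A sketch of that shift under the same reading; the last_sent bookkeeping not shown in the listing is an assumption here:

use std::time::Instant;

// Move the recovery epoch forward by the idle gap so that
// t = now - epoch_start in W_cubic(t) excludes time with nothing in flight.
fn shift_epoch_after_idle(epoch_start: Instant, last_sent: Instant, now: Instant) -> Instant {
    epoch_start + now.saturating_duration_since(last_sent)
}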
190 r: &mut Recovery, packet: &Acked, epoch: packet::Epoch, now: Instant, in on_packet_acked()
192 let in_congestion_recovery = r.in_congestion_recovery(packet.time_sent); in on_packet_acked()
194 r.bytes_in_flight = r.bytes_in_flight.saturating_sub(packet.size); in on_packet_acked()
200 if r.app_limited { in on_packet_acked()
210 if r.congestion_recovery_start_time.is_some() { in on_packet_acked()
211 let new_lost = r.lost_count - r.cubic_state.prior.lost_count; in on_packet_acked()
213 if r.congestion_window < r.cubic_state.prior.congestion_window && in on_packet_acked()
216 rollback(r); in on_packet_acked()
221 if r.congestion_window < r.ssthresh { in on_packet_acked()
224 r.bytes_acked_sl += packet.size; in on_packet_acked()
226 if r.bytes_acked_sl >= r.max_datagram_size { in on_packet_acked()
227 r.congestion_window += r.max_datagram_size; in on_packet_acked()
228 r.bytes_acked_sl -= r.max_datagram_size; in on_packet_acked()
231 if r.hystart.enabled() && in on_packet_acked()
233 r.hystart.try_enter_lss( in on_packet_acked()
235 r.latest_rtt, in on_packet_acked()
236 r.congestion_window, in on_packet_acked()
238 r.max_datagram_size, in on_packet_acked()
241 r.ssthresh = r.congestion_window; in on_packet_acked()
248 if r.hystart.in_lss(epoch) { in on_packet_acked()
249 ca_start_time = r.hystart.lss_start_time().unwrap(); in on_packet_acked()
252 if r.cubic_state.w_max == 0.0 { in on_packet_acked()
253 r.cubic_state.w_max = r.congestion_window as f64; in on_packet_acked()
254 r.cubic_state.k = 0.0; in on_packet_acked()
256 r.cubic_state.w_est = r.congestion_window as f64; in on_packet_acked()
257 r.cubic_state.alpha_aimd = ALPHA_AIMD; in on_packet_acked()
260 match r.congestion_recovery_start_time { in on_packet_acked()
266 r.congestion_recovery_start_time = Some(now); in on_packet_acked()
268 r.cubic_state.w_max = r.congestion_window as f64; in on_packet_acked()
269 r.cubic_state.k = 0.0; in on_packet_acked()
271 r.cubic_state.w_est = r.congestion_window as f64; in on_packet_acked()
272 r.cubic_state.alpha_aimd = ALPHA_AIMD; in on_packet_acked()
280 let target = r.cubic_state.w_cubic(t + r.min_rtt, r.max_datagram_size); in on_packet_acked()
283 let target = f64::max(target, r.congestion_window as f64); in on_packet_acked()
284 let target = f64::min(target, r.congestion_window as f64 * 1.5); in on_packet_acked()
287 let w_est_inc = r.cubic_state.w_est_inc( in on_packet_acked()
289 r.congestion_window, in on_packet_acked()
290 r.max_datagram_size, in on_packet_acked()
292 r.cubic_state.w_est += w_est_inc; in on_packet_acked()
294 if r.cubic_state.w_est >= r.cubic_state.w_max { in on_packet_acked()
295 r.cubic_state.alpha_aimd = 1.0; in on_packet_acked()
298 let mut cubic_cwnd = r.congestion_window; in on_packet_acked()
300 if r.cubic_state.w_cubic(t, r.max_datagram_size) < r.cubic_state.w_est { in on_packet_acked()
302 cubic_cwnd = cmp::max(cubic_cwnd, r.cubic_state.w_est as usize); in on_packet_acked()
306 r.max_datagram_size * (target as usize - cubic_cwnd) / cubic_cwnd; in on_packet_acked()
313 if r.hystart.in_lss(epoch) { in on_packet_acked()
314 let lss_cwnd_inc = r.hystart.lss_cwnd_inc( in on_packet_acked()
316 r.congestion_window, in on_packet_acked()
317 r.ssthresh, in on_packet_acked()
320 cubic_cwnd = cmp::max(cubic_cwnd, r.congestion_window + lss_cwnd_inc); in on_packet_acked()
324 r.cubic_state.cwnd_inc += cubic_cwnd - r.congestion_window; in on_packet_acked()
326 if r.cubic_state.cwnd_inc >= r.max_datagram_size { in on_packet_acked()
327 r.congestion_window += r.max_datagram_size; in on_packet_acked()
328 r.cubic_state.cwnd_inc -= r.max_datagram_size; in on_packet_acked()
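The congestion-avoidance half of on_packet_acked() computes a cubic target (clamped between the current window and 1.5x of it, per the two lines above the w_est update), grows the Reno-friendly estimate w_est on each ACK, and then raises cwnd_inc by max_datagram_size * (target - cwnd) / cwnd, releasing one datagram of window whenever a full datagram has accumulated. For reference, a minimal sketch of the RFC 8312 formulas that w_cubic(), cubic_k() and w_est_inc() appear to implement, written in units of segments; the listed code works in bytes and scales by max_datagram_size instead:

const C: f64 = 0.4;          // cubic scaling constant (RFC 8312)
const BETA_CUBIC: f64 = 0.7; // multiplicative decrease factor
// AIMD-friendly slope: 3 * (1 - beta) / (1 + beta), roughly 0.53.
const ALPHA_AIMD: f64 = 3.0 * (1.0 - BETA_CUBIC) / (1.0 + BETA_CUBIC);

/// K: seconds for the cubic curve to climb back to w_max after a reduction.
fn cubic_k(w_max: f64) -> f64 {
    (w_max * (1.0 - BETA_CUBIC) / C).cbrt()
}

/// W_cubic(t) = C * (t - K)^3 + w_max, with t in seconds and windows in segments.
fn w_cubic(t: f64, w_max: f64, k: f64) -> f64 {
    C * (t - k).powi(3) + w_max
}

/// Per-ACK growth of the Reno-friendly estimate: alpha * acked / cwnd.
fn w_est_inc(acked: f64, cwnd: f64) -> f64 {
    ALPHA_AIMD * acked / cwnd
}

This sketch uses the plain RFC definition of K; the crate's cubic_k() is also passed the post-loss congestion window, so it can account for the actual reduction rather than assuming beta * w_max.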
334 r: &mut Recovery, time_sent: Instant, epoch: packet::Epoch, now: Instant, in congestion_event()
336 let in_congestion_recovery = r.in_congestion_recovery(time_sent); in congestion_event()
341 r.congestion_recovery_start_time = Some(now); in congestion_event()
344 if (r.congestion_window as f64) < r.cubic_state.w_max { in congestion_event()
345 r.cubic_state.w_max = in congestion_event()
346 r.congestion_window as f64 * (1.0 + BETA_CUBIC) / 2.0; in congestion_event()
348 r.cubic_state.w_max = r.congestion_window as f64; in congestion_event()
351 r.ssthresh = (r.congestion_window as f64 * BETA_CUBIC) as usize; in congestion_event()
352 r.ssthresh = cmp::max( in congestion_event()
353 r.ssthresh, in congestion_event()
354 r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS, in congestion_event()
356 r.congestion_window = r.ssthresh; in congestion_event()
358 r.cubic_state.k = if r.cubic_state.w_max < r.congestion_window as f64 { in congestion_event()
361 r.cubic_state in congestion_event()
362 .cubic_k(r.congestion_window, r.max_datagram_size) in congestion_event()
365 r.cubic_state.cwnd_inc = in congestion_event()
366 (r.cubic_state.cwnd_inc as f64 * BETA_CUBIC) as usize; in congestion_event()
368 r.cubic_state.w_est = r.congestion_window as f64; in congestion_event()
369 r.cubic_state.alpha_aimd = ALPHA_AIMD; in congestion_event()
371 if r.hystart.in_lss(epoch) { in congestion_event()
372 r.hystart.congestion_event(); in congestion_event()
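congestion_event() applies fast convergence before the multiplicative decrease: if loss strikes while cwnd has not yet climbed back to the remembered w_max, w_max is lowered further so competing flows converge on a fair share sooner; K, w_est and alpha_aimd are then re-seeded from the reduced window, as the remaining lines show. A small sketch of the fast-convergence rule matching the branch above:

const BETA_CUBIC: f64 = 0.7;

// Fast convergence (RFC 8312, section 4.6): remember a smaller peak when the
// window never recovered to the previous w_max before the next loss.
fn next_w_max(congestion_window: f64, prior_w_max: f64) -> f64 {
    if congestion_window < prior_w_max {
        congestion_window * (1.0 + BETA_CUBIC) / 2.0
    } else {
        congestion_window
    }
}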
377 fn checkpoint(r: &mut Recovery) { in checkpoint()
378 r.cubic_state.prior.congestion_window = r.congestion_window; in checkpoint()
379 r.cubic_state.prior.ssthresh = r.ssthresh; in checkpoint()
380 r.cubic_state.prior.w_max = r.cubic_state.w_max; in checkpoint()
381 r.cubic_state.prior.k = r.cubic_state.k; in checkpoint()
382 r.cubic_state.prior.epoch_start = r.congestion_recovery_start_time; in checkpoint()
383 r.cubic_state.prior.lost_count = r.lost_count; in checkpoint()
386 fn rollback(r: &mut Recovery) { in rollback()
387 r.congestion_window = r.cubic_state.prior.congestion_window; in rollback()
388 r.ssthresh = r.cubic_state.prior.ssthresh; in rollback()
389 r.cubic_state.w_max = r.cubic_state.prior.w_max; in rollback()
390 r.cubic_state.k = r.cubic_state.prior.k; in rollback()
391 r.congestion_recovery_start_time = r.cubic_state.prior.epoch_start; in rollback()
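checkpoint() and rollback() form an undo path for spurious congestion events: the pre-event state is snapshotted when a congestion event fires, and the acked path shown earlier restores it when the window shrank yet few packets were actually newly lost. A sketch of that decision, with the threshold value assumed rather than taken from the crate:

const RESTORE_COUNT_THRESHOLD: usize = 10; // assumed; the crate defines the real value

// Restore the checkpointed window only if the event shrank the window and
// losses detected since the checkpoint stayed below the threshold.
fn should_rollback(cwnd: usize, prior_cwnd: usize, lost_since_checkpoint: usize) -> bool {
    cwnd < prior_cwnd && lost_since_checkpoint < RESTORE_COUNT_THRESHOLD
}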
408 let r = Recovery::new(&cfg); in cubic_init() localVariable
410 assert!(r.cwnd() > 0); in cubic_init()
411 assert_eq!(r.bytes_in_flight, 0); in cubic_init()
419 let mut r = Recovery::new(&cfg); in cubic_send() localVariable
421 r.on_packet_sent_cc(1000, Instant::now()); in cubic_send()
423 assert_eq!(r.bytes_in_flight, 1000); in cubic_send()
431 let mut r = Recovery::new(&cfg); in cubic_slow_start() localVariable
440 size: r.max_datagram_size, in cubic_slow_start()
452 r.on_packet_sent_cc(p.size, now); in cubic_slow_start()
455 let cwnd_prev = r.cwnd(); in cubic_slow_start()
463 r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now); in cubic_slow_start()
466 assert_eq!(r.cwnd(), cwnd_prev + p.size); in cubic_slow_start()
474 let mut r = Recovery::new(&cfg); in cubic_slow_start_multi_acks() localVariable
483 size: r.max_datagram_size, in cubic_slow_start_multi_acks()
495 r.on_packet_sent_cc(p.size, now); in cubic_slow_start_multi_acks()
498 let cwnd_prev = r.cwnd(); in cubic_slow_start_multi_acks()
518 r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now); in cubic_slow_start_multi_acks()
521 assert_eq!(r.cwnd(), cwnd_prev + p.size * 3); in cubic_slow_start_multi_acks()
529 let mut r = Recovery::new(&cfg); in cubic_congestion_event() localVariable
531 let prev_cwnd = r.cwnd(); in cubic_congestion_event()
533 r.congestion_event(now, packet::EPOCH_APPLICATION, now); in cubic_congestion_event()
537 assert_eq!(prev_cwnd as f64 * BETA_CUBIC, r.cwnd() as f64); in cubic_congestion_event()
545 let mut r = Recovery::new(&cfg); in cubic_congestion_avoidance() localVariable
547 let prev_cwnd = r.cwnd(); in cubic_congestion_avoidance()
551 r.on_packet_sent_cc(r.max_datagram_size, now); in cubic_congestion_avoidance()
555 r.congestion_event(now, packet::EPOCH_APPLICATION, now); in cubic_congestion_avoidance()
559 assert_eq!(r.cwnd(), cur_cwnd); in cubic_congestion_avoidance()
564 r.update_rtt(rtt, Duration::from_millis(0), now); in cubic_congestion_avoidance()
570 r.lost_count += RESTORE_COUNT_THRESHOLD; in cubic_congestion_avoidance()
578 size: r.max_datagram_size, in cubic_congestion_avoidance()
581 r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now); in cubic_congestion_avoidance()
585 assert_eq!(r.cwnd(), cur_cwnd + r.max_datagram_size); in cubic_congestion_avoidance()
593 let mut r = Recovery::new(&cfg); in cubic_collapse_cwnd_and_restart() localVariable
597 r.on_packet_sent_cc(30000, now); in cubic_collapse_cwnd_and_restart()
600 r.congestion_event(now, packet::EPOCH_APPLICATION, now); in cubic_collapse_cwnd_and_restart()
603 r.collapse_cwnd(); in cubic_collapse_cwnd_and_restart()
605 r.cwnd(), in cubic_collapse_cwnd_and_restart()
606 r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS in cubic_collapse_cwnd_and_restart()
613 size: r.max_datagram_size, in cubic_collapse_cwnd_and_restart()
616 r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now); in cubic_collapse_cwnd_and_restart()
620 r.cwnd(), in cubic_collapse_cwnd_and_restart()
621 r.max_datagram_size * (recovery::MINIMUM_WINDOW_PACKETS + 1) in cubic_collapse_cwnd_and_restart()
631 let mut r = Recovery::new(&cfg); in cubic_hystart_limited_slow_start() localVariable
642 size: r.max_datagram_size, in cubic_hystart_limited_slow_start()
655 r.hystart.start_round(pkt_num); in cubic_hystart_limited_slow_start()
661 r.on_packet_sent_cc(p.size, now); in cubic_hystart_limited_slow_start()
667 r.update_rtt( in cubic_hystart_limited_slow_start()
679 r.on_packets_acked(acked, epoch, now); in cubic_hystart_limited_slow_start()
683 assert_eq!(r.hystart.lss_start_time().is_some(), false); in cubic_hystart_limited_slow_start()
686 r.hystart.start_round(pkts_1st_round * 2); in cubic_hystart_limited_slow_start()
693 r.on_packet_sent_cc(p.size, now); in cubic_hystart_limited_slow_start()
698 let mut cwnd_prev = r.cwnd(); in cubic_hystart_limited_slow_start()
701 cwnd_prev = r.cwnd(); in cubic_hystart_limited_slow_start()
702 r.update_rtt( in cubic_hystart_limited_slow_start()
714 r.on_packets_acked(acked, epoch, now); in cubic_hystart_limited_slow_start()
721 assert_eq!(r.hystart.lss_start_time().is_some(), true); in cubic_hystart_limited_slow_start()
722 assert_eq!(r.cwnd(), cwnd_prev + r.max_datagram_size); in cubic_hystart_limited_slow_start()
725 r.on_packet_sent_cc(r.cwnd(), now); in cubic_hystart_limited_slow_start()
728 cwnd_prev = r.cwnd(); in cubic_hystart_limited_slow_start()
735 r.on_packets_acked(acked, epoch, now); in cubic_hystart_limited_slow_start()
739 assert_eq!(r.cwnd(), cwnd_prev + r.max_datagram_size); in cubic_hystart_limited_slow_start()
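For context on the HyStart test above: the first round stays in ordinary slow start, the second round feeds growing RTT samples until the delay-increase heuristic fires, and from then on try_enter_lss() has moved the sender into limited slow start, where a full window of acknowledged data adds only one datagram, as the last assertions check. A hedged sketch of a delay-increase trigger in the spirit of HyStart++; the thresholds here are illustrative and not taken from the crate's hystart module:

use std::time::Duration;

// Exit exponential slow start once this round's minimum RTT exceeds last
// round's minimum by a clamped fraction of it (HyStart++-style eta).
fn rtt_increase_detected(curr_round_min: Duration, last_round_min: Duration) -> bool {
    let eta = (last_round_min / 8)
        .clamp(Duration::from_millis(4), Duration::from_millis(16));
    curr_round_min >= last_round_min + eta
}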
747 let mut r = Recovery::new(&cfg); in cubic_spurious_congestion_event() localVariable
749 let prev_cwnd = r.cwnd(); in cubic_spurious_congestion_event()
753 r.on_packet_sent_cc(r.max_datagram_size, now); in cubic_spurious_congestion_event()
757 r.congestion_event(now, packet::EPOCH_APPLICATION, now); in cubic_spurious_congestion_event()
761 assert_eq!(r.cwnd(), cur_cwnd); in cubic_spurious_congestion_event()
769 size: r.max_datagram_size, in cubic_spurious_congestion_event()
773 r.update_rtt(rtt, Duration::from_millis(0), now); in cubic_spurious_congestion_event()
776 r.on_packets_acked( in cubic_spurious_congestion_event()
783 assert_eq!(r.cwnd(), prev_cwnd); in cubic_spurious_congestion_event()
791 let mut r = Recovery::new(&cfg); in cubic_fast_convergence() localVariable
793 let prev_cwnd = r.cwnd(); in cubic_fast_convergence()
797 r.on_packet_sent_cc(r.max_datagram_size, now); in cubic_fast_convergence()
801 r.congestion_event(now, packet::EPOCH_APPLICATION, now); in cubic_fast_convergence()
805 assert_eq!(r.cwnd(), cur_cwnd); in cubic_fast_convergence()
809 r.update_rtt(rtt, Duration::from_millis(0), now); in cubic_fast_convergence()
815 r.lost_count += RESTORE_COUNT_THRESHOLD; in cubic_fast_convergence()
823 size: r.max_datagram_size, in cubic_fast_convergence()
826 r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now); in cubic_fast_convergence()
830 assert_eq!(r.cwnd(), cur_cwnd + r.max_datagram_size); in cubic_fast_convergence()
832 let prev_cwnd = r.cwnd(); in cubic_fast_convergence()
837 r.congestion_event(now, packet::EPOCH_APPLICATION, now); in cubic_fast_convergence()
841 assert_eq!(r.cwnd(), cur_cwnd); in cubic_fast_convergence()
845 r.cubic_state.w_max, in cubic_fast_convergence()
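The final assertion continues past the last matching line; its expected value follows from the fast-convergence rule. Assuming the second congestion event fires while the window sits below the previously recorded w_max, the new w_max should be prev_cwnd * (1 + BETA_CUBIC) / 2 rather than prev_cwnd itself. A worked expectation with a hypothetical window size:

const BETA_CUBIC: f64 = 0.7;

// With, say, a 13_500-byte window at the second loss, fast convergence
// records w_max = 13_500 * 1.7 / 2 = 11_475 bytes.
fn expected_w_max(prev_cwnd_bytes: usize) -> f64 {
    prev_cwnd_bytes as f64 * (1.0 + BETA_CUBIC) / 2.0
}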