// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

//! Reno Congestion Control
//!
//! Note that Slow Start can use HyStart++ when enabled.
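//!
//! A minimal usage sketch for selecting Reno on a connection's configuration
//! (this assumes the crate re-exports `CongestionControlAlgorithm` at its top
//! level; the tests below reach it via `recovery::CongestionControlAlgorithm`):
//!
//! ```no_run
//! // Select Reno as the congestion control algorithm for new connections.
//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
//! config.set_cc_algorithm(quiche::CongestionControlAlgorithm::Reno);
//! ```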

use std::cmp;
use std::time::Instant;

use crate::packet;
use crate::recovery;

use crate::recovery::Acked;
use crate::recovery::CongestionControlOps;
use crate::recovery::Recovery;

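/// The set of hooks that the generic recovery machinery dispatches through
/// when Reno congestion control is in use.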
pub static RENO: CongestionControlOps = CongestionControlOps {
    on_packet_sent,
    on_packet_acked,
    congestion_event,
    collapse_cwnd,
};

pub fn on_packet_sent(r: &mut Recovery, sent_bytes: usize, _now: Instant) {
    r.bytes_in_flight += sent_bytes;
}

fn on_packet_acked(
    r: &mut Recovery, packet: &Acked, epoch: packet::Epoch, now: Instant,
) {
    r.bytes_in_flight = r.bytes_in_flight.saturating_sub(packet.size);

    if r.in_congestion_recovery(packet.time_sent) {
        return;
    }

    if r.app_limited {
        return;
    }

    if r.congestion_window < r.ssthresh {
        // Slow start.
        if r.hystart.enabled() && epoch == packet::EPOCH_APPLICATION {
            let (cwnd, ssthresh) = r.hystart_on_packet_acked(packet, now);

            r.congestion_window = cwnd;
            r.ssthresh = ssthresh;
        } else {
            r.congestion_window += packet.size;
        }
    } else {
        // Congestion avoidance.
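        //
        // Standard Reno additive increase: accumulate acked bytes and, once
        // a full congestion window's worth has been acked, grow cwnd by one
        // maximum datagram size (roughly one MSS per RTT).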
        let mut reno_cwnd = r.congestion_window;

        r.bytes_acked += packet.size;

        if r.bytes_acked >= r.congestion_window {
            r.bytes_acked -= r.congestion_window;
            reno_cwnd += recovery::MAX_DATAGRAM_SIZE;
        }

        // When in Limited Slow Start, take the max of CA cwnd and
        // LSS cwnd.
        if r.hystart.enabled() &&
            epoch == packet::EPOCH_APPLICATION &&
            r.hystart.lss_start_time().is_some()
        {
            let (lss_cwnd, _) = r.hystart_on_packet_acked(packet, now);

            reno_cwnd = cmp::max(reno_cwnd, lss_cwnd);
        }

        r.congestion_window = reno_cwnd;
    }
}

fn congestion_event(
    r: &mut Recovery, time_sent: Instant, epoch: packet::Epoch, now: Instant,
) {
    // Start a new congestion event if packet was sent after the
    // start of the previous congestion recovery period.
    if !r.in_congestion_recovery(time_sent) {
        r.congestion_recovery_start_time = Some(now);

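        // Multiplicative decrease: scale cwnd down by the loss reduction
        // factor, floored at the minimum window, and set ssthresh to the
        // reduced value so the connection continues in congestion avoidance.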
        r.congestion_window = (r.congestion_window as f64 *
            recovery::LOSS_REDUCTION_FACTOR)
            as usize;

        r.congestion_window =
            cmp::max(r.congestion_window, recovery::MINIMUM_WINDOW);

        r.bytes_acked = (r.congestion_window as f64 *
            recovery::LOSS_REDUCTION_FACTOR) as usize;

        r.ssthresh = r.congestion_window;

        if r.hystart.enabled() && epoch == packet::EPOCH_APPLICATION {
            r.hystart.congestion_event();
        }
    }
}

pub fn collapse_cwnd(r: &mut Recovery) {
    r.congestion_window = recovery::MINIMUM_WINDOW;
    r.bytes_acked = 0;
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::time::Duration;

    #[test]
    fn reno_init() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let r = Recovery::new(&cfg);

        assert!(r.cwnd() > 0);
        assert_eq!(r.bytes_in_flight, 0);
    }

    #[test]
    fn reno_send() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let now = Instant::now();

        r.on_packet_sent_cc(1000, now);

        assert_eq!(r.bytes_in_flight, 1000);
    }

    #[test]
    fn reno_slow_start() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let now = Instant::now();

        let p = recovery::Sent {
            pkt_num: 0,
            frames: vec![],
            time_sent: now,
            time_acked: None,
            time_lost: None,
            size: 5000,
            ack_eliciting: true,
            in_flight: true,
            delivered: 0,
            delivered_time: std::time::Instant::now(),
            recent_delivered_packet_sent_time: std::time::Instant::now(),
            is_app_limited: false,
            has_data: false,
        };

        // Send 4 packets of 5000 bytes (20000 bytes total), more than the
        // default cwnd (~15000 bytes), so the connection is no longer
        // app-limited.
        r.on_packet_sent_cc(p.size, now);
        r.on_packet_sent_cc(p.size, now);
        r.on_packet_sent_cc(p.size, now);
        r.on_packet_sent_cc(p.size, now);

        let cwnd_prev = r.cwnd();

        let acked = vec![Acked {
            pkt_num: p.pkt_num,
            time_sent: p.time_sent,
            size: p.size,
        }];

        r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now);

        // Check that cwnd increased by the packet size (slow start).
        assert_eq!(r.cwnd(), cwnd_prev + p.size);
    }

    #[test]
    fn reno_congestion_event() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let prev_cwnd = r.cwnd();

        let now = Instant::now();

        r.congestion_event(now, packet::EPOCH_APPLICATION, now);

        // In Reno, after a congestion event, cwnd is cut in half.
        assert_eq!(prev_cwnd / 2, r.cwnd());
    }

    #[test]
    fn reno_congestion_avoidance() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();
        let prev_cwnd = r.cwnd();

        // Fill up bytes_in_flight so the connection is not app-limited.
        r.on_packet_sent_cc(20000, now);

        // Trigger a congestion event to update ssthresh.
        r.congestion_event(now, packet::EPOCH_APPLICATION, now);

        // After the congestion event, cwnd is reduced.
        let cur_cwnd =
            (prev_cwnd as f64 * recovery::LOSS_REDUCTION_FACTOR) as usize;
        assert_eq!(r.cwnd(), cur_cwnd);

        let rtt = Duration::from_millis(100);

        let acked = vec![Acked {
            pkt_num: 0,
            // Sent after the congestion event, so this ack exits recovery.
            time_sent: now + rtt,
            // More than cur_cwnd, to increase cwnd.
            size: 8000,
        }];

        // Ack more than cwnd bytes with rtt=100ms.
        r.update_rtt(rtt, Duration::from_millis(0), now);
        r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now + rtt * 2);

        // After acking more than cwnd bytes, expect cwnd to increase by MSS.
        assert_eq!(r.cwnd(), cur_cwnd + recovery::MAX_DATAGRAM_SIZE);
    }
}