// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

//! Reno Congestion Control
//!
//! Note that Slow Start can use HyStart++ when enabled.
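//!
//! During slow start the congestion window grows by one MSS for every full
//! MSS worth of bytes acknowledged; during congestion avoidance it grows by
//! one MSS per window of acknowledged bytes; on a congestion event it is
//! multiplied by `recovery::LOSS_REDUCTION_FACTOR`.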

use std::cmp;
use std::time::Instant;

use crate::packet;
use crate::recovery;

use crate::recovery::Acked;
use crate::recovery::CongestionControlOps;
use crate::recovery::Recovery;

pub static RENO: CongestionControlOps = CongestionControlOps {
    on_packet_sent,
    on_packet_acked,
    congestion_event,
    collapse_cwnd,
    checkpoint,
    rollback,
    has_custom_pacing,
};

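// Updates the bytes in flight accounting when a packet is sent; Reno makes
// no window changes on send.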
pub fn on_packet_sent(r: &mut Recovery, sent_bytes: usize, _now: Instant) {
    r.bytes_in_flight += sent_bytes;
}

fn on_packet_acked(
    r: &mut Recovery, packet: &Acked, epoch: packet::Epoch, now: Instant,
) {
    r.bytes_in_flight = r.bytes_in_flight.saturating_sub(packet.size);

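    // Acks of packets sent before the current recovery period started do
    // not grow the window.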
    if r.in_congestion_recovery(packet.time_sent) {
        return;
    }

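    // When the sender is application limited the window is not grown, as
    // the current window is not being fully utilized.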
    if r.app_limited {
        return;
    }

    if r.congestion_window < r.ssthresh {
        // In slow start, bytes_acked_sl counts acknowledged bytes; the
        // window grows by one MSS for every full MSS acknowledged.
        r.bytes_acked_sl += packet.size;

        if r.bytes_acked_sl >= r.max_datagram_size {
            r.congestion_window += r.max_datagram_size;
            r.bytes_acked_sl -= r.max_datagram_size;
        }

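        // If HyStart++ is enabled and its heuristics (based on RTT
        // samples) indicate that slow start should end, enter Limited
        // Slow Start by pinning ssthresh to the current window.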
        if r.hystart.enabled() &&
            epoch == packet::EPOCH_APPLICATION &&
            r.hystart.try_enter_lss(
                packet,
                r.latest_rtt,
                r.congestion_window,
                now,
                r.max_datagram_size,
            )
        {
            r.ssthresh = r.congestion_window;
        }
    } else {
        // Congestion avoidance.
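        // Additive increase: the window grows by one MSS for every full
        // window's worth of bytes acknowledged (roughly once per RTT).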
        let mut reno_cwnd = r.congestion_window;

        r.bytes_acked_ca += packet.size;

        if r.bytes_acked_ca >= r.congestion_window {
            r.bytes_acked_ca -= r.congestion_window;
            reno_cwnd += r.max_datagram_size;
        }

        // When in Limited Slow Start, take the max of the CA cwnd and
        // the LSS cwnd.
        if r.hystart.in_lss(epoch) {
            let lss_cwnd_inc = r.hystart.lss_cwnd_inc(
                packet.size,
                r.congestion_window,
                r.ssthresh,
            );

            r.congestion_window =
                cmp::max(reno_cwnd, r.congestion_window + lss_cwnd_inc);
        } else {
            r.congestion_window = reno_cwnd;
        }
    }
}

fn congestion_event(
    r: &mut Recovery, time_sent: Instant, epoch: packet::Epoch, now: Instant,
) {
    // Start a new congestion event if the packet was sent after the
    // start of the previous congestion recovery period.
    if !r.in_congestion_recovery(time_sent) {
        r.congestion_recovery_start_time = Some(now);

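        // Multiplicative decrease: reduce the window by
        // LOSS_REDUCTION_FACTOR, flooring it at the minimum window.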
        r.congestion_window = (r.congestion_window as f64 *
            recovery::LOSS_REDUCTION_FACTOR)
            as usize;

        r.congestion_window = cmp::max(
            r.congestion_window,
            r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS,
        );

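        // Start the congestion avoidance counter at half of the reduced
        // window, so the first additive increase arrives after only half
        // a window of acked bytes.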
        r.bytes_acked_ca = (r.congestion_window as f64 *
            recovery::LOSS_REDUCTION_FACTOR) as usize;

        r.ssthresh = r.congestion_window;

        if r.hystart.in_lss(epoch) {
            r.hystart.congestion_event();
        }
    }
}

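// Collapses the window back to its minimum (e.g. when persistent congestion
// is detected) and resets the slow start and congestion avoidance counters.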
pub fn collapse_cwnd(r: &mut Recovery) {
    r.congestion_window = r.max_datagram_size * recovery::MINIMUM_WINDOW_PACKETS;
    r.bytes_acked_sl = 0;
    r.bytes_acked_ca = 0;
}

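// Reno keeps no extra state to snapshot, so checkpoint/rollback are no-ops.
// They exist for algorithms (e.g. CUBIC) that save and restore window state
// around a congestion event.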
fn checkpoint(_r: &mut Recovery) {}

fn rollback(_r: &mut Recovery) {}

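// Reno does not compute its own pacing rate; the default pacer is used.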
fn has_custom_pacing() -> bool {
    false
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::time::Duration;

    #[test]
    fn reno_init() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let r = Recovery::new(&cfg);

        assert!(r.cwnd() > 0);
        assert_eq!(r.bytes_in_flight, 0);
    }

    #[test]
    fn reno_send() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let now = Instant::now();

        r.on_packet_sent_cc(1000, now);

        assert_eq!(r.bytes_in_flight, 1000);
    }

    #[test]
    fn reno_slow_start() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let now = Instant::now();

        let p = recovery::Sent {
            pkt_num: 0,
            frames: vec![],
            time_sent: now,
            time_acked: None,
            time_lost: None,
            size: r.max_datagram_size,
            ack_eliciting: true,
            in_flight: true,
            delivered: 0,
            delivered_time: std::time::Instant::now(),
            recent_delivered_packet_sent_time: std::time::Instant::now(),
            is_app_limited: false,
            has_data: false,
        };

        // Send initcwnd full-MSS packets so the sender is no longer
        // app limited.
        for _ in 0..recovery::INITIAL_WINDOW_PACKETS {
            r.on_packet_sent_cc(p.size, now);
        }

        let cwnd_prev = r.cwnd();

        let acked = vec![Acked {
            pkt_num: p.pkt_num,
            time_sent: p.time_sent,
            size: p.size,
        }];

        r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now);

        // Check that cwnd increased by one packet size (slow start).
        assert_eq!(r.cwnd(), cwnd_prev + p.size);
    }

    #[test]
    fn reno_slow_start_multi_acks() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let now = Instant::now();

        let p = recovery::Sent {
            pkt_num: 0,
            frames: vec![],
            time_sent: now,
            time_acked: None,
            time_lost: None,
            size: r.max_datagram_size,
            ack_eliciting: true,
            in_flight: true,
            delivered: 0,
            delivered_time: std::time::Instant::now(),
            recent_delivered_packet_sent_time: std::time::Instant::now(),
            is_app_limited: false,
            has_data: false,
        };

        // Send initcwnd full-MSS packets so the sender is no longer
        // app limited.
        for _ in 0..recovery::INITIAL_WINDOW_PACKETS {
            r.on_packet_sent_cc(p.size, now);
        }

        let cwnd_prev = r.cwnd();

        let acked = vec![
            Acked {
                pkt_num: p.pkt_num,
                time_sent: p.time_sent,
                size: p.size,
            },
            Acked {
                pkt_num: p.pkt_num,
                time_sent: p.time_sent,
                size: p.size,
            },
            Acked {
                pkt_num: p.pkt_num,
                time_sent: p.time_sent,
                size: p.size,
            },
        ];

        r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now);

        // Acked 3 packets, so cwnd should grow by 3 x MSS.
        assert_eq!(r.cwnd(), cwnd_prev + p.size * 3);
    }

    #[test]
    fn reno_congestion_event() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);

        let prev_cwnd = r.cwnd();

        let now = Instant::now();

        r.congestion_event(now, packet::EPOCH_APPLICATION, now);

        // In Reno, cwnd is cut in half after a congestion event.
        assert_eq!(prev_cwnd / 2, r.cwnd());
    }

    #[test]
    fn reno_congestion_avoidance() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(recovery::CongestionControlAlgorithm::Reno);

        let mut r = Recovery::new(&cfg);
        let now = Instant::now();
        let prev_cwnd = r.cwnd();

        // Fill up bytes_in_flight to avoid app_limited=true.
        r.on_packet_sent_cc(20000, now);

        // Trigger a congestion event to update ssthresh.
        r.congestion_event(now, packet::EPOCH_APPLICATION, now);

        // After a congestion event, cwnd is reduced.
        let cur_cwnd =
            (prev_cwnd as f64 * recovery::LOSS_REDUCTION_FACTOR) as usize;
        assert_eq!(r.cwnd(), cur_cwnd);

        let rtt = Duration::from_millis(100);

        let acked = vec![Acked {
            pkt_num: 0,
            // Sent after recovery started, so this ack exits recovery.
            time_sent: now + rtt,
            // More than cur_cwnd bytes, to increase cwnd.
            size: 8000,
        }];

        // Ack more than cwnd bytes with rtt=100ms.
        r.update_rtt(rtt, Duration::from_millis(0), now);
        r.on_packets_acked(acked, packet::EPOCH_APPLICATION, now + rtt * 2);

        // After acking more than cwnd bytes, cwnd should grow by one MSS.
        assert_eq!(r.cwnd(), cur_cwnd + r.max_datagram_size);
    }
}