/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach a new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		pr_notice("%s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_info("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove a congestion control algorithm, called from
 * the module's remove function. Module ref counts are used
 * to ensure that this can't be done until all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
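
/* A minimal sketch of how an out-of-tree module would pair the two calls
 * above; the "example" name and init/exit functions are hypothetical, and
 * the mandatory ops are borrowed from the Reno helpers exported below:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *	module_init(tcp_example_register);
 *
 *	static void __exit tcp_example_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *	module_exit(tcp_example_unregister);
 */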

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (likely(try_module_get(ca->owner))) {
			icsk->icsk_ca_ops = ca;
			goto out;
		}
		/* Fall back to the next available. The last really
		 * guaranteed fallback is Reno from this list.
		 */
	}
out:
	rcu_read_unlock();

	/* Clear out private data before diag can read it, since
	 * the ca has not been initialized yet.
	 */
	if (ca->get_info)
		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
}

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change the default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
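
/* Userspace normally reaches this through the sysctl it backs, e.g.
 * (assuming a "cubic" implementation is built in or loadable as a module):
 *
 *	# sysctl net.ipv4.tcp_congestion_control=cubic
 *
 * which moves the chosen algorithm to the front of tcp_cong_list.
 */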

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}
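
/* The assembled string is exposed read-only through procfs, e.g.:
 *
 *	$ cat /proc/sys/net/ipv4/tcp_available_congestion_control
 *	cubic reno
 *
 * (the exact output depends on which algorithms are built in or loaded).
 */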

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;

	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change the list of non-restricted congestion controls */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}
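
/* The allowed list is likewise sysctl-driven; a sketch of typical usage:
 *
 *	# echo "reno cubic" > /proc/sys/net/ipv4/tcp_allowed_congestion_control
 *
 * after which callers without CAP_NET_ADMIN can only select those two
 * algorithms on a socket.
 */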

/* Change congestion control for a socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* No change if asking for the value already in use. */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}
#ifdef CONFIG_MODULES
	/* Not found: attempt to autoload the module. */
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
		   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
		err = -EPERM;
	else if (!try_module_get(ca->owner))
		err = -EBUSY;
	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;
		icsk->icsk_ca_setsockopt = 1;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
out:
	rcu_read_unlock();
	return err;
}
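
/* Reached via setsockopt(TCP_CONGESTION); a minimal userspace sketch,
 * assuming fd is a TCP socket:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "reno", strlen("reno"));
 *
 * Callers without CAP_NET_ADMIN are limited to TCP_CONG_NON_RESTRICTED
 * algorithms and cannot trigger module autoload.
 */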

/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base on RFC2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend against the ACK attacks described in the RFC. Slow start processes a
 * stretch ACK of degree N as if N acks of degree 1 are received back to back.
 * Slow start exits once cwnd grows over ssthresh: the window is then capped
 * at ssthresh + 1 (and always at snd_cwnd_clamp), and further growth is
 * handled in congestion avoidance mode.
 */
void tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = tp->snd_cwnd + acked;

	if (cwnd > tp->snd_ssthresh)
		cwnd = tp->snd_ssthresh + 1;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
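
/* Worked example: with snd_cwnd = 8, snd_ssthresh = 10 and a stretch ACK
 * covering 5 packets, cwnd would be 8 + 5 = 13, which exceeds ssthresh and
 * is therefore capped to 10 + 1 = 11 (assuming snd_cwnd_clamp permits it).
 */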

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
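
/* Worked example: with w = snd_cwnd = 10, each call increments snd_cwnd_cnt
 * until it reaches 10; the following call then bumps snd_cwnd to 11 and
 * resets the counter. That is roughly one window of ACKs (about one RTT)
 * per cwnd increment: classic additive increase.
 */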

/*
 * TCP Reno congestion control
 * This is a special case, used as the fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);
	/* In dangerous area, increase slowly. */
	else
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
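
/* Worked example: a loss with snd_cwnd = 20 yields ssthresh = 10; with
 * snd_cwnd = 3 the halved value (1) is raised to the floor of 2.
 */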

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
};