// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct net *net,
						  struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);

	peer->last_time_heard = 0;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt  = net->sctp.max_retrans_path;
	peer->pf_retrans  = net->sctp.pf_retrans;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
	timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
	timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
	timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0);
	timer_setup(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, 0);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	refcount_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(struct net *net,
					  const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = kzalloc(sizeof(*transport), gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(net, transport, addr, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}
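
/* A minimal usage sketch (hypothetical caller; names and error handling
 * here are illustrative, not taken from this file): a transport is
 * created against a peer address and released with sctp_transport_free(),
 * which ultimately drops the initial reference set in sctp_transport_init():
 *
 *	struct sctp_transport *t;
 *
 *	t = sctp_transport_new(net, &peer_addr, GFP_ATOMIC);
 *	if (!t)
 *		return -ENOMEM;
 *	sctp_transport_set_owner(t, asoc);
 *	...
 *	sctp_transport_free(t);
 */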

/* This transport is no longer needed.  Free it up if possible, or
 * delay destruction until its last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting the
	 * structure hang around in memory since we know
	 * the transport is going away.
	 */
	if (del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	if (del_timer(&transport->reconf_timer))
		sctp_transport_put(transport);

	if (del_timer(&transport->probe_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (del_timer(&transport->proto_unreach_timer))
		sctp_transport_put(transport);

	sctp_transport_put(transport);
}
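
/* Note on the pattern above: each armed timer holds its own reference on
 * the transport, taken via sctp_transport_hold() when the timer was
 * started.  del_timer() returns nonzero only when it deactivated a
 * pending timer, so sctp_transport_put() runs exactly once per reference
 * a timer was still holding; the final unconditional put drops the
 * reference taken at creation time.
 */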

static void sctp_transport_destroy_rcu(struct rcu_head *head)
{
	struct sctp_transport *transport;

	transport = container_of(head, struct sctp_transport, rcu);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	if (unlikely(refcount_read(&transport->refcnt))) {
		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
		return;
	}

	sctp_packet_free(&transport->packet);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not running,
	 * start it running so that it will expire after the RTO of that
	 * address.
	 */

	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);
}
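
/* mod_timer() returns 0 when the timer was inactive, so a reference is
 * taken exactly once per newly armed timer; it is dropped again either
 * when the timer fires or when it is deleted in sctp_transport_free().
 * The reconf, probe and heartbeat timers below follow the same pattern.
 */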

void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
{
	unsigned long expires;

	/* When a data chunk is sent, reset the heartbeat interval.  */
	expires = jiffies + sctp_transport_timeout(transport);
	if ((time_before(transport->hb_timer.expires, expires) ||
	     !timer_pending(&transport->hb_timer)) &&
	    !mod_timer(&transport->hb_timer,
		       expires + prandom_u32_max(transport->rto)))
		sctp_transport_hold(transport);
}

void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
{
	if (!timer_pending(&transport->reconf_timer))
		if (!mod_timer(&transport->reconf_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);
}

void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
{
	if (timer_pending(&transport->probe_timer))
		return;
	if (!mod_timer(&transport->probe_timer,
		       jiffies + transport->probe_interval))
		sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete) {
		sctp_transport_dst_release(transport);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	if (transport->param_flags & SPP_PMTUD_DISABLE) {
		struct sctp_association *asoc = transport->asoc;

		if (!transport->pathmtu && asoc && asoc->pathmtu)
			transport->pathmtu = asoc->pathmtu;
		if (transport->pathmtu)
			return;
	}

	if (transport->dst)
		transport->pathmtu = sctp_dst_mtu(transport->dst);
	else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;

	sctp_transport_pl_update(transport);
}

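/* The three functions below implement the Packetization Layer Path MTU
 * Discovery (PLPMTUD) probing state machine of RFC 8899 as applied to
 * SCTP.  A rough sketch of the transitions handled here, assuming the
 * SCTP_PL_* states correspond to the RFC's BASE/SEARCH/COMPLETE/ERROR:
 *
 *	BASE     --probe acked--> SEARCH   (grow probe_size in big steps)
 *	SEARCH   --probe acked--> COMPLETE (once probe_size reaches probe_high)
 *	COMPLETE --30 raises----> SEARCH   (periodically re-probe upward)
 *	BASE     --SCTP_MAX_PROBES failures at the base size--> ERROR
 *	SEARCH/COMPLETE --black hole detected--> BASE
 */
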
bool sctp_transport_pl_send(struct sctp_transport *t)
{
	if (t->pl.probe_count < SCTP_MAX_PROBES)
		goto out;

	t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
	t->pl.probe_count = 0;
	if (t->pl.state == SCTP_PL_BASE) {
		if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
			t->pl.state = SCTP_PL_ERROR; /* Base -> Error */

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		}
	} else if (t->pl.state == SCTP_PL_SEARCH) {
		if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
			t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;
			t->pl.probe_high = 0;

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		} else { /* Normal probe failure. */
			t->pl.probe_high = t->pl.probe_size;
			t->pl.probe_size = t->pl.pmtu;
		}
	} else if (t->pl.state == SCTP_PL_COMPLETE) {
		if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
			t->pl.state = SCTP_PL_BASE;  /* Search Complete -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		}
	}

out:
	if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
	    !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) {
		t->pl.raise_count++;
		return false;
	}

	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);

	t->pl.probe_count++;
	return true;
}

bool sctp_transport_pl_recv(struct sctp_transport *t)
{
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);

	t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
	t->pl.pmtu = t->pl.probe_size;
	t->pl.probe_count = 0;
	if (t->pl.state == SCTP_PL_BASE) {
		t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */
		t->pl.probe_size += SCTP_PL_BIG_STEP;
	} else if (t->pl.state == SCTP_PL_ERROR) {
		t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */

		t->pl.pmtu = t->pl.probe_size;
		t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
		sctp_assoc_sync_pmtu(t->asoc);
		t->pl.probe_size += SCTP_PL_BIG_STEP;
	} else if (t->pl.state == SCTP_PL_SEARCH) {
		if (!t->pl.probe_high) {
			if (t->pl.probe_size < SCTP_MAX_PLPMTU) {
				t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
						       SCTP_MAX_PLPMTU);
				return false;
			}
			t->pl.probe_high = SCTP_MAX_PLPMTU;
		}
		t->pl.probe_size += SCTP_PL_MIN_STEP;
		if (t->pl.probe_size >= t->pl.probe_high) {
			t->pl.probe_high = 0;
			t->pl.raise_count = 0;
			t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */

			t->pl.probe_size = t->pl.pmtu;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		}
	} else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
		/* Raise probe_size again after 30 * interval in Search Complete */
		t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
		t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU);
	}

	return t->pl.state == SCTP_PL_COMPLETE;
}

static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
{
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu);

	if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size)
		return false;

	if (t->pl.state == SCTP_PL_BASE) {
		if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) {
			t->pl.state = SCTP_PL_ERROR; /* Base -> Error */

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			return true;
		}
	} else if (t->pl.state == SCTP_PL_SEARCH) {
		if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
			t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;
			t->pl.probe_count = 0;

			t->pl.probe_high = 0;
			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			return true;
		} else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) {
			t->pl.probe_size = pmtu;
			t->pl.probe_count = 0;
		}
	} else if (t->pl.state == SCTP_PL_COMPLETE) {
		if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
			t->pl.state = SCTP_PL_BASE;  /* Complete -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;
			t->pl.probe_count = 0;

			t->pl.probe_high = 0;
			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			return true;
		}
	}

	return false;
}
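
/* Per RFC 8899, a PTB (ICMP "Packet Too Big") message is only trusted
 * when the reported MTU lies between the minimum PLPMTU and the size of
 * the probe currently in flight; the early return above discards
 * anything outside that window, which keeps spoofed or stale ICMP from
 * dragging the path MTU below the base value.
 */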

bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
	struct sock *sk = t->asoc->base.sk;
	struct dst_entry *dst;
	bool change = true;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
				    __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment instead */
		pmtu = SCTP_DEFAULT_MINSEGMENT;
	}
	pmtu = SCTP_TRUNC4(pmtu);

	if (sctp_transport_pl_enabled(t))
		return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t));

	dst = sctp_transport_dst_check(t);
	if (dst) {
		struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
		union sctp_addr addr;

		pf->af->from_sk(&addr, sk);
		pf->to_sk_daddr(&t->ipaddr, sk);
		dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
		pf->to_sk_daddr(&addr, sk);

		dst = sctp_transport_dst_check(t);
	}

	if (!dst) {
		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
		dst = t->dst;
	}

	if (dst) {
		/* Re-fetch, as under layers may have a higher minimum size */
		pmtu = sctp_dst_mtu(dst);
		change = t->pathmtu != pmtu;
	}
	t->pathmtu = pmtu;

	return change;
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;

	sctp_transport_dst_release(transport);
	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, transport, &transport->fl);

	sctp_transport_pmtu(transport, sctp_opt2sk(opt));

	/* Initialize sk->sk_rcv_saddr, if the transport is the
	 * association's active path for getsockname().
	 */
	if (transport->dst && asoc &&
	    (!asoc->peer.primary_path || transport == asoc->peer.active_path))
		opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk);
}

/* Hold a reference to a transport.  */
int sctp_transport_hold(struct sctp_transport *transport)
{
	return refcount_inc_not_zero(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (refcount_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	if (unlikely(!tp->rto_pending))
		/* We should not be doing any RTO updates unless rto_pending is set.  */
		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);

	if (tp->rttvar || tp->srtt) {
		struct net *net = tp->asoc->base.net;
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note:  The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
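		/* A worked example with illustrative numbers: assuming the
		 * defaults rto_alpha = 3 (RTO.Alpha = 1/8) and rto_beta = 2
		 * (RTO.Beta = 1/4), with srtt = 800, rttvar = 100 and a new
		 * measurement rtt = 1000:
		 *
		 *	rttvar = 100 - (100 >> 2) + (|800 - 1000| >> 2) = 125
		 *	srtt   = 800 - (800 >> 3) + (1000 >> 3)         = 825
		 *	rto    = 825 + (125 << 2)                       = 1325
		 */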
		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
			+ (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
			+ (rtt >> net->sctp.rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	sctp_max_rto(tp->asoc, tp);
	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
		if (asoc->fast_recovery)
			return;

		/* The appropriate cwnd increase algorithm is performed
		 * if, and only if the congestion window is being fully
		 * utilized.  Note that RFC4960 Errata 3.22 removed the
		 * other condition on ctsn moving.
		 */
		if (flight_size < cwnd)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;

		pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
			 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
			 __func__, transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival, increase partial_bytes_acked
		 * by the total number of bytes of all new chunks
		 * acknowledged in that SACK including chunks
		 * acknowledged by the new Cumulative TSN Ack and by Gap
		 * Ack Blocks. (updated by RFC4960 Errata 3.22)
		 *
		 * When partial_bytes_acked is greater than cwnd and
		 * before the arrival of the SACK the sender had less
		 * bytes of data outstanding than cwnd (i.e., before
		 * arrival of the SACK, flightsize was less than cwnd),
		 * reset partial_bytes_acked to cwnd. (RFC 4960 Errata
		 * 3.26)
		 *
		 * When partial_bytes_acked is equal to or greater than
		 * cwnd and before the arrival of the SACK the sender
		 * had cwnd or more bytes of data outstanding (i.e.,
		 * before arrival of the SACK, flightsize was greater
		 * than or equal to cwnd), partial_bytes_acked is reset
		 * to (partial_bytes_acked - cwnd). Next, cwnd is
		 * increased by MTU. (RFC 4960 Errata 3.12)
		 */
		pba += bytes_acked;
		if (pba > cwnd && flight_size < cwnd)
			pba = cwnd;
		if (pba >= cwnd && flight_size >= cwnd) {
			pba = pba - cwnd;
			cwnd += pmtu;
		}

		pr_debug("%s: congestion avoidance: transport:%p, "
			 "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__,
			 transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}
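
/* A worked congestion-avoidance example (values illustrative): with
 * cwnd = 20000, ssthresh = 10000, pmtu = 1500 and flight_size >= cwnd,
 * each SACK adds its bytes_acked to partial_bytes_acked; once pba
 * reaches 20000, pba is reduced by cwnd and cwnd grows by one MTU to
 * 21500 -- roughly one MTU per window of acknowledged data, as RFC 4960
 * section 7.2.2 intends.
 */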

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       enum sctp_lower_cwnd reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide: Upon detection of packet
		 * losses from SACK (see Section 7.2.4), an endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
					 4*asoc->pathmtu);
		/* RFC 4960 Errata 3.27.2: also adjust ssthresh */
		transport->ssthresh = transport->cwnd;
		break;
	}

	transport->partial_bytes_acked = 0;

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
		 __func__, transport, reason, transport->cwnd,
		 transport->ssthresh);
}

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 *	if ((flightsize + Max.Burst * MTU) < cwnd)
 *		cwnd = flightsize + Max.Burst * MTU
 */

void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited || asoc->max_burst == 0)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}
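
/* For example (values illustrative): with max_burst = 4, pathmtu = 1500
 * and flight_size = 3000, the clamp is 3000 + 4 * 1500 = 9000; a cwnd of
 * 20000 is temporarily lowered to 9000 and the old value parked in
 * t->burst_limited until sctp_transport_burst_reset() restores it.
 */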

/* Restore the old cwnd congestion window, after the burst has had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *trans)
{
	/* RTO + timer slack +/- 50% of RTO */
	unsigned long timeout = trans->rto >> 1;

	if (trans->state != SCTP_UNCONFIRMED &&
	    trans->state != SCTP_PF)
		timeout += trans->hbinterval;

	return max_t(unsigned long, timeout, HZ / 5);
}
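
/* Combined with the random offset added in sctp_transport_reset_hb_timer()
 * (up to one full RTO), the value above makes the heartbeat fire
 * hbinterval + RTO/2 + [0, RTO) after the last send -- i.e. roughly
 * hbinterval + RTO with +/-50% jitter, and never sooner than HZ / 5
 * jiffies.
 */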

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	sctp_max_rto(asoc, t);
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean slate. */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}
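
/* The initial cwnd formula above is the one from RFC 4960 section 7.2.1:
 * min(4 * MTU, max(2 * MTU, 4380 bytes)), i.e. 4380 bytes for the common
 * 1500-byte MTU case (2 * 1500 = 3000 < 4380 < 4 * 1500 = 6000).
 */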

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
}

/* Drop dst */
void sctp_transport_dst_release(struct sctp_transport *t)
{
	dst_release(t->dst);
	t->dst = NULL;
	t->dst_pending_confirm = 0;
}

/* Schedule neighbour confirm */
void sctp_transport_dst_confirm(struct sctp_transport *t)
{
	t->dst_pending_confirm = 1;
}