1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* SCTP kernel implementation
3  * (C) Copyright IBM Corp. 2001, 2004
4  * Copyright (c) 1999-2000 Cisco, Inc.
5  * Copyright (c) 1999-2001 Motorola, Inc.
6  * Copyright (c) 2001 Intel Corp.
7  * Copyright (c) 2001 La Monte H.P. Yarroll
8  *
9  * This file is part of the SCTP kernel implementation
10  *
11  * This module provides the abstraction for an SCTP association.
12  *
13  * Please send any bug reports or fixes you make to the
14  * email address(es):
15  *    lksctp developers <linux-sctp@vger.kernel.org>
16  *
17  * Written or modified by:
18  *    La Monte H.P. Yarroll <piggy@acm.org>
19  *    Karl Knutson          <karl@athena.chicago.il.us>
20  *    Jon Grimm             <jgrimm@us.ibm.com>
21  *    Xingang Guo           <xingang.guo@intel.com>
22  *    Hui Huang             <hui.huang@nokia.com>
23  *    Sridhar Samudrala	    <sri@us.ibm.com>
24  *    Daisy Chang	    <daisyc@us.ibm.com>
25  *    Ryan Layer	    <rmlayer@us.ibm.com>
26  *    Kevin Gao             <kevin.gao@intel.com>
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/types.h>
32 #include <linux/fcntl.h>
33 #include <linux/poll.h>
34 #include <linux/init.h>
35 
36 #include <linux/slab.h>
37 #include <linux/in.h>
38 #include <net/ipv6.h>
39 #include <net/sctp/sctp.h>
40 #include <net/sctp/sm.h>
41 
42 /* Forward declarations for internal functions. */
43 static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
44 static void sctp_assoc_bh_rcv(struct work_struct *work);
45 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
46 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
47 
48 /* 1st Level Abstractions. */
49 
50 /* Initialize a new association from provided memory. */
51 static struct sctp_association *sctp_association_init(
52 					struct sctp_association *asoc,
53 					const struct sctp_endpoint *ep,
54 					const struct sock *sk,
55 					enum sctp_scope scope, gfp_t gfp)
56 {
57 	struct sctp_sock *sp;
58 	struct sctp_paramhdr *p;
59 	int i;
60 
61 	/* Retrieve the SCTP per socket area.  */
62 	sp = sctp_sk((struct sock *)sk);
63 
64 	/* Discarding const is appropriate here.  */
65 	asoc->ep = (struct sctp_endpoint *)ep;
66 	asoc->base.sk = (struct sock *)sk;
67 	asoc->base.net = sock_net(sk);
68 
69 	sctp_endpoint_hold(asoc->ep);
70 	sock_hold(asoc->base.sk);
71 
72 	/* Initialize the common base substructure.  */
73 	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
74 
75 	/* Initialize the object handling fields.  */
76 	refcount_set(&asoc->base.refcnt, 1);
77 
78 	/* Initialize the bind addr area.  */
79 	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
80 
81 	asoc->state = SCTP_STATE_CLOSED;
82 	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
83 	asoc->user_frag = sp->user_frag;
84 
85 	/* Set the association max_retrans and RTO values from the
86 	 * socket values.
87 	 */
88 	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
89 	asoc->pf_retrans  = sp->pf_retrans;
90 
91 	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
92 	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
93 	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
94 
95 	/* Initialize the association's heartbeat interval based on the
96 	 * sock configured value.
97 	 */
98 	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
99 
100 	/* Initialize path max retrans value. */
101 	asoc->pathmaxrxt = sp->pathmaxrxt;
102 
103 	asoc->flowlabel = sp->flowlabel;
104 	asoc->dscp = sp->dscp;
105 
106 	/* Set association default SACK delay */
107 	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
108 	asoc->sackfreq = sp->sackfreq;
109 
110 	/* Set the association default flags controlling
111 	 * Heartbeat, SACK delay, and Path MTU Discovery.
112 	 */
113 	asoc->param_flags = sp->param_flags;
114 
115 	/* Initialize the maximum number of new data packets that can be sent
116 	 * in a burst.
117 	 */
118 	asoc->max_burst = sp->max_burst;
119 
120 	asoc->subscribe = sp->subscribe;
121 
122 	/* initialize association timers */
123 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
124 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
125 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
126 
127 	/* sctpimpguide Section 2.12.2
128 	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
129 	 * recommended value of 5 times 'RTO.Max'.
130 	 */
131 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
132 		= 5 * asoc->rto_max;
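	/* For illustration only (not part of the original source): with the
	 * RFC 4960 default RTO.Max of 60 seconds, the guard timer above works
	 * out to 5 * 60 s = 300 s before a stuck shutdown sequence is given up.
	 */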
133 
134 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
135 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
136 
137 	/* Initializes the timers */
138 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
139 		timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);
140 
141 	/* Pull default initialization values from the sock options.
142 	 * Note: This assumes that the values have already been
143 	 * validated in the sock.
144 	 */
145 	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
146 	asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
147 	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;
148 
149 	asoc->max_init_timeo =
150 		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
151 
152 	/* Set the local window size for receive.
153 	 * This is also the rcvbuf space per association.
154 	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
155 	 * 1500 bytes in one SCTP packet.
156 	 */
157 	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
158 		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
159 	else
160 		asoc->rwnd = sk->sk_rcvbuf/2;
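	/* For illustration only: with a hypothetical sk_rcvbuf of 200000 bytes
	 * the initial rwnd is 100000; the SCTP_DEFAULT_MINWINDOW clamp only
	 * kicks in when half the receive buffer would fall below 1500 bytes.
	 */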
161 
162 	asoc->a_rwnd = asoc->rwnd;
163 
164 	/* Use my own max window until I learn something better.  */
165 	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
166 
167 	/* Initialize the receive memory counter */
168 	atomic_set(&asoc->rmem_alloc, 0);
169 
170 	init_waitqueue_head(&asoc->wait);
171 
172 	asoc->c.my_vtag = sctp_generate_tag(ep);
173 	asoc->c.my_port = ep->base.bind_addr.port;
174 
175 	asoc->c.initial_tsn = sctp_generate_tsn(ep);
176 
177 	asoc->next_tsn = asoc->c.initial_tsn;
178 
179 	asoc->ctsn_ack_point = asoc->next_tsn - 1;
180 	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
181 	asoc->highest_sacked = asoc->ctsn_ack_point;
182 	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
183 
184 	/* ADDIP Section 4.1 Asconf Chunk Procedures
185 	 *
186 	 * When an endpoint has an ASCONF signaled change to be sent to the
187 	 * remote endpoint it should do the following:
188 	 * ...
189 	 * A2) a serial number should be assigned to the chunk. The serial
190 	 * number SHOULD be a monotonically increasing number. The serial
191 	 * numbers SHOULD be initialized at the start of the
192 	 * association to the same value as the initial TSN.
193 	 */
194 	asoc->addip_serial = asoc->c.initial_tsn;
195 	asoc->strreset_outseq = asoc->c.initial_tsn;
196 
197 	INIT_LIST_HEAD(&asoc->addip_chunk_list);
198 	INIT_LIST_HEAD(&asoc->asconf_ack_list);
199 
200 	/* Make an empty list of remote transport addresses.  */
201 	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
202 
203 	/* RFC 2960 5.1 Normal Establishment of an Association
204 	 *
205 	 * After the reception of the first data chunk in an
206 	 * association the endpoint must immediately respond with a
207 	 * sack to acknowledge the data chunk.  Subsequent
208 	 * acknowledgements should be done as described in Section
209 	 * 6.2.
210 	 *
211 	 * [We implement this by telling a new association that it
212 	 * already received one packet.]
213 	 */
214 	asoc->peer.sack_needed = 1;
215 	asoc->peer.sack_generation = 1;
216 
217 	/* Create an input queue.  */
218 	sctp_inq_init(&asoc->base.inqueue);
219 	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
220 
221 	/* Create an output queue.  */
222 	sctp_outq_init(asoc, &asoc->outqueue);
223 
224 	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
225 		goto fail_init;
226 
227 	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
228 			     0, gfp))
229 		goto fail_init;
230 
231 	/* Initialize default path MTU. */
232 	asoc->pathmtu = sp->pathmtu;
233 	sctp_assoc_update_frag_point(asoc);
234 
235 	/* Assume that peer would support both address types unless we are
236 	 * told otherwise.
237 	 */
238 	asoc->peer.ipv4_address = 1;
239 	if (asoc->base.sk->sk_family == PF_INET6)
240 		asoc->peer.ipv6_address = 1;
241 	INIT_LIST_HEAD(&asoc->asocs);
242 
243 	asoc->default_stream = sp->default_stream;
244 	asoc->default_ppid = sp->default_ppid;
245 	asoc->default_flags = sp->default_flags;
246 	asoc->default_context = sp->default_context;
247 	asoc->default_timetolive = sp->default_timetolive;
248 	asoc->default_rcv_context = sp->default_rcv_context;
249 
250 	/* AUTH related initializations */
251 	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
252 	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
253 		goto stream_free;
254 
255 	asoc->active_key_id = ep->active_key_id;
256 	asoc->strreset_enable = ep->strreset_enable;
257 
258 	/* Save the hmacs and chunks list into this association */
259 	if (ep->auth_hmacs_list)
260 		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
261 			ntohs(ep->auth_hmacs_list->param_hdr.length));
262 	if (ep->auth_chunk_list)
263 		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
264 			ntohs(ep->auth_chunk_list->param_hdr.length));
265 
266 	/* Get the AUTH random number for this association */
267 	p = (struct sctp_paramhdr *)asoc->c.auth_random;
268 	p->type = SCTP_PARAM_RANDOM;
269 	p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
270 	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
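	/* For illustration only, assuming SCTP_AUTH_RANDOM_LENGTH is 32 as in
	 * current kernels: the parameter built above is a 4-byte header
	 * followed by 32 random bytes, so p->length ends up as htons(36).
	 */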
271 
272 	return asoc;
273 
274 stream_free:
275 	sctp_stream_free(&asoc->stream);
276 fail_init:
277 	sock_put(asoc->base.sk);
278 	sctp_endpoint_put(asoc->ep);
279 	return NULL;
280 }
281 
282 /* Allocate and initialize a new association */
283 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
284 					      const struct sock *sk,
285 					      enum sctp_scope scope, gfp_t gfp)
286 {
287 	struct sctp_association *asoc;
288 
289 	asoc = kzalloc(sizeof(*asoc), gfp);
290 	if (!asoc)
291 		goto fail;
292 
293 	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
294 		goto fail_init;
295 
296 	SCTP_DBG_OBJCNT_INC(assoc);
297 
298 	pr_debug("Created asoc %p\n", asoc);
299 
300 	return asoc;
301 
302 fail_init:
303 	kfree(asoc);
304 fail:
305 	return NULL;
306 }
307 
308 /* Free this association if possible.  There may still be users, so
309  * the actual deallocation may be delayed.
310  */
311 void sctp_association_free(struct sctp_association *asoc)
312 {
313 	struct sock *sk = asoc->base.sk;
314 	struct sctp_transport *transport;
315 	struct list_head *pos, *temp;
316 	int i;
317 
318 	/* Only real associations count against the endpoint, so
319 	 * don't bother if this is a temporary association.
320 	 */
321 	if (!list_empty(&asoc->asocs)) {
322 		list_del(&asoc->asocs);
323 
324 		/* Decrement the backlog value for a TCP-style listening
325 		 * socket.
326 		 */
327 		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
328 			sk->sk_ack_backlog--;
329 	}
330 
331 	/* Mark as dead, so other users can know this structure is
332 	 * going away.
333 	 */
334 	asoc->base.dead = true;
335 
336 	/* Dispose of any data lying around in the outqueue. */
337 	sctp_outq_free(&asoc->outqueue);
338 
339 	/* Dispose of any pending messages for the upper layer. */
340 	sctp_ulpq_free(&asoc->ulpq);
341 
342 	/* Dispose of any pending chunks on the inqueue. */
343 	sctp_inq_free(&asoc->base.inqueue);
344 
345 	sctp_tsnmap_free(&asoc->peer.tsn_map);
346 
347 	/* Free stream information. */
348 	sctp_stream_free(&asoc->stream);
349 
350 	if (asoc->strreset_chunk)
351 		sctp_chunk_free(asoc->strreset_chunk);
352 
353 	/* Clean up the bound address list. */
354 	sctp_bind_addr_free(&asoc->base.bind_addr);
355 
356 	/* Do we need to go through all of our timers and
357 	 * delete them?   To be safe we will try to delete all, but we
358 	 * should be able to go through and make a guess based
359 	 * on our state.
360 	 */
361 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
362 		if (del_timer(&asoc->timers[i]))
363 			sctp_association_put(asoc);
364 	}
365 
366 	/* Free peer's cached cookie. */
367 	kfree(asoc->peer.cookie);
368 	kfree(asoc->peer.peer_random);
369 	kfree(asoc->peer.peer_chunks);
370 	kfree(asoc->peer.peer_hmacs);
371 
372 	/* Release the transport structures. */
373 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
374 		transport = list_entry(pos, struct sctp_transport, transports);
375 		list_del_rcu(pos);
376 		sctp_unhash_transport(transport);
377 		sctp_transport_free(transport);
378 	}
379 
380 	asoc->peer.transport_count = 0;
381 
382 	sctp_asconf_queue_teardown(asoc);
383 
384 	/* Free pending address space being deleted */
385 	kfree(asoc->asconf_addr_del_pending);
386 
387 	/* AUTH - Free the endpoint shared keys */
388 	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
389 
390 	/* AUTH - Free the association shared key */
391 	sctp_auth_key_put(asoc->asoc_shared_key);
392 
393 	sctp_association_put(asoc);
394 }
395 
396 /* Cleanup and free up an association. */
397 static void sctp_association_destroy(struct sctp_association *asoc)
398 {
399 	if (unlikely(!asoc->base.dead)) {
400 		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
401 		return;
402 	}
403 
404 	sctp_endpoint_put(asoc->ep);
405 	sock_put(asoc->base.sk);
406 
407 	if (asoc->assoc_id != 0) {
408 		spin_lock_bh(&sctp_assocs_id_lock);
409 		idr_remove(&sctp_assocs_id, asoc->assoc_id);
410 		spin_unlock_bh(&sctp_assocs_id_lock);
411 	}
412 
413 	WARN_ON(atomic_read(&asoc->rmem_alloc));
414 
415 	kfree_rcu(asoc, rcu);
416 	SCTP_DBG_OBJCNT_DEC(assoc);
417 }
418 
419 /* Change the primary destination address for the peer. */
420 void sctp_assoc_set_primary(struct sctp_association *asoc,
421 			    struct sctp_transport *transport)
422 {
423 	int changeover = 0;
424 
425 	/* it's a changeover only if we already have a primary path
426 	 * that we are changing
427 	 */
428 	if (asoc->peer.primary_path != NULL &&
429 	    asoc->peer.primary_path != transport)
430 		changeover = 1;
431 
432 	asoc->peer.primary_path = transport;
433 
434 	/* Set a default msg_name for events. */
435 	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
436 	       sizeof(union sctp_addr));
437 
438 	/* If the primary path is changing, assume that the
439 	 * user wants to use this new path.
440 	 */
441 	if ((transport->state == SCTP_ACTIVE) ||
442 	    (transport->state == SCTP_UNKNOWN))
443 		asoc->peer.active_path = transport;
444 
445 	/*
446 	 * SFR-CACC algorithm:
447 	 * Upon the receipt of a request to change the primary
448 	 * destination address, on the data structure for the new
449 	 * primary destination, the sender MUST do the following:
450 	 *
451 	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
452 	 * to this destination address earlier. The sender MUST set
453 	 * CYCLING_CHANGEOVER to indicate that this switch is a
454 	 * double switch to the same destination address.
455 	 *
456 	 * Really, only bother if we have data queued or outstanding on
457 	 * the association.
458 	 */
459 	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
460 		return;
461 
462 	if (transport->cacc.changeover_active)
463 		transport->cacc.cycling_changeover = changeover;
464 
465 	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
466 	 * a changeover has occurred.
467 	 */
468 	transport->cacc.changeover_active = changeover;
469 
470 	/* 3) The sender MUST store the next TSN to be sent in
471 	 * next_tsn_at_change.
472 	 */
473 	transport->cacc.next_tsn_at_change = asoc->next_tsn;
474 }
475 
476 /* Remove a transport from an association.  */
477 void sctp_assoc_rm_peer(struct sctp_association *asoc,
478 			struct sctp_transport *peer)
479 {
480 	struct sctp_transport *transport;
481 	struct list_head *pos;
482 	struct sctp_chunk *ch;
483 
484 	pr_debug("%s: association:%p addr:%pISpc\n",
485 		 __func__, asoc, &peer->ipaddr.sa);
486 
487 	/* If we are to remove the current retran_path, update it
488 	 * to the next peer before removing this peer from the list.
489 	 */
490 	if (asoc->peer.retran_path == peer)
491 		sctp_assoc_update_retran_path(asoc);
492 
493 	/* Remove this peer from the list. */
494 	list_del_rcu(&peer->transports);
495 	/* Remove this peer from the transport hashtable */
496 	sctp_unhash_transport(peer);
497 
498 	/* Get the first transport of asoc. */
499 	pos = asoc->peer.transport_addr_list.next;
500 	transport = list_entry(pos, struct sctp_transport, transports);
501 
502 	/* Update any entries that match the peer to be deleted. */
503 	if (asoc->peer.primary_path == peer)
504 		sctp_assoc_set_primary(asoc, transport);
505 	if (asoc->peer.active_path == peer)
506 		asoc->peer.active_path = transport;
507 	if (asoc->peer.retran_path == peer)
508 		asoc->peer.retran_path = transport;
509 	if (asoc->peer.last_data_from == peer)
510 		asoc->peer.last_data_from = transport;
511 
512 	if (asoc->strreset_chunk &&
513 	    asoc->strreset_chunk->transport == peer) {
514 		asoc->strreset_chunk->transport = transport;
515 		sctp_transport_reset_reconf_timer(transport);
516 	}
517 
518 	/* If we remove the transport an INIT was last sent to, set it to
519 	 * NULL. Combined with the update of the retran path above, this
520 	 * will cause the next INIT to be sent to the next available
521 	 * transport, maintaining the cycle.
522 	 */
523 	if (asoc->init_last_sent_to == peer)
524 		asoc->init_last_sent_to = NULL;
525 
526 	/* If we remove the transport an SHUTDOWN was last sent to, set it
527 	 * to NULL. Combined with the update of the retran path above, this
528 	 * will cause the next SHUTDOWN to be sent to the next available
529 	 * transport, maintaining the cycle.
530 	 */
531 	if (asoc->shutdown_last_sent_to == peer)
532 		asoc->shutdown_last_sent_to = NULL;
533 
534 	/* If we remove the transport an ASCONF was last sent to, set it to
535 	 * NULL.
536 	 */
537 	if (asoc->addip_last_asconf &&
538 	    asoc->addip_last_asconf->transport == peer)
539 		asoc->addip_last_asconf->transport = NULL;
540 
541 	/* If we have something on the transmitted list, we have to
542 	 * save it off.  The best place is the active path.
543 	 */
544 	if (!list_empty(&peer->transmitted)) {
545 		struct sctp_transport *active = asoc->peer.active_path;
546 
547 		/* Reset the transport of each chunk on this list */
548 		list_for_each_entry(ch, &peer->transmitted,
549 					transmitted_list) {
550 			ch->transport = NULL;
551 			ch->rtt_in_progress = 0;
552 		}
553 
554 		list_splice_tail_init(&peer->transmitted,
555 					&active->transmitted);
556 
557 		/* Start a T3 timer here in case it wasn't running so
558 		 * that these migrated packets have a chance to get
559 		 * retransmitted.
560 		 */
561 		if (!timer_pending(&active->T3_rtx_timer))
562 			if (!mod_timer(&active->T3_rtx_timer,
563 					jiffies + active->rto))
564 				sctp_transport_hold(active);
565 	}
566 
567 	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
568 		if (ch->transport == peer)
569 			ch->transport = NULL;
570 
571 	asoc->peer.transport_count--;
572 
573 	sctp_transport_free(peer);
574 }
575 
576 /* Add a transport address to an association.  */
577 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
578 					   const union sctp_addr *addr,
579 					   const gfp_t gfp,
580 					   const int peer_state)
581 {
582 	struct net *net = sock_net(asoc->base.sk);
583 	struct sctp_transport *peer;
584 	struct sctp_sock *sp;
585 	unsigned short port;
586 
587 	sp = sctp_sk(asoc->base.sk);
588 
589 	/* AF_INET and AF_INET6 share common port field. */
590 	port = ntohs(addr->v4.sin_port);
591 
592 	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
593 		 asoc, &addr->sa, peer_state);
594 
595 	/* Set the port if it has not been set yet.  */
596 	if (0 == asoc->peer.port)
597 		asoc->peer.port = port;
598 
599 	/* Check to see if this is a duplicate. */
600 	peer = sctp_assoc_lookup_paddr(asoc, addr);
601 	if (peer) {
602 		/* An UNKNOWN state is only set on transports added by
603 		 * user in sctp_connectx() call.  Such transports should be
604 		 * considered CONFIRMED per RFC 4960, Section 5.4.
605 		 */
606 		if (peer->state == SCTP_UNKNOWN) {
607 			peer->state = SCTP_ACTIVE;
608 		}
609 		return peer;
610 	}
611 
612 	peer = sctp_transport_new(net, addr, gfp);
613 	if (!peer)
614 		return NULL;
615 
616 	sctp_transport_set_owner(peer, asoc);
617 
618 	/* Initialize the peer's heartbeat interval based on the
619 	 * association configured value.
620 	 */
621 	peer->hbinterval = asoc->hbinterval;
622 
623 	/* Set the path max_retrans.  */
624 	peer->pathmaxrxt = asoc->pathmaxrxt;
625 
626 	/* And the partial failure retrans threshold */
627 	peer->pf_retrans = asoc->pf_retrans;
628 
629 	/* Initialize the peer's SACK delay timeout based on the
630 	 * association configured value.
631 	 */
632 	peer->sackdelay = asoc->sackdelay;
633 	peer->sackfreq = asoc->sackfreq;
634 
635 	if (addr->sa.sa_family == AF_INET6) {
636 		__be32 info = addr->v6.sin6_flowinfo;
637 
638 		if (info) {
639 			peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
640 			peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
641 		} else {
642 			peer->flowlabel = asoc->flowlabel;
643 		}
644 	}
645 	peer->dscp = asoc->dscp;
646 
647 	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
648 	 * based on association setting.
649 	 */
650 	peer->param_flags = asoc->param_flags;
651 
652 	/* Initialize the pmtu of the transport. */
653 	sctp_transport_route(peer, NULL, sp);
654 
655 	/* If this is the first transport addr on this association,
656 	 * initialize the association PMTU to the peer's PMTU.
657 	 * If not and the current association PMTU is higher than the new
658 	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
659 	 */
660 	sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
661 				  min_t(int, peer->pathmtu, asoc->pathmtu) :
662 				  peer->pathmtu);
663 
664 	peer->pmtu_pending = 0;
665 
666 	/* The asoc->peer.port might not be meaningful yet, but
667 	 * initialize the packet structure anyway.
668 	 */
669 	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
670 			 asoc->peer.port);
671 
672 	/* 7.2.1 Slow-Start
673 	 *
674 	 * o The initial cwnd before DATA transmission or after a sufficiently
675 	 *   long idle period MUST be set to
676 	 *      min(4*MTU, max(2*MTU, 4380 bytes))
677 	 *
678 	 * o The initial value of ssthresh MAY be arbitrarily high
679 	 *   (for example, implementations MAY use the size of the
680 	 *   receiver advertised window).
681 	 */
682 	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
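	/* For illustration only: with a typical 1500-byte path MTU this is
	 * min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380) = 4380 bytes,
	 * i.e. roughly three full-sized Ethernet payloads of initial cwnd.
	 */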
683 
684 	/* At this point, we may not have the receiver's advertised window,
685 	 * so initialize ssthresh to the default value and it will be set
686 	 * later when we process the INIT.
687 	 */
688 	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
689 
690 	peer->partial_bytes_acked = 0;
691 	peer->flight_size = 0;
692 	peer->burst_limited = 0;
693 
694 	/* Set the transport's RTO.initial value */
695 	peer->rto = asoc->rto_initial;
696 	sctp_max_rto(asoc, peer);
697 
698 	/* Set the peer's active state. */
699 	peer->state = peer_state;
700 
701 	/* Add this peer into the transport hashtable */
702 	if (sctp_hash_transport(peer)) {
703 		sctp_transport_free(peer);
704 		return NULL;
705 	}
706 
707 	/* Attach the remote transport to our asoc.  */
708 	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
709 	asoc->peer.transport_count++;
710 
711 	/* If we do not yet have a primary path, set one.  */
712 	if (!asoc->peer.primary_path) {
713 		sctp_assoc_set_primary(asoc, peer);
714 		asoc->peer.retran_path = peer;
715 	}
716 
717 	if (asoc->peer.active_path == asoc->peer.retran_path &&
718 	    peer->state != SCTP_UNCONFIRMED) {
719 		asoc->peer.retran_path = peer;
720 	}
721 
722 	return peer;
723 }
724 
725 /* Delete a transport address from an association.  */
726 void sctp_assoc_del_peer(struct sctp_association *asoc,
727 			 const union sctp_addr *addr)
728 {
729 	struct list_head	*pos;
730 	struct list_head	*temp;
731 	struct sctp_transport	*transport;
732 
733 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
734 		transport = list_entry(pos, struct sctp_transport, transports);
735 		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
736 			/* Do bookkeeping for removing the peer and free it. */
737 			sctp_assoc_rm_peer(asoc, transport);
738 			break;
739 		}
740 	}
741 }
742 
743 /* Lookup a transport by address. */
744 struct sctp_transport *sctp_assoc_lookup_paddr(
745 					const struct sctp_association *asoc,
746 					const union sctp_addr *address)
747 {
748 	struct sctp_transport *t;
749 
750 	/* Cycle through all transports searching for a peer address. */
751 
752 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
753 			transports) {
754 		if (sctp_cmp_addr_exact(address, &t->ipaddr))
755 			return t;
756 	}
757 
758 	return NULL;
759 }
760 
761 /* Remove all transports except a given one. */
762 void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
763 				     struct sctp_transport *primary)
764 {
765 	struct sctp_transport	*temp;
766 	struct sctp_transport	*t;
767 
768 	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
769 				 transports) {
770 		/* if the current transport is not the primary one, delete it */
771 		if (t != primary)
772 			sctp_assoc_rm_peer(asoc, t);
773 	}
774 }
775 
776 /* Engage in transport control operations.
777  * Mark the transport up or down and send a notification to the user.
778  * Select and update the new active and retran paths.
779  */
780 void sctp_assoc_control_transport(struct sctp_association *asoc,
781 				  struct sctp_transport *transport,
782 				  enum sctp_transport_cmd command,
783 				  sctp_sn_error_t error)
784 {
785 	struct sctp_ulpevent *event;
786 	struct sockaddr_storage addr;
787 	int spc_state = 0;
788 	bool ulp_notify = true;
789 
790 	/* Record the transition on the transport.  */
791 	switch (command) {
792 	case SCTP_TRANSPORT_UP:
793 		/* If we are moving from UNCONFIRMED state due
794 		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
795 		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
796 		 */
797 		if (SCTP_UNCONFIRMED == transport->state &&
798 		    SCTP_HEARTBEAT_SUCCESS == error)
799 			spc_state = SCTP_ADDR_CONFIRMED;
800 		else
801 			spc_state = SCTP_ADDR_AVAILABLE;
802 		/* Don't inform ULP about transition from PF to
803 		 * active state and set cwnd to 1 MTU, see SCTP
804 		 * Quick failover draft section 5.1, point 5
805 		 */
806 		if (transport->state == SCTP_PF) {
807 			ulp_notify = false;
808 			transport->cwnd = asoc->pathmtu;
809 		}
810 		transport->state = SCTP_ACTIVE;
811 		break;
812 
813 	case SCTP_TRANSPORT_DOWN:
814 		/* If the transport was never confirmed, do not transition it
815 		 * to inactive state.  Also, release the cached route since
816 		 * there may be a better route next time.
817 		 */
818 		if (transport->state != SCTP_UNCONFIRMED)
819 			transport->state = SCTP_INACTIVE;
820 		else {
821 			sctp_transport_dst_release(transport);
822 			ulp_notify = false;
823 		}
824 
825 		spc_state = SCTP_ADDR_UNREACHABLE;
826 		break;
827 
828 	case SCTP_TRANSPORT_PF:
829 		transport->state = SCTP_PF;
830 		ulp_notify = false;
831 		break;
832 
833 	default:
834 		return;
835 	}
836 
837 	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
838 	 * to the user.
839 	 */
840 	if (ulp_notify) {
841 		memset(&addr, 0, sizeof(struct sockaddr_storage));
842 		memcpy(&addr, &transport->ipaddr,
843 		       transport->af_specific->sockaddr_len);
844 
845 		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
846 					0, spc_state, error, GFP_ATOMIC);
847 		if (event)
848 			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
849 	}
850 
851 	/* Select new active and retran paths. */
852 	sctp_select_active_and_retran_path(asoc);
853 }
854 
855 /* Hold a reference to an association. */
856 void sctp_association_hold(struct sctp_association *asoc)
857 {
858 	refcount_inc(&asoc->base.refcnt);
859 }
860 
861 /* Release a reference to an association and cleanup
862  * if there are no more references.
863  */
864 void sctp_association_put(struct sctp_association *asoc)
865 {
866 	if (refcount_dec_and_test(&asoc->base.refcnt))
867 		sctp_association_destroy(asoc);
868 }
869 
870 /* Allocate the next TSN, Transmission Sequence Number, for the given
871  * association.
872  */
873 __u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
874 {
875 	/* From Section 1.6 Serial Number Arithmetic:
876 	 * Transmission Sequence Numbers wrap around when they reach
877 	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
878 	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
879 	 */
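	/* The wraparound needs no special handling below: next_tsn is a __u32,
	 * so incrementing past 0xffffffff naturally yields 0.
	 */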
880 	__u32 retval = asoc->next_tsn;
881 	asoc->next_tsn++;
882 	asoc->unack_data++;
883 
884 	return retval;
885 }
886 
887 /* Compare two addresses to see if they match.  Wildcard addresses
888  * only match themselves.
889  */
890 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
891 			const union sctp_addr *ss2)
892 {
893 	struct sctp_af *af;
894 
895 	af = sctp_get_af_specific(ss1->sa.sa_family);
896 	if (unlikely(!af))
897 		return 0;
898 
899 	return af->cmp_addr(ss1, ss2);
900 }
901 
902 /* Return an ecne chunk to get prepended to a packet.
903  * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
904  * No we don't, but we could/should.
905  */
906 struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
907 {
908 	if (!asoc->need_ecne)
909 		return NULL;
910 
911 	/* Send ECNE if needed.
912 	 * Not being able to allocate a chunk here is not deadly.
913 	 */
914 	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
915 }
916 
917 /*
918  * Find which transport this TSN was sent on.
919  */
920 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
921 					     __u32 tsn)
922 {
923 	struct sctp_transport *active;
924 	struct sctp_transport *match;
925 	struct sctp_transport *transport;
926 	struct sctp_chunk *chunk;
927 	__be32 key = htonl(tsn);
928 
929 	match = NULL;
930 
931 	/*
932 	 * FIXME: In general, find a more efficient data structure for
933 	 * searching.
934 	 */
935 
936 	/*
937 	 * The general strategy is to search each transport's transmitted
938 	 * list.   Return which transport this TSN lives on.
939 	 *
940 	 * Let's be hopeful and check the active_path first.
941 	 * Another optimization would be to know if there is only one
942 	 * outbound path and not have to look for the TSN at all.
943 	 *
944 	 */
945 
946 	active = asoc->peer.active_path;
947 
948 	list_for_each_entry(chunk, &active->transmitted,
949 			transmitted_list) {
950 
951 		if (key == chunk->subh.data_hdr->tsn) {
952 			match = active;
953 			goto out;
954 		}
955 	}
956 
957 	/* If not found, go search all the other transports. */
958 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
959 			transports) {
960 
961 		if (transport == active)
962 			continue;
963 		list_for_each_entry(chunk, &transport->transmitted,
964 				transmitted_list) {
965 			if (key == chunk->subh.data_hdr->tsn) {
966 				match = transport;
967 				goto out;
968 			}
969 		}
970 	}
971 out:
972 	return match;
973 }
974 
975 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
976 static void sctp_assoc_bh_rcv(struct work_struct *work)
977 {
978 	struct sctp_association *asoc =
979 		container_of(work, struct sctp_association,
980 			     base.inqueue.immediate);
981 	struct net *net = sock_net(asoc->base.sk);
982 	union sctp_subtype subtype;
983 	struct sctp_endpoint *ep;
984 	struct sctp_chunk *chunk;
985 	struct sctp_inq *inqueue;
986 	int first_time = 1;	/* is this the first time through the loop */
987 	int error = 0;
988 	int state;
989 
990 	/* The association should be held so we should be safe. */
991 	ep = asoc->ep;
992 
993 	inqueue = &asoc->base.inqueue;
994 	sctp_association_hold(asoc);
995 	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
996 		state = asoc->state;
997 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
998 
999 		/* If the first chunk in the packet is AUTH, do special
1000 		 * processing specified in Section 6.3 of SCTP-AUTH spec
1001 		 */
1002 		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
1003 			struct sctp_chunkhdr *next_hdr;
1004 
1005 			next_hdr = sctp_inq_peek(inqueue);
1006 			if (!next_hdr)
1007 				goto normal;
1008 
1009 			/* If the next chunk is COOKIE-ECHO, skip the AUTH
1010 			 * chunk while saving a pointer to it so we can do
1011 			 * Authentication later (during cookie-echo
1012 			 * processing).
1013 			 */
1014 			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
1015 				chunk->auth_chunk = skb_clone(chunk->skb,
1016 							      GFP_ATOMIC);
1017 				chunk->auth = 1;
1018 				continue;
1019 			}
1020 		}
1021 
1022 normal:
1023 		/* SCTP-AUTH, Section 6.3:
1024 		 *    The receiver has a list of chunk types which it expects
1025 		 *    to be received only after an AUTH-chunk.  This list has
1026 		 *    been sent to the peer during the association setup.  It
1027 		 *    MUST silently discard these chunks if they are not placed
1028 		 *    after an AUTH chunk in the packet.
1029 		 */
1030 		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1031 			continue;
1032 
1033 		/* Remember where the last DATA chunk came from so we
1034 		 * know where to send the SACK.
1035 		 */
1036 		if (sctp_chunk_is_data(chunk))
1037 			asoc->peer.last_data_from = chunk->transport;
1038 		else {
1039 			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1040 			asoc->stats.ictrlchunks++;
1041 			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1042 				asoc->stats.isacks++;
1043 		}
1044 
1045 		if (chunk->transport)
1046 			chunk->transport->last_time_heard = ktime_get();
1047 
1048 		/* Run through the state machine. */
1049 		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1050 				   state, ep, asoc, chunk, GFP_ATOMIC);
1051 
1052 		/* Check to see if the association is freed in response to
1053 		 * the incoming chunk.  If so, get out of the while loop.
1054 		 */
1055 		if (asoc->base.dead)
1056 			break;
1057 
1058 		/* If there is an error on chunk, discard this packet. */
1059 		if (error && chunk)
1060 			chunk->pdiscard = 1;
1061 
1062 		if (first_time)
1063 			first_time = 0;
1064 	}
1065 	sctp_association_put(asoc);
1066 }
1067 
1068 /* This routine moves an association from its old sk to a new sk.  */
1069 void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1070 {
1071 	struct sctp_sock *newsp = sctp_sk(newsk);
1072 	struct sock *oldsk = assoc->base.sk;
1073 
1074 	/* Delete the association from the old endpoint's list of
1075 	 * associations.
1076 	 */
1077 	list_del_init(&assoc->asocs);
1078 
1079 	/* Decrement the backlog value for a TCP-style socket. */
1080 	if (sctp_style(oldsk, TCP))
1081 		oldsk->sk_ack_backlog--;
1082 
1083 	/* Release references to the old endpoint and the sock.  */
1084 	sctp_endpoint_put(assoc->ep);
1085 	sock_put(assoc->base.sk);
1086 
1087 	/* Get a reference to the new endpoint.  */
1088 	assoc->ep = newsp->ep;
1089 	sctp_endpoint_hold(assoc->ep);
1090 
1091 	/* Get a reference to the new sock.  */
1092 	assoc->base.sk = newsk;
1093 	sock_hold(assoc->base.sk);
1094 
1095 	/* Add the association to the new endpoint's list of associations.  */
1096 	sctp_endpoint_add_asoc(newsp->ep, assoc);
1097 }
1098 
1099 /* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
1100 int sctp_assoc_update(struct sctp_association *asoc,
1101 		      struct sctp_association *new)
1102 {
1103 	struct sctp_transport *trans;
1104 	struct list_head *pos, *temp;
1105 
1106 	/* Copy in new parameters of peer. */
1107 	asoc->c = new->c;
1108 	asoc->peer.rwnd = new->peer.rwnd;
1109 	asoc->peer.sack_needed = new->peer.sack_needed;
1110 	asoc->peer.auth_capable = new->peer.auth_capable;
1111 	asoc->peer.i = new->peer.i;
1112 
1113 	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1114 			      asoc->peer.i.initial_tsn, GFP_ATOMIC))
1115 		return -ENOMEM;
1116 
1117 	/* Remove any peer addresses not present in the new association. */
1118 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1119 		trans = list_entry(pos, struct sctp_transport, transports);
1120 		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1121 			sctp_assoc_rm_peer(asoc, trans);
1122 			continue;
1123 		}
1124 
1125 		if (asoc->state >= SCTP_STATE_ESTABLISHED)
1126 			sctp_transport_reset(trans);
1127 	}
1128 
1129 	/* If the case is A (association restart), use
1130 	 * initial_tsn as next_tsn. If the case is B, use
1131 	 * current next_tsn in case data sent to peer
1132 	 * has been discarded and needs retransmission.
1133 	 */
1134 	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1135 		asoc->next_tsn = new->next_tsn;
1136 		asoc->ctsn_ack_point = new->ctsn_ack_point;
1137 		asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1138 
1139 		/* Reinitialize SSN for both local streams
1140 		 * and peer's streams.
1141 		 */
1142 		sctp_stream_clear(&asoc->stream);
1143 
1144 		/* Flush the ULP reassembly and ordered queue.
1145 		 * Any data there will now be stale and will
1146 		 * cause problems.
1147 		 */
1148 		sctp_ulpq_flush(&asoc->ulpq);
1149 
1150 		/* reset the overall association error count so
1151 		 * that the restarted association doesn't get torn
1152 		 * down on the next retransmission timer.
1153 		 */
1154 		asoc->overall_error_count = 0;
1155 
1156 	} else {
1157 		/* Add any peer addresses from the new association. */
1158 		list_for_each_entry(trans, &new->peer.transport_addr_list,
1159 				    transports)
1160 			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
1161 			    !sctp_assoc_add_peer(asoc, &trans->ipaddr,
1162 						 GFP_ATOMIC, trans->state))
1163 				return -ENOMEM;
1164 
1165 		asoc->ctsn_ack_point = asoc->next_tsn - 1;
1166 		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1167 
1168 		if (sctp_state(asoc, COOKIE_WAIT))
1169 			sctp_stream_update(&asoc->stream, &new->stream);
1170 
1171 		/* get a new assoc id if we don't have one yet. */
1172 		if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
1173 			return -ENOMEM;
1174 	}
1175 
1176 	/* SCTP-AUTH: Save the peer parameters from the new associations
1177 	 * and also move the association shared keys over
1178 	 */
1179 	kfree(asoc->peer.peer_random);
1180 	asoc->peer.peer_random = new->peer.peer_random;
1181 	new->peer.peer_random = NULL;
1182 
1183 	kfree(asoc->peer.peer_chunks);
1184 	asoc->peer.peer_chunks = new->peer.peer_chunks;
1185 	new->peer.peer_chunks = NULL;
1186 
1187 	kfree(asoc->peer.peer_hmacs);
1188 	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1189 	new->peer.peer_hmacs = NULL;
1190 
1191 	return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1192 }
1193 
1194 /* Update the retran path for sending a retransmitted packet.
1195  * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
1196  *
1197  *   When there is outbound data to send and the primary path
1198  *   becomes inactive (e.g., due to failures), or where the
1199  *   SCTP user explicitly requests to send data to an
1200  *   inactive destination transport address, before reporting
1201  *   an error to its ULP, the SCTP endpoint should try to send
1202  *   the data to an alternate active destination transport
1203  *   address if one exists.
1204  *
1205  *   When retransmitting data that timed out, if the endpoint
1206  *   is multihomed, it should consider each source-destination
1207  *   address pair in its retransmission selection policy.
1208  *   When retransmitting timed-out data, the endpoint should
1209  *   attempt to pick the most divergent source-destination
1210  *   pair from the original source-destination pair to which
1211  *   the packet was transmitted.
1212  *
1213  *   Note: Rules for picking the most divergent source-destination
1214  *   pair are an implementation decision and are not specified
1215  *   within this document.
1216  *
1217  * Our basic strategy is to round-robin transports in priorities
1218 	 * according to sctp_trans_score(), e.g., if no such
1219  * transport with state SCTP_ACTIVE exists, round-robin through
1220  * SCTP_UNKNOWN, etc. You get the picture.
1221  */
1222 static u8 sctp_trans_score(const struct sctp_transport *trans)
1223 {
1224 	switch (trans->state) {
1225 	case SCTP_ACTIVE:
1226 		return 3;	/* best case */
1227 	case SCTP_UNKNOWN:
1228 		return 2;
1229 	case SCTP_PF:
1230 		return 1;
1231 	default: /* case SCTP_INACTIVE */
1232 		return 0;	/* worst case */
1233 	}
1234 }
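/* For illustration only: transports in states SCTP_INACTIVE, SCTP_PF,
 * SCTP_UNKNOWN and SCTP_ACTIVE score 0, 1, 2 and 3 respectively, so an
 * ACTIVE path always wins and only equal scores fall through to the
 * error-count / last-time-heard tie-break below.
 */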
1235 
1236 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1237 						   struct sctp_transport *trans2)
1238 {
1239 	if (trans1->error_count > trans2->error_count) {
1240 		return trans2;
1241 	} else if (trans1->error_count == trans2->error_count &&
1242 		   ktime_after(trans2->last_time_heard,
1243 			       trans1->last_time_heard)) {
1244 		return trans2;
1245 	} else {
1246 		return trans1;
1247 	}
1248 }
1249 
1250 static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1251 						    struct sctp_transport *best)
1252 {
1253 	u8 score_curr, score_best;
1254 
1255 	if (best == NULL || curr == best)
1256 		return curr;
1257 
1258 	score_curr = sctp_trans_score(curr);
1259 	score_best = sctp_trans_score(best);
1260 
1261 	/* First, try a score-based selection if both transport states
1262 	 * differ. If we're in a tie, lets try to make a more clever
1263 	 * decision here based on error counts and last time heard.
1264 	 */
1265 	if (score_curr > score_best)
1266 		return curr;
1267 	else if (score_curr == score_best)
1268 		return sctp_trans_elect_tie(best, curr);
1269 	else
1270 		return best;
1271 }
1272 
1273 void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1274 {
1275 	struct sctp_transport *trans = asoc->peer.retran_path;
1276 	struct sctp_transport *trans_next = NULL;
1277 
1278 	/* We're done as we only have the one and only path. */
1279 	if (asoc->peer.transport_count == 1)
1280 		return;
1281 	/* If active_path and retran_path are the same and active,
1282 	 * then this is the only active path. Use it.
1283 	 */
1284 	if (asoc->peer.active_path == asoc->peer.retran_path &&
1285 	    asoc->peer.active_path->state == SCTP_ACTIVE)
1286 		return;
1287 
1288 	/* Iterate from retran_path's successor back to retran_path. */
1289 	for (trans = list_next_entry(trans, transports); 1;
1290 	     trans = list_next_entry(trans, transports)) {
1291 		/* Manually skip the head element. */
1292 		if (&trans->transports == &asoc->peer.transport_addr_list)
1293 			continue;
1294 		if (trans->state == SCTP_UNCONFIRMED)
1295 			continue;
1296 		trans_next = sctp_trans_elect_best(trans, trans_next);
1297 		/* Active is good enough for immediate return. */
1298 		if (trans_next->state == SCTP_ACTIVE)
1299 			break;
1300 		/* We've reached the end, time to update path. */
1301 		if (trans == asoc->peer.retran_path)
1302 			break;
1303 	}
1304 
1305 	asoc->peer.retran_path = trans_next;
1306 
1307 	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
1308 		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
1309 }
1310 
1311 static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
1312 {
1313 	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
1314 	struct sctp_transport *trans_pf = NULL;
1315 
1316 	/* Look for the two most recently used active transports. */
1317 	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
1318 			    transports) {
1319 		/* Skip uninteresting transports. */
1320 		if (trans->state == SCTP_INACTIVE ||
1321 		    trans->state == SCTP_UNCONFIRMED)
1322 			continue;
1323 		/* Keep track of the best PF transport from our
1324 		 * list in case we don't find an active one.
1325 		 */
1326 		if (trans->state == SCTP_PF) {
1327 			trans_pf = sctp_trans_elect_best(trans, trans_pf);
1328 			continue;
1329 		}
1330 		/* For active transports, pick the most recent ones. */
1331 		if (trans_pri == NULL ||
1332 		    ktime_after(trans->last_time_heard,
1333 				trans_pri->last_time_heard)) {
1334 			trans_sec = trans_pri;
1335 			trans_pri = trans;
1336 		} else if (trans_sec == NULL ||
1337 			   ktime_after(trans->last_time_heard,
1338 				       trans_sec->last_time_heard)) {
1339 			trans_sec = trans;
1340 		}
1341 	}
1342 
1343 	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
1344 	 *
1345 	 * By default, an endpoint should always transmit to the primary
1346 	 * path, unless the SCTP user explicitly specifies the
1347 	 * destination transport address (and possibly source transport
1348 	 * address) to use. [If the primary is active but not most recent,
1349 	 * bump the most recently used transport.]
1350 	 */
1351 	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
1352 	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
1353 	     asoc->peer.primary_path != trans_pri) {
1354 		trans_sec = trans_pri;
1355 		trans_pri = asoc->peer.primary_path;
1356 	}
1357 
1358 	/* We did not find anything useful for a possible retransmission
1359 	 * path; either the primary path that we found is the same as
1360 	 * the current one, or we didn't generally find an active one.
1361 	 */
1362 	if (trans_sec == NULL)
1363 		trans_sec = trans_pri;
1364 
1365 	/* If we failed to find a usable transport, just camp on the
1366 	 * active or pick a PF iff it's the better choice.
1367 	 */
1368 	if (trans_pri == NULL) {
1369 		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
1370 		trans_sec = trans_pri;
1371 	}
1372 
1373 	/* Set the active and retran transports. */
1374 	asoc->peer.active_path = trans_pri;
1375 	asoc->peer.retran_path = trans_sec;
1376 }
1377 
1378 struct sctp_transport *
1379 sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1380 				  struct sctp_transport *last_sent_to)
1381 {
1382 	/* If this is the first time packet is sent, use the active path,
1383 	 * else use the retran path. If the last packet was sent over the
1384 	 * retran path, update the retran path and use it.
1385 	 */
1386 	if (last_sent_to == NULL) {
1387 		return asoc->peer.active_path;
1388 	} else {
1389 		if (last_sent_to == asoc->peer.retran_path)
1390 			sctp_assoc_update_retran_path(asoc);
1391 
1392 		return asoc->peer.retran_path;
1393 	}
1394 }
1395 
1396 void sctp_assoc_update_frag_point(struct sctp_association *asoc)
1397 {
1398 	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
1399 				    sctp_datachk_len(&asoc->stream));
1400 
1401 	if (asoc->user_frag)
1402 		frag = min_t(int, frag, asoc->user_frag);
1403 
1404 	frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
1405 				sctp_datachk_len(&asoc->stream));
1406 
1407 	asoc->frag_point = SCTP_TRUNC4(frag);
1408 }
1409 
1410 void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
1411 {
1412 	if (asoc->pathmtu != pmtu) {
1413 		asoc->pathmtu = pmtu;
1414 		sctp_assoc_update_frag_point(asoc);
1415 	}
1416 
1417 	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1418 		 asoc->pathmtu, asoc->frag_point);
1419 }
1420 
1421 /* Update the association's pmtu and frag_point by going through all the
1422  * transports. This routine is called when a transport's PMTU has changed.
1423  */
1424 void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1425 {
1426 	struct sctp_transport *t;
1427 	__u32 pmtu = 0;
1428 
1429 	if (!asoc)
1430 		return;
1431 
1432 	/* Get the lowest pmtu of all the transports. */
1433 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
1434 		if (t->pmtu_pending && t->dst) {
1435 			sctp_transport_update_pmtu(t,
1436 						   atomic_read(&t->mtu_info));
1437 			t->pmtu_pending = 0;
1438 		}
1439 		if (!pmtu || (t->pathmtu < pmtu))
1440 			pmtu = t->pathmtu;
1441 	}
1442 
1443 	sctp_assoc_set_pmtu(asoc, pmtu);
1444 }
1445 
1446 /* Should we send a SACK to update our peer? */
1447 static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1448 {
1449 	struct net *net = sock_net(asoc->base.sk);
1450 	switch (asoc->state) {
1451 	case SCTP_STATE_ESTABLISHED:
1452 	case SCTP_STATE_SHUTDOWN_PENDING:
1453 	case SCTP_STATE_SHUTDOWN_RECEIVED:
1454 	case SCTP_STATE_SHUTDOWN_SENT:
1455 		if ((asoc->rwnd > asoc->a_rwnd) &&
1456 		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1457 			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1458 			   asoc->pathmtu)))
1459 			return true;
1460 		break;
1461 	default:
1462 		break;
1463 	}
1464 	return false;
1465 }
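/* For illustration only, assuming the sysctl default rwnd_upd_shift of 4 and
 * a hypothetical sk_rcvbuf of 200000 bytes: in ESTABLISHED state a
 * window-update SACK is triggered once rwnd exceeds the last advertised
 * a_rwnd by at least max(200000 >> 4, pathmtu) = max(12500, pathmtu) bytes.
 */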
1466 
1467 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
1468 void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1469 {
1470 	struct sctp_chunk *sack;
1471 	struct timer_list *timer;
1472 
1473 	if (asoc->rwnd_over) {
1474 		if (asoc->rwnd_over >= len) {
1475 			asoc->rwnd_over -= len;
1476 		} else {
1477 			asoc->rwnd += (len - asoc->rwnd_over);
1478 			asoc->rwnd_over = 0;
1479 		}
1480 	} else {
1481 		asoc->rwnd += len;
1482 	}
1483 
1484 	/* If we had window pressure, start recovering it
1485 	 * once our rwnd had reached the accumulated pressure
1486 	 * threshold.  The idea is to recover slowly, but up
1487 	 * to the initial advertised window.
1488 	 */
1489 	if (asoc->rwnd_press) {
1490 		int change = min(asoc->pathmtu, asoc->rwnd_press);
1491 		asoc->rwnd += change;
1492 		asoc->rwnd_press -= change;
1493 	}
1494 
1495 	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
1496 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1497 		 asoc->a_rwnd);
1498 
1499 	/* Send a window update SACK if the rwnd has increased by at least the
1500 	 * minimum of the association's PMTU and half of the receive buffer.
1501 	 * The algorithm used is similar to the one described in
1502 	 * Section 4.2.3.3 of RFC 1122.
1503 	 */
1504 	if (sctp_peer_needs_update(asoc)) {
1505 		asoc->a_rwnd = asoc->rwnd;
1506 
1507 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
1508 			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
1509 			 asoc->a_rwnd);
1510 
1511 		sack = sctp_make_sack(asoc);
1512 		if (!sack)
1513 			return;
1514 
1515 		asoc->peer.sack_needed = 0;
1516 
1517 		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);
1518 
1519 		/* Stop the SACK timer.  */
1520 		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1521 		if (del_timer(timer))
1522 			sctp_association_put(asoc);
1523 	}
1524 }
1525 
1526 /* Decrease asoc's rwnd by len. */
1527 void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1528 {
1529 	int rx_count;
1530 	int over = 0;
1531 
1532 	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1533 		pr_debug("%s: association:%p has asoc->rwnd:%u, "
1534 			 "asoc->rwnd_over:%u!\n", __func__, asoc,
1535 			 asoc->rwnd, asoc->rwnd_over);
1536 
1537 	if (asoc->ep->rcvbuf_policy)
1538 		rx_count = atomic_read(&asoc->rmem_alloc);
1539 	else
1540 		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1541 
1542 	/* If we've reached or overflowed our receive buffer, announce
1543 	 * a 0 rwnd if rwnd would still be positive.  Store the
1544 	 * potential pressure overflow so that the window can be restored
1545 	 * back to original value.
1546 	 */
1547 	if (rx_count >= asoc->base.sk->sk_rcvbuf)
1548 		over = 1;
1549 
1550 	if (asoc->rwnd >= len) {
1551 		asoc->rwnd -= len;
1552 		if (over) {
1553 			asoc->rwnd_press += asoc->rwnd;
1554 			asoc->rwnd = 0;
1555 		}
1556 	} else {
1557 		asoc->rwnd_over += len - asoc->rwnd;
1558 		asoc->rwnd = 0;
1559 	}
1560 
1561 	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1562 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1563 		 asoc->rwnd_press);
1564 }
1565 
1566 /* Build the bind address list for the association based on info from the
1567  * local endpoint and the remote peer.
1568  */
1569 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1570 				     enum sctp_scope scope, gfp_t gfp)
1571 {
1572 	int flags;
1573 
1574 	/* Use scoping rules to determine the subset of addresses from
1575 	 * the endpoint.
1576 	 */
1577 	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1578 	if (asoc->peer.ipv4_address)
1579 		flags |= SCTP_ADDR4_PEERSUPP;
1580 	if (asoc->peer.ipv6_address)
1581 		flags |= SCTP_ADDR6_PEERSUPP;
1582 
1583 	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1584 				   &asoc->base.bind_addr,
1585 				   &asoc->ep->base.bind_addr,
1586 				   scope, gfp, flags);
1587 }
1588 
1589 /* Build the association's bind address list from the cookie.  */
1590 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1591 					 struct sctp_cookie *cookie,
1592 					 gfp_t gfp)
1593 {
1594 	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1595 	int var_size3 = cookie->raw_addr_list_len;
1596 	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1597 
1598 	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1599 				      asoc->ep->base.bind_addr.port, gfp);
1600 }
1601 
1602 /* Lookup laddr in the bind address list of an association. */
1603 int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1604 			    const union sctp_addr *laddr)
1605 {
1606 	int found = 0;
1607 
1608 	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1609 	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1610 				 sctp_sk(asoc->base.sk)))
1611 		found = 1;
1612 
1613 	return found;
1614 }
1615 
1616 /* Set an association id for a given association */
1617 int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1618 {
1619 	bool preload = gfpflags_allow_blocking(gfp);
1620 	int ret;
1621 
1622 	/* If the id is already assigned, keep it. */
1623 	if (asoc->assoc_id)
1624 		return 0;
1625 
1626 	if (preload)
1627 		idr_preload(gfp);
1628 	spin_lock_bh(&sctp_assocs_id_lock);
1629 	/* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
1630 	 * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
1631 	 */
1632 	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
1633 			       GFP_NOWAIT);
1634 	spin_unlock_bh(&sctp_assocs_id_lock);
1635 	if (preload)
1636 		idr_preload_end();
1637 	if (ret < 0)
1638 		return ret;
1639 
1640 	asoc->assoc_id = (sctp_assoc_t)ret;
1641 	return 0;
1642 }
1643 
1644 /* Free the ASCONF queue */
1645 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1646 {
1647 	struct sctp_chunk *asconf;
1648 	struct sctp_chunk *tmp;
1649 
1650 	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1651 		list_del_init(&asconf->list);
1652 		sctp_chunk_free(asconf);
1653 	}
1654 }
1655 
1656 /* Free asconf_ack cache */
1657 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1658 {
1659 	struct sctp_chunk *ack;
1660 	struct sctp_chunk *tmp;
1661 
1662 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1663 				transmitted_list) {
1664 		list_del_init(&ack->transmitted_list);
1665 		sctp_chunk_free(ack);
1666 	}
1667 }
1668 
1669 /* Clean up the ASCONF_ACK queue */
1670 void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1671 {
1672 	struct sctp_chunk *ack;
1673 	struct sctp_chunk *tmp;
1674 
1675 	/* We can remove all the entries from the queue up to
1676 	 * the "Peer-Sequence-Number".
1677 	 */
1678 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1679 				transmitted_list) {
1680 		if (ack->subh.addip_hdr->serial ==
1681 				htonl(asoc->peer.addip_serial))
1682 			break;
1683 
1684 		list_del_init(&ack->transmitted_list);
1685 		sctp_chunk_free(ack);
1686 	}
1687 }
1688 
1689 /* Find the ASCONF_ACK whose serial number matches ASCONF */
1690 struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1691 					const struct sctp_association *asoc,
1692 					__be32 serial)
1693 {
1694 	struct sctp_chunk *ack;
1695 
1696 	/* Walk through the list of cached ASCONF-ACKs and find the
1697 	 * ack chunk whose serial number matches that of the request.
1698 	 */
1699 	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1700 		if (sctp_chunk_pending(ack))
1701 			continue;
1702 		if (ack->subh.addip_hdr->serial == serial) {
1703 			sctp_chunk_hold(ack);
1704 			return ack;
1705 		}
1706 	}
1707 
1708 	return NULL;
1709 }
1710 
1711 void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1712 {
1713 	/* Free any cached ASCONF_ACK chunk. */
1714 	sctp_assoc_free_asconf_acks(asoc);
1715 
1716 	/* Free the ASCONF queue. */
1717 	sctp_assoc_free_asconf_queue(asoc);
1718 
1719 	/* Free any cached ASCONF chunk. */
1720 	if (asoc->addip_last_asconf)
1721 		sctp_chunk_free(asoc->addip_last_asconf);
1722 }
1723