1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001 Intel Corp.
6  * Copyright (c) 2001 La Monte H.P. Yarroll
7  *
8  * This file is part of the SCTP kernel implementation
9  *
10  * This module provides the abstraction for an SCTP association.
11  *
12  * This SCTP implementation is free software;
13  * you can redistribute it and/or modify it under the terms of
14  * the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This SCTP implementation is distributed in the hope that it
19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20  *                 ************************
21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22  * See the GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with GNU CC; see the file COPYING.  If not, see
26  * <http://www.gnu.org/licenses/>.
27  *
28  * Please send any bug reports or fixes you make to the
29  * email address(es):
30  *    lksctp developers <linux-sctp@vger.kernel.org>
31  *
32  * Written or modified by:
33  *    La Monte H.P. Yarroll <piggy@acm.org>
34  *    Karl Knutson          <karl@athena.chicago.il.us>
35  *    Jon Grimm             <jgrimm@us.ibm.com>
36  *    Xingang Guo           <xingang.guo@intel.com>
37  *    Hui Huang             <hui.huang@nokia.com>
38  *    Sridhar Samudrala	    <sri@us.ibm.com>
39  *    Daisy Chang	    <daisyc@us.ibm.com>
40  *    Ryan Layer	    <rmlayer@us.ibm.com>
41  *    Kevin Gao             <kevin.gao@intel.com>
42  */
43 
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45 
46 #include <linux/types.h>
47 #include <linux/fcntl.h>
48 #include <linux/poll.h>
49 #include <linux/init.h>
50 
51 #include <linux/slab.h>
52 #include <linux/in.h>
53 #include <net/ipv6.h>
54 #include <net/sctp/sctp.h>
55 #include <net/sctp/sm.h>
56 
57 /* Forward declarations for internal functions. */
58 static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
59 static void sctp_assoc_bh_rcv(struct work_struct *work);
60 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
61 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
62 
63 /* 1st Level Abstractions. */
64 
65 /* Initialize a new association from provided memory. */
66 static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
67 					  const struct sctp_endpoint *ep,
68 					  const struct sock *sk,
69 					  sctp_scope_t scope,
70 					  gfp_t gfp)
71 {
72 	struct net *net = sock_net(sk);
73 	struct sctp_sock *sp;
74 	int i;
75 	sctp_paramhdr_t *p;
76 	int err;
77 
78 	/* Retrieve the SCTP per socket area.  */
79 	sp = sctp_sk((struct sock *)sk);
80 
81 	/* Discarding const is appropriate here.  */
82 	asoc->ep = (struct sctp_endpoint *)ep;
83 	asoc->base.sk = (struct sock *)sk;
84 
85 	sctp_endpoint_hold(asoc->ep);
86 	sock_hold(asoc->base.sk);
87 
88 	/* Initialize the common base substructure.  */
89 	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
90 
91 	/* Initialize the object handling fields.  */
92 	atomic_set(&asoc->base.refcnt, 1);
93 
94 	/* Initialize the bind addr area.  */
95 	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
96 
97 	asoc->state = SCTP_STATE_CLOSED;
98 	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
99 	asoc->user_frag = sp->user_frag;
100 
101 	/* Set the association max_retrans and RTO values from the
102 	 * socket values.
103 	 */
104 	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
105 	asoc->pf_retrans  = net->sctp.pf_retrans;
106 
107 	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
108 	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
109 	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
110 
111 	/* Initialize the association's heartbeat interval based on the
112 	 * sock configured value.
113 	 */
114 	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
115 
116 	/* Initialize path max retrans value. */
117 	asoc->pathmaxrxt = sp->pathmaxrxt;
118 
119 	/* Initialize default path MTU. */
120 	asoc->pathmtu = sp->pathmtu;
121 
122 	/* Set association default SACK delay */
123 	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
124 	asoc->sackfreq = sp->sackfreq;
125 
126 	/* Set the association default flags controlling
127 	 * Heartbeat, SACK delay, and Path MTU Discovery.
128 	 */
129 	asoc->param_flags = sp->param_flags;
130 
131 	/* Initialize the maximum number of new data packets that can be sent
132 	 * in a burst.
133 	 */
134 	asoc->max_burst = sp->max_burst;
135 
136 	/* initialize association timers */
137 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
138 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
139 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
140 
141 	/* sctpimpguide Section 2.12.2
142 	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
143 	 * recommended value of 5 times 'RTO.Max'.
144 	 */
145 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
146 		= 5 * asoc->rto_max;
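	/* Editor's note (illustrative, assuming the stack's usual 60 s default
	 * for rto_max): the T5 shutdown guard timer above would then run for
	 * 5 * 60 s = 300 s, matching the 5 * 'RTO.Max' recommendation quoted
	 * in the comment.
	 */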
147 
148 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
149 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
150 
151 	/* Initializes the timers */
152 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
153 		setup_timer(&asoc->timers[i], sctp_timer_events[i],
154 				(unsigned long)asoc);
155 
156 	/* Pull default initialization values from the sock options.
157 	 * Note: This assumes that the values have already been
158 	 * validated in the sock.
159 	 */
160 	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
161 	asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
162 	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;
163 
164 	asoc->max_init_timeo =
165 		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
166 
167 	/* Set the local window size for receive.
168 	 * This is also the rcvbuf space per association.
169 	 * RFC 2960 - An SCTP receiver MUST be able to receive a minimum of
170 	 * 1500 bytes in one SCTP packet.
171 	 */
172 	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
173 		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
174 	else
175 		asoc->rwnd = sk->sk_rcvbuf/2;
176 
177 	asoc->a_rwnd = asoc->rwnd;
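	/* Editor's note (illustrative, not part of the original file): the net
	 * effect of the block above is rwnd = max(sk_rcvbuf / 2,
	 * SCTP_DEFAULT_MINWINDOW), so only a very small receive buffer gets
	 * clamped up to the 1500-byte minimum window, and the advertised
	 * a_rwnd starts out equal to rwnd.
	 */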
178 
179 	/* Use my own max window until I learn something better.  */
180 	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
181 
182 	/* Initialize the receive memory counter */
183 	atomic_set(&asoc->rmem_alloc, 0);
184 
185 	init_waitqueue_head(&asoc->wait);
186 
187 	asoc->c.my_vtag = sctp_generate_tag(ep);
188 	asoc->c.my_port = ep->base.bind_addr.port;
189 
190 	asoc->c.initial_tsn = sctp_generate_tsn(ep);
191 
192 	asoc->next_tsn = asoc->c.initial_tsn;
193 
194 	asoc->ctsn_ack_point = asoc->next_tsn - 1;
195 	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
196 	asoc->highest_sacked = asoc->ctsn_ack_point;
197 	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
198 
199 	/* ADDIP Section 4.1 Asconf Chunk Procedures
200 	 *
201 	 * When an endpoint has an ASCONF signaled change to be sent to the
202 	 * remote endpoint it should do the following:
203 	 * ...
204 	 * A2) a serial number should be assigned to the chunk. The serial
205 	 * number SHOULD be a monotonically increasing number. The serial
206 	 * numbers SHOULD be initialized at the start of the
207 	 * association to the same value as the initial TSN.
208 	 */
209 	asoc->addip_serial = asoc->c.initial_tsn;
210 
211 	INIT_LIST_HEAD(&asoc->addip_chunk_list);
212 	INIT_LIST_HEAD(&asoc->asconf_ack_list);
213 
214 	/* Make an empty list of remote transport addresses.  */
215 	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
216 
217 	/* RFC 2960 5.1 Normal Establishment of an Association
218 	 *
219 	 * After the reception of the first data chunk in an
220 	 * association the endpoint must immediately respond with a
221 	 * sack to acknowledge the data chunk.  Subsequent
222 	 * acknowledgements should be done as described in Section
223 	 * 6.2.
224 	 *
225 	 * [We implement this by telling a new association that it
226 	 * already received one packet.]
227 	 */
228 	asoc->peer.sack_needed = 1;
229 	asoc->peer.sack_generation = 1;
230 
231 	/* Assume that the peer will tell us if he recognizes ASCONF
232 	 * as part of INIT exchange.
233 	 * The sctp_addip_noauth option is there for backward compatibility
234 	 * and will revert old behavior.
235 	 */
236 	if (net->sctp.addip_noauth)
237 		asoc->peer.asconf_capable = 1;
238 
239 	/* Create an input queue.  */
240 	sctp_inq_init(&asoc->base.inqueue);
241 	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
242 
243 	/* Create an output queue.  */
244 	sctp_outq_init(asoc, &asoc->outqueue);
245 
246 	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
247 		goto fail_init;
248 
249 	/* Assume that peer would support both address types unless we are
250 	 * told otherwise.
251 	 */
252 	asoc->peer.ipv4_address = 1;
253 	if (asoc->base.sk->sk_family == PF_INET6)
254 		asoc->peer.ipv6_address = 1;
255 	INIT_LIST_HEAD(&asoc->asocs);
256 
257 	asoc->default_stream = sp->default_stream;
258 	asoc->default_ppid = sp->default_ppid;
259 	asoc->default_flags = sp->default_flags;
260 	asoc->default_context = sp->default_context;
261 	asoc->default_timetolive = sp->default_timetolive;
262 	asoc->default_rcv_context = sp->default_rcv_context;
263 
264 	/* AUTH related initializations */
265 	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
266 	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
267 	if (err)
268 		goto fail_init;
269 
270 	asoc->active_key_id = ep->active_key_id;
271 
272 	/* Save the hmacs and chunks list into this association */
273 	if (ep->auth_hmacs_list)
274 		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
275 			ntohs(ep->auth_hmacs_list->param_hdr.length));
276 	if (ep->auth_chunk_list)
277 		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
278 			ntohs(ep->auth_chunk_list->param_hdr.length));
279 
280 	/* Get the AUTH random number for this association */
281 	p = (sctp_paramhdr_t *)asoc->c.auth_random;
282 	p->type = SCTP_PARAM_RANDOM;
283 	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
284 	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
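	/* Editor's note (descriptive, added for clarity): the lines above build
	 * the AUTH RANDOM parameter in place inside asoc->c.auth_random - a
	 * parameter header of type SCTP_PARAM_RANDOM whose length covers the
	 * header plus SCTP_AUTH_RANDOM_LENGTH bytes, immediately followed by
	 * that many random bytes written starting at p + 1.
	 */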
285 
286 	return asoc;
287 
288 fail_init:
289 	sock_put(asoc->base.sk);
290 	sctp_endpoint_put(asoc->ep);
291 	return NULL;
292 }
293 
294 /* Allocate and initialize a new association */
295 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
296 					 const struct sock *sk,
297 					 sctp_scope_t scope,
298 					 gfp_t gfp)
299 {
300 	struct sctp_association *asoc;
301 
302 	asoc = kzalloc(sizeof(*asoc), gfp);
303 	if (!asoc)
304 		goto fail;
305 
306 	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
307 		goto fail_init;
308 
309 	SCTP_DBG_OBJCNT_INC(assoc);
310 
311 	pr_debug("Created asoc %p\n", asoc);
312 
313 	return asoc;
314 
315 fail_init:
316 	kfree(asoc);
317 fail:
318 	return NULL;
319 }
320 
321 /* Free this association if possible.  There may still be users, so
322  * the actual deallocation may be delayed.
323  */
324 void sctp_association_free(struct sctp_association *asoc)
325 {
326 	struct sock *sk = asoc->base.sk;
327 	struct sctp_transport *transport;
328 	struct list_head *pos, *temp;
329 	int i;
330 
331 	/* Only real associations count against the endpoint, so
332 	 * don't bother if this is a temporary association.
333 	 */
334 	if (!list_empty(&asoc->asocs)) {
335 		list_del(&asoc->asocs);
336 
337 		/* Decrement the backlog value for a TCP-style listening
338 		 * socket.
339 		 */
340 		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
341 			sk->sk_ack_backlog--;
342 	}
343 
344 	/* Mark as dead, so other users can know this structure is
345 	 * going away.
346 	 */
347 	asoc->base.dead = true;
348 
349 	/* Dispose of any data lying around in the outqueue. */
350 	sctp_outq_free(&asoc->outqueue);
351 
352 	/* Dispose of any pending messages for the upper layer. */
353 	sctp_ulpq_free(&asoc->ulpq);
354 
355 	/* Dispose of any pending chunks on the inqueue. */
356 	sctp_inq_free(&asoc->base.inqueue);
357 
358 	sctp_tsnmap_free(&asoc->peer.tsn_map);
359 
360 	/* Free ssnmap storage. */
361 	sctp_ssnmap_free(asoc->ssnmap);
362 
363 	/* Clean up the bound address list. */
364 	sctp_bind_addr_free(&asoc->base.bind_addr);
365 
366 	/* Do we need to go through all of our timers and
367 	 * delete them?   To be safe we will try to delete all, but we
368 	 * should be able to go through and make a guess based
369 	 * on our state.
370 	 */
371 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
372 		if (del_timer(&asoc->timers[i]))
373 			sctp_association_put(asoc);
374 	}
375 
376 	/* Free peer's cached cookie. */
377 	kfree(asoc->peer.cookie);
378 	kfree(asoc->peer.peer_random);
379 	kfree(asoc->peer.peer_chunks);
380 	kfree(asoc->peer.peer_hmacs);
381 
382 	/* Release the transport structures. */
383 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
384 		transport = list_entry(pos, struct sctp_transport, transports);
385 		list_del_rcu(pos);
386 		sctp_transport_free(transport);
387 	}
388 
389 	asoc->peer.transport_count = 0;
390 
391 	sctp_asconf_queue_teardown(asoc);
392 
393 	/* Free pending address space being deleted */
394 	kfree(asoc->asconf_addr_del_pending);
395 
396 	/* AUTH - Free the endpoint shared keys */
397 	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
398 
399 	/* AUTH - Free the association shared key */
400 	sctp_auth_key_put(asoc->asoc_shared_key);
401 
402 	sctp_association_put(asoc);
403 }
404 
405 /* Cleanup and free up an association. */
406 static void sctp_association_destroy(struct sctp_association *asoc)
407 {
408 	if (unlikely(!asoc->base.dead)) {
409 		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
410 		return;
411 	}
412 
413 	sctp_endpoint_put(asoc->ep);
414 	sock_put(asoc->base.sk);
415 
416 	if (asoc->assoc_id != 0) {
417 		spin_lock_bh(&sctp_assocs_id_lock);
418 		idr_remove(&sctp_assocs_id, asoc->assoc_id);
419 		spin_unlock_bh(&sctp_assocs_id_lock);
420 	}
421 
422 	WARN_ON(atomic_read(&asoc->rmem_alloc));
423 
424 	kfree(asoc);
425 	SCTP_DBG_OBJCNT_DEC(assoc);
426 }
427 
428 /* Change the primary destination address for the peer. */
429 void sctp_assoc_set_primary(struct sctp_association *asoc,
430 			    struct sctp_transport *transport)
431 {
432 	int changeover = 0;
433 
434 	/* it's a changeover only if we already have a primary path
435 	 * that we are changing
436 	 */
437 	if (asoc->peer.primary_path != NULL &&
438 	    asoc->peer.primary_path != transport)
439 		changeover = 1;
440 
441 	asoc->peer.primary_path = transport;
442 
443 	/* Set a default msg_name for events. */
444 	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
445 	       sizeof(union sctp_addr));
446 
447 	/* If the primary path is changing, assume that the
448 	 * user wants to use this new path.
449 	 */
450 	if ((transport->state == SCTP_ACTIVE) ||
451 	    (transport->state == SCTP_UNKNOWN))
452 		asoc->peer.active_path = transport;
453 
454 	/*
455 	 * SFR-CACC algorithm:
456 	 * Upon the receipt of a request to change the primary
457 	 * destination address, on the data structure for the new
458 	 * primary destination, the sender MUST do the following:
459 	 *
460 	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
461 	 * to this destination address earlier. The sender MUST set
462 	 * CYCLING_CHANGEOVER to indicate that this switch is a
463 	 * double switch to the same destination address.
464 	 *
465 	 * Really, only bother if we have data queued or outstanding on
466 	 * the association.
467 	 */
468 	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
469 		return;
470 
471 	if (transport->cacc.changeover_active)
472 		transport->cacc.cycling_changeover = changeover;
473 
474 	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
475 	 * a changeover has occurred.
476 	 */
477 	transport->cacc.changeover_active = changeover;
478 
479 	/* 3) The sender MUST store the next TSN to be sent in
480 	 * next_tsn_at_change.
481 	 */
482 	transport->cacc.next_tsn_at_change = asoc->next_tsn;
483 }
484 
485 /* Remove a transport from an association.  */
486 void sctp_assoc_rm_peer(struct sctp_association *asoc,
487 			struct sctp_transport *peer)
488 {
489 	struct sctp_transport *transport;
490 	struct list_head *pos;
491 	struct sctp_chunk *ch;
492 
493 	pr_debug("%s: association:%p addr:%pISpc\n",
494 		 __func__, asoc, &peer->ipaddr.sa);
495 
496 	/* If we are to remove the current retran_path, update it
497 	 * to the next peer before removing this peer from the list.
498 	 */
499 	if (asoc->peer.retran_path == peer)
500 		sctp_assoc_update_retran_path(asoc);
501 
502 	/* Remove this peer from the list. */
503 	list_del_rcu(&peer->transports);
504 
505 	/* Get the first transport of asoc. */
506 	pos = asoc->peer.transport_addr_list.next;
507 	transport = list_entry(pos, struct sctp_transport, transports);
508 
509 	/* Update any entries that match the peer to be deleted. */
510 	if (asoc->peer.primary_path == peer)
511 		sctp_assoc_set_primary(asoc, transport);
512 	if (asoc->peer.active_path == peer)
513 		asoc->peer.active_path = transport;
514 	if (asoc->peer.retran_path == peer)
515 		asoc->peer.retran_path = transport;
516 	if (asoc->peer.last_data_from == peer)
517 		asoc->peer.last_data_from = transport;
518 
519 	/* If we remove the transport an INIT was last sent to, set it to
520 	 * NULL. Combined with the update of the retran path above, this
521 	 * will cause the next INIT to be sent to the next available
522 	 * transport, maintaining the cycle.
523 	 */
524 	if (asoc->init_last_sent_to == peer)
525 		asoc->init_last_sent_to = NULL;
526 
527 	/* If we remove the transport an SHUTDOWN was last sent to, set it
528 	 * to NULL. Combined with the update of the retran path above, this
529 	 * will cause the next SHUTDOWN to be sent to the next available
530 	 * transport, maintaining the cycle.
531 	 */
532 	if (asoc->shutdown_last_sent_to == peer)
533 		asoc->shutdown_last_sent_to = NULL;
534 
535 	/* If we remove the transport an ASCONF was last sent to, set it to
536 	 * NULL.
537 	 */
538 	if (asoc->addip_last_asconf &&
539 	    asoc->addip_last_asconf->transport == peer)
540 		asoc->addip_last_asconf->transport = NULL;
541 
542 	/* If we have something on the transmitted list, we have to
543 	 * save it off.  The best place is the active path.
544 	 */
545 	if (!list_empty(&peer->transmitted)) {
546 		struct sctp_transport *active = asoc->peer.active_path;
547 
548 		/* Reset the transport of each chunk on this list */
549 		list_for_each_entry(ch, &peer->transmitted,
550 					transmitted_list) {
551 			ch->transport = NULL;
552 			ch->rtt_in_progress = 0;
553 		}
554 
555 		list_splice_tail_init(&peer->transmitted,
556 					&active->transmitted);
557 
558 		/* Start a T3 timer here in case it wasn't running so
559 		 * that these migrated packets have a chance to get
560 		 * retransmitted.
561 		 */
562 		if (!timer_pending(&active->T3_rtx_timer))
563 			if (!mod_timer(&active->T3_rtx_timer,
564 					jiffies + active->rto))
565 				sctp_transport_hold(active);
566 	}
567 
568 	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
569 		if (ch->transport == peer)
570 			ch->transport = NULL;
571 
572 	asoc->peer.transport_count--;
573 
574 	sctp_transport_free(peer);
575 }
576 
577 /* Add a transport address to an association.  */
578 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
579 					   const union sctp_addr *addr,
580 					   const gfp_t gfp,
581 					   const int peer_state)
582 {
583 	struct net *net = sock_net(asoc->base.sk);
584 	struct sctp_transport *peer;
585 	struct sctp_sock *sp;
586 	unsigned short port;
587 
588 	sp = sctp_sk(asoc->base.sk);
589 
590 	/* AF_INET and AF_INET6 share common port field. */
591 	port = ntohs(addr->v4.sin_port);
592 
593 	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
594 		 asoc, &addr->sa, peer_state);
595 
596 	/* Set the port if it has not been set yet.  */
597 	if (0 == asoc->peer.port)
598 		asoc->peer.port = port;
599 
600 	/* Check to see if this is a duplicate. */
601 	peer = sctp_assoc_lookup_paddr(asoc, addr);
602 	if (peer) {
603 		/* An UNKNOWN state is only set on transports added by
604 		 * user in sctp_connectx() call.  Such transports should be
605 		 * considered CONFIRMED per RFC 4960, Section 5.4.
606 		 */
607 		if (peer->state == SCTP_UNKNOWN) {
608 			peer->state = SCTP_ACTIVE;
609 		}
610 		return peer;
611 	}
612 
613 	peer = sctp_transport_new(net, addr, gfp);
614 	if (!peer)
615 		return NULL;
616 
617 	sctp_transport_set_owner(peer, asoc);
618 
619 	/* Initialize the peer's heartbeat interval based on the
620 	 * association configured value.
621 	 */
622 	peer->hbinterval = asoc->hbinterval;
623 
624 	/* Set the path max_retrans.  */
625 	peer->pathmaxrxt = asoc->pathmaxrxt;
626 
627 	/* And the partial failure retrans threshold */
628 	peer->pf_retrans = asoc->pf_retrans;
629 
630 	/* Initialize the peer's SACK delay timeout based on the
631 	 * association configured value.
632 	 */
633 	peer->sackdelay = asoc->sackdelay;
634 	peer->sackfreq = asoc->sackfreq;
635 
636 	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
637 	 * based on association setting.
638 	 */
639 	peer->param_flags = asoc->param_flags;
640 
641 	sctp_transport_route(peer, NULL, sp);
642 
643 	/* Initialize the pmtu of the transport. */
644 	if (peer->param_flags & SPP_PMTUD_DISABLE) {
645 		if (asoc->pathmtu)
646 			peer->pathmtu = asoc->pathmtu;
647 		else
648 			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
649 	}
650 
651 	/* If this is the first transport addr on this association,
652 	 * initialize the association PMTU to the peer's PMTU.
653 	 * If not and the current association PMTU is higher than the new
654 	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
655 	 */
656 	if (asoc->pathmtu)
657 		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
658 	else
659 		asoc->pathmtu = peer->pathmtu;
660 
661 	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
662 		 asoc->pathmtu);
663 
664 	peer->pmtu_pending = 0;
665 
666 	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
667 
668 	/* The asoc->peer.port might not be meaningful yet, but
669 	 * initialize the packet structure anyway.
670 	 */
671 	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
672 			 asoc->peer.port);
673 
674 	/* 7.2.1 Slow-Start
675 	 *
676 	 * o The initial cwnd before DATA transmission or after a sufficiently
677 	 *   long idle period MUST be set to
678 	 *      min(4*MTU, max(2*MTU, 4380 bytes))
679 	 *
680 	 * o The initial value of ssthresh MAY be arbitrarily high
681 	 *   (for example, implementations MAY use the size of the
682 	 *   receiver advertised window).
683 	 */
684 	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
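	/* Editor's note (worked example, not part of the original file): with a
	 * typical 1500-byte path MTU this evaluates to
	 * min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380) = 4380 bytes of
	 * initial congestion window, i.e. roughly three full-sized packets.
	 */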
685 
686 	/* At this point, we may not have the receiver's advertised window,
687 	 * so initialize ssthresh to the default value and it will be set
688 	 * later when we process the INIT.
689 	 */
690 	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
691 
692 	peer->partial_bytes_acked = 0;
693 	peer->flight_size = 0;
694 	peer->burst_limited = 0;
695 
696 	/* Set the transport's RTO.initial value */
697 	peer->rto = asoc->rto_initial;
698 	sctp_max_rto(asoc, peer);
699 
700 	/* Set the peer's active state. */
701 	peer->state = peer_state;
702 
703 	/* Attach the remote transport to our asoc.  */
704 	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
705 	asoc->peer.transport_count++;
706 
707 	/* If we do not yet have a primary path, set one.  */
708 	if (!asoc->peer.primary_path) {
709 		sctp_assoc_set_primary(asoc, peer);
710 		asoc->peer.retran_path = peer;
711 	}
712 
713 	if (asoc->peer.active_path == asoc->peer.retran_path &&
714 	    peer->state != SCTP_UNCONFIRMED) {
715 		asoc->peer.retran_path = peer;
716 	}
717 
718 	return peer;
719 }
720 
721 /* Delete a transport address from an association.  */
722 void sctp_assoc_del_peer(struct sctp_association *asoc,
723 			 const union sctp_addr *addr)
724 {
725 	struct list_head	*pos;
726 	struct list_head	*temp;
727 	struct sctp_transport	*transport;
728 
729 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
730 		transport = list_entry(pos, struct sctp_transport, transports);
731 		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
732 			/* Do book keeping for removing the peer and free it. */
733 			sctp_assoc_rm_peer(asoc, transport);
734 			break;
735 		}
736 	}
737 }
738 
739 /* Lookup a transport by address. */
740 struct sctp_transport *sctp_assoc_lookup_paddr(
741 					const struct sctp_association *asoc,
742 					const union sctp_addr *address)
743 {
744 	struct sctp_transport *t;
745 
746 	/* Cycle through all transports searching for a peer address. */
747 
748 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
749 			transports) {
750 		if (sctp_cmp_addr_exact(address, &t->ipaddr))
751 			return t;
752 	}
753 
754 	return NULL;
755 }
756 
757 /* Remove all transports except a given one */
758 void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
759 				     struct sctp_transport *primary)
760 {
761 	struct sctp_transport	*temp;
762 	struct sctp_transport	*t;
763 
764 	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
765 				 transports) {
766 		/* if the current transport is not the primary one, delete it */
767 		if (t != primary)
768 			sctp_assoc_rm_peer(asoc, t);
769 	}
770 }
771 
772 /* Engage in transport control operations.
773  * Mark the transport up or down and send a notification to the user.
774  * Select and update the new active and retran paths.
775  */
776 void sctp_assoc_control_transport(struct sctp_association *asoc,
777 				  struct sctp_transport *transport,
778 				  sctp_transport_cmd_t command,
779 				  sctp_sn_error_t error)
780 {
781 	struct sctp_ulpevent *event;
782 	struct sockaddr_storage addr;
783 	int spc_state = 0;
784 	bool ulp_notify = true;
785 
786 	/* Record the transition on the transport.  */
787 	switch (command) {
788 	case SCTP_TRANSPORT_UP:
789 		/* If we are moving from UNCONFIRMED state due
790 		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
791 		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
792 		 */
793 		if (SCTP_UNCONFIRMED == transport->state &&
794 		    SCTP_HEARTBEAT_SUCCESS == error)
795 			spc_state = SCTP_ADDR_CONFIRMED;
796 		else
797 			spc_state = SCTP_ADDR_AVAILABLE;
798 		/* Don't inform ULP about transition from PF to
799 		 * active state and set cwnd to 1 MTU, see SCTP
800 		 * Quick failover draft section 5.1, point 5
801 		 */
802 		if (transport->state == SCTP_PF) {
803 			ulp_notify = false;
804 			transport->cwnd = asoc->pathmtu;
805 		}
806 		transport->state = SCTP_ACTIVE;
807 		break;
808 
809 	case SCTP_TRANSPORT_DOWN:
810 		/* If the transport was never confirmed, do not transition it
811 		 * to inactive state.  Also, release the cached route since
812 		 * there may be a better route next time.
813 		 */
814 		if (transport->state != SCTP_UNCONFIRMED)
815 			transport->state = SCTP_INACTIVE;
816 		else {
817 			dst_release(transport->dst);
818 			transport->dst = NULL;
819 			ulp_notify = false;
820 		}
821 
822 		spc_state = SCTP_ADDR_UNREACHABLE;
823 		break;
824 
825 	case SCTP_TRANSPORT_PF:
826 		transport->state = SCTP_PF;
827 		ulp_notify = false;
828 		break;
829 
830 	default:
831 		return;
832 	}
833 
834 	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
835 	 * to the user.
836 	 */
837 	if (ulp_notify) {
838 		memset(&addr, 0, sizeof(struct sockaddr_storage));
839 		memcpy(&addr, &transport->ipaddr,
840 		       transport->af_specific->sockaddr_len);
841 
842 		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
843 					0, spc_state, error, GFP_ATOMIC);
844 		if (event)
845 			sctp_ulpq_tail_event(&asoc->ulpq, event);
846 	}
847 
848 	/* Select new active and retran paths. */
849 	sctp_select_active_and_retran_path(asoc);
850 }
851 
852 /* Hold a reference to an association. */
853 void sctp_association_hold(struct sctp_association *asoc)
854 {
855 	atomic_inc(&asoc->base.refcnt);
856 }
857 
858 /* Release a reference to an association and cleanup
859  * if there are no more references.
860  */
861 void sctp_association_put(struct sctp_association *asoc)
862 {
863 	if (atomic_dec_and_test(&asoc->base.refcnt))
864 		sctp_association_destroy(asoc);
865 }
866 
867 /* Allocate the next TSN, Transmission Sequence Number, for the given
868  * association.
869  */
870 __u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
871 {
872 	/* From Section 1.6 Serial Number Arithmetic:
873 	 * Transmission Sequence Numbers wrap around when they reach
874 	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
875 	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
876 	 */
877 	__u32 retval = asoc->next_tsn;
878 	asoc->next_tsn++;
879 	asoc->unack_data++;
880 
881 	return retval;
882 }
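/* Editor's note: the wrap-around described in sctp_association_get_next_tsn()
 * above needs no explicit modulo - next_tsn is a __u32, so incrementing it
 * past 0xffffffff wraps to 0 by plain unsigned arithmetic.
 */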
883 
884 /* Compare two addresses to see if they match.  Wildcard addresses
885  * only match themselves.
886  */
887 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
888 			const union sctp_addr *ss2)
889 {
890 	struct sctp_af *af;
891 
892 	af = sctp_get_af_specific(ss1->sa.sa_family);
893 	if (unlikely(!af))
894 		return 0;
895 
896 	return af->cmp_addr(ss1, ss2);
897 }
898 
899 /* Return an ecne chunk to get prepended to a packet.
900  * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
901  * No we don't, but we could/should.
902  */
903 struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
904 {
905 	if (!asoc->need_ecne)
906 		return NULL;
907 
908 	/* Send ECNE if needed.
909 	 * Not being able to allocate a chunk here is not deadly.
910 	 */
911 	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
912 }
913 
914 /*
915  * Find which transport this TSN was sent on.
916  */
917 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
918 					     __u32 tsn)
919 {
920 	struct sctp_transport *active;
921 	struct sctp_transport *match;
922 	struct sctp_transport *transport;
923 	struct sctp_chunk *chunk;
924 	__be32 key = htonl(tsn);
925 
926 	match = NULL;
927 
928 	/*
929 	 * FIXME: In general, find a more efficient data structure for
930 	 * searching.
931 	 */
932 
933 	/*
934 	 * The general strategy is to search each transport's transmitted
935 	 * list.   Return which transport this TSN lives on.
936 	 *
937 	 * Let's be hopeful and check the active_path first.
938 	 * Another optimization would be to know if there is only one
939 	 * outbound path and not have to look for the TSN at all.
940 	 *
941 	 */
942 
943 	active = asoc->peer.active_path;
944 
945 	list_for_each_entry(chunk, &active->transmitted,
946 			transmitted_list) {
947 
948 		if (key == chunk->subh.data_hdr->tsn) {
949 			match = active;
950 			goto out;
951 		}
952 	}
953 
954 	/* If not found, go search all the other transports. */
955 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
956 			transports) {
957 
958 		if (transport == active)
959 			continue;
960 		list_for_each_entry(chunk, &transport->transmitted,
961 				transmitted_list) {
962 			if (key == chunk->subh.data_hdr->tsn) {
963 				match = transport;
964 				goto out;
965 			}
966 		}
967 	}
968 out:
969 	return match;
970 }
971 
972 /* Is this the association we are looking for? */
973 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
974 					   struct net *net,
975 					   const union sctp_addr *laddr,
976 					   const union sctp_addr *paddr)
977 {
978 	struct sctp_transport *transport;
979 
980 	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
981 	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
982 	    net_eq(sock_net(asoc->base.sk), net)) {
983 		transport = sctp_assoc_lookup_paddr(asoc, paddr);
984 		if (!transport)
985 			goto out;
986 
987 		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
988 					 sctp_sk(asoc->base.sk)))
989 			goto out;
990 	}
991 	transport = NULL;
992 
993 out:
994 	return transport;
995 }
996 
997 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
998 static void sctp_assoc_bh_rcv(struct work_struct *work)
999 {
1000 	struct sctp_association *asoc =
1001 		container_of(work, struct sctp_association,
1002 			     base.inqueue.immediate);
1003 	struct net *net = sock_net(asoc->base.sk);
1004 	struct sctp_endpoint *ep;
1005 	struct sctp_chunk *chunk;
1006 	struct sctp_inq *inqueue;
1007 	sctp_subtype_t subtype;
1008 	int first_time = 1;	/* is this the first time through the loop */
1009 	int error = 0;
1010 	int state;
1011 
1012 	/* The association should be held so we should be safe. */
1013 	ep = asoc->ep;
1014 
1015 	inqueue = &asoc->base.inqueue;
1016 	sctp_association_hold(asoc);
1017 	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
1018 		state = asoc->state;
1019 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1020 
1021 		/* If the first chunk in the packet is AUTH, do special
1022 		 * processing specified in Section 6.3 of SCTP-AUTH spec
1023 		 */
1024 		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
1025 			struct sctp_chunkhdr *next_hdr;
1026 
1027 			next_hdr = sctp_inq_peek(inqueue);
1028 			if (!next_hdr)
1029 				goto normal;
1030 
1031 			/* If the next chunk is COOKIE-ECHO, skip the AUTH
1032 			 * chunk while saving a pointer to it so we can do
1033 			 * Authentication later (during cookie-echo
1034 			 * processing).
1035 			 */
1036 			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
1037 				chunk->auth_chunk = skb_clone(chunk->skb,
1038 							      GFP_ATOMIC);
1039 				chunk->auth = 1;
1040 				continue;
1041 			}
1042 		}
1043 
1044 normal:
1045 		/* SCTP-AUTH, Section 6.3:
1046 		 *    The receiver has a list of chunk types which it expects
1047 		 *    to be received only after an AUTH-chunk.  This list has
1048 		 *    been sent to the peer during the association setup.  It
1049 		 *    MUST silently discard these chunks if they are not placed
1050 		 *    after an AUTH chunk in the packet.
1051 		 */
1052 		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1053 			continue;
1054 
1055 		/* Remember where the last DATA chunk came from so we
1056 		 * know where to send the SACK.
1057 		 */
1058 		if (sctp_chunk_is_data(chunk))
1059 			asoc->peer.last_data_from = chunk->transport;
1060 		else {
1061 			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1062 			asoc->stats.ictrlchunks++;
1063 			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1064 				asoc->stats.isacks++;
1065 		}
1066 
1067 		if (chunk->transport)
1068 			chunk->transport->last_time_heard = ktime_get();
1069 
1070 		/* Run through the state machine. */
1071 		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1072 				   state, ep, asoc, chunk, GFP_ATOMIC);
1073 
1074 		/* Check to see if the association is freed in response to
1075 		 * the incoming chunk.  If so, get out of the while loop.
1076 		 */
1077 		if (asoc->base.dead)
1078 			break;
1079 
1080 		/* If there is an error on chunk, discard this packet. */
1081 		if (error && chunk)
1082 			chunk->pdiscard = 1;
1083 
1084 		if (first_time)
1085 			first_time = 0;
1086 	}
1087 	sctp_association_put(asoc);
1088 }
1089 
1090 /* This routine moves an association from its old sk to a new sk.  */
1091 void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1092 {
1093 	struct sctp_sock *newsp = sctp_sk(newsk);
1094 	struct sock *oldsk = assoc->base.sk;
1095 
1096 	/* Delete the association from the old endpoint's list of
1097 	 * associations.
1098 	 */
1099 	list_del_init(&assoc->asocs);
1100 
1101 	/* Decrement the backlog value for a TCP-style socket. */
1102 	if (sctp_style(oldsk, TCP))
1103 		oldsk->sk_ack_backlog--;
1104 
1105 	/* Release references to the old endpoint and the sock.  */
1106 	sctp_endpoint_put(assoc->ep);
1107 	sock_put(assoc->base.sk);
1108 
1109 	/* Get a reference to the new endpoint.  */
1110 	assoc->ep = newsp->ep;
1111 	sctp_endpoint_hold(assoc->ep);
1112 
1113 	/* Get a reference to the new sock.  */
1114 	assoc->base.sk = newsk;
1115 	sock_hold(assoc->base.sk);
1116 
1117 	/* Add the association to the new endpoint's list of associations.  */
1118 	sctp_endpoint_add_asoc(newsp->ep, assoc);
1119 }
1120 
1121 /* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
1122 void sctp_assoc_update(struct sctp_association *asoc,
1123 		       struct sctp_association *new)
1124 {
1125 	struct sctp_transport *trans;
1126 	struct list_head *pos, *temp;
1127 
1128 	/* Copy in new parameters of peer. */
1129 	asoc->c = new->c;
1130 	asoc->peer.rwnd = new->peer.rwnd;
1131 	asoc->peer.sack_needed = new->peer.sack_needed;
1132 	asoc->peer.auth_capable = new->peer.auth_capable;
1133 	asoc->peer.i = new->peer.i;
1134 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1135 			 asoc->peer.i.initial_tsn, GFP_ATOMIC);
1136 
1137 	/* Remove any peer addresses not present in the new association. */
1138 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1139 		trans = list_entry(pos, struct sctp_transport, transports);
1140 		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1141 			sctp_assoc_rm_peer(asoc, trans);
1142 			continue;
1143 		}
1144 
1145 		if (asoc->state >= SCTP_STATE_ESTABLISHED)
1146 			sctp_transport_reset(trans);
1147 	}
1148 
1149 	/* If the case is A (association restart), use
1150 	 * initial_tsn as next_tsn. If the case is B, use
1151 	 * current next_tsn in case data sent to peer
1152 	 * has been discarded and needs retransmission.
1153 	 */
1154 	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1155 		asoc->next_tsn = new->next_tsn;
1156 		asoc->ctsn_ack_point = new->ctsn_ack_point;
1157 		asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1158 
1159 		/* Reinitialize SSN for both local streams
1160 		 * and peer's streams.
1161 		 */
1162 		sctp_ssnmap_clear(asoc->ssnmap);
1163 
1164 		/* Flush the ULP reassembly and ordered queue.
1165 		 * Any data there will now be stale and will
1166 		 * cause problems.
1167 		 */
1168 		sctp_ulpq_flush(&asoc->ulpq);
1169 
1170 		/* reset the overall association error count so
1171 		 * that the restarted association doesn't get torn
1172 		 * down on the next retransmission timer.
1173 		 */
1174 		asoc->overall_error_count = 0;
1175 
1176 	} else {
1177 		/* Add any peer addresses from the new association. */
1178 		list_for_each_entry(trans, &new->peer.transport_addr_list,
1179 				transports) {
1180 			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1181 				sctp_assoc_add_peer(asoc, &trans->ipaddr,
1182 						    GFP_ATOMIC, trans->state);
1183 		}
1184 
1185 		asoc->ctsn_ack_point = asoc->next_tsn - 1;
1186 		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1187 		if (!asoc->ssnmap) {
1188 			/* Move the ssnmap. */
1189 			asoc->ssnmap = new->ssnmap;
1190 			new->ssnmap = NULL;
1191 		}
1192 
1193 		if (!asoc->assoc_id) {
1194 			/* get a new association id since we don't have one
1195 			 * yet.
1196 			 */
1197 			sctp_assoc_set_id(asoc, GFP_ATOMIC);
1198 		}
1199 	}
1200 
1201 	/* SCTP-AUTH: Save the peer parameters from the new associations
1202 	 * and also move the association shared keys over
1203 	 */
1204 	kfree(asoc->peer.peer_random);
1205 	asoc->peer.peer_random = new->peer.peer_random;
1206 	new->peer.peer_random = NULL;
1207 
1208 	kfree(asoc->peer.peer_chunks);
1209 	asoc->peer.peer_chunks = new->peer.peer_chunks;
1210 	new->peer.peer_chunks = NULL;
1211 
1212 	kfree(asoc->peer.peer_hmacs);
1213 	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1214 	new->peer.peer_hmacs = NULL;
1215 
1216 	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1217 }
1218 
1219 /* Update the retran path for sending a retransmitted packet.
1220  * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
1221  *
1222  *   When there is outbound data to send and the primary path
1223  *   becomes inactive (e.g., due to failures), or where the
1224  *   SCTP user explicitly requests to send data to an
1225  *   inactive destination transport address, before reporting
1226  *   an error to its ULP, the SCTP endpoint should try to send
1227  *   the data to an alternate active destination transport
1228  *   address if one exists.
1229  *
1230  *   When retransmitting data that timed out, if the endpoint
1231  *   is multihomed, it should consider each source-destination
1232  *   address pair in its retransmission selection policy.
1233  *   When retransmitting timed-out data, the endpoint should
1234  *   attempt to pick the most divergent source-destination
1235  *   pair from the original source-destination pair to which
1236  *   the packet was transmitted.
1237  *
1238  *   Note: Rules for picking the most divergent source-destination
1239  *   pair are an implementation decision and are not specified
1240  *   within this document.
1241  *
1242  * Our basic strategy is to round-robin transports in priorities
1243  * according to sctp_trans_score() e.g., if no such
1244  * transport with state SCTP_ACTIVE exists, round-robin through
1245  * SCTP_UNKNOWN, etc. You get the picture.
1246  */
1247 static u8 sctp_trans_score(const struct sctp_transport *trans)
1248 {
1249 	switch (trans->state) {
1250 	case SCTP_ACTIVE:
1251 		return 3;	/* best case */
1252 	case SCTP_UNKNOWN:
1253 		return 2;
1254 	case SCTP_PF:
1255 		return 1;
1256 	default: /* case SCTP_INACTIVE */
1257 		return 0;	/* worst case */
1258 	}
1259 }
1260 
1261 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1262 						   struct sctp_transport *trans2)
1263 {
1264 	if (trans1->error_count > trans2->error_count) {
1265 		return trans2;
1266 	} else if (trans1->error_count == trans2->error_count &&
1267 		   ktime_after(trans2->last_time_heard,
1268 			       trans1->last_time_heard)) {
1269 		return trans2;
1270 	} else {
1271 		return trans1;
1272 	}
1273 }
1274 
1275 static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1276 						    struct sctp_transport *best)
1277 {
1278 	u8 score_curr, score_best;
1279 
1280 	if (best == NULL || curr == best)
1281 		return curr;
1282 
1283 	score_curr = sctp_trans_score(curr);
1284 	score_best = sctp_trans_score(best);
1285 
1286 	/* First, try a score-based selection if both transport states
1287 	 * differ. If we're in a tie, lets try to make a more clever
1288 	 * decision here based on error counts and last time heard.
1289 	 */
1290 	if (score_curr > score_best)
1291 		return curr;
1292 	else if (score_curr == score_best)
1293 		return sctp_trans_elect_tie(best, curr);
1294 	else
1295 		return best;
1296 }
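/* Editor's note (summary, added for clarity): taken together, the two helpers
 * above prefer ACTIVE (score 3) over UNKNOWN (2) over PF (1) over INACTIVE (0);
 * on a score tie the transport with the lower error count wins, and on equal
 * error counts the one heard from most recently wins.
 */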
1297 
1298 void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1299 {
1300 	struct sctp_transport *trans = asoc->peer.retran_path;
1301 	struct sctp_transport *trans_next = NULL;
1302 
1303 	/* We're done as we only have the one and only path. */
1304 	if (asoc->peer.transport_count == 1)
1305 		return;
1306 	/* If active_path and retran_path are the same and active,
1307 	 * then this is the only active path. Use it.
1308 	 */
1309 	if (asoc->peer.active_path == asoc->peer.retran_path &&
1310 	    asoc->peer.active_path->state == SCTP_ACTIVE)
1311 		return;
1312 
1313 	/* Iterate from retran_path's successor back to retran_path. */
1314 	for (trans = list_next_entry(trans, transports); 1;
1315 	     trans = list_next_entry(trans, transports)) {
1316 		/* Manually skip the head element. */
1317 		if (&trans->transports == &asoc->peer.transport_addr_list)
1318 			continue;
1319 		if (trans->state == SCTP_UNCONFIRMED)
1320 			continue;
1321 		trans_next = sctp_trans_elect_best(trans, trans_next);
1322 		/* Active is good enough for immediate return. */
1323 		if (trans_next->state == SCTP_ACTIVE)
1324 			break;
1325 		/* We've reached the end, time to update path. */
1326 		if (trans == asoc->peer.retran_path)
1327 			break;
1328 	}
1329 
1330 	asoc->peer.retran_path = trans_next;
1331 
1332 	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
1333 		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
1334 }
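/* Editor's note (worked example, not part of the original file): with
 * transports [A, B, C] in list order, retran_path == A, B in state SCTP_PF and
 * C in state SCTP_ACTIVE, the walk above starts at B, keeps it as the best
 * candidate so far, then reaches C, which wins on score and - being ACTIVE -
 * ends the loop early, so the retran path rotates from A to C.
 */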
1335 
1336 static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
1337 {
1338 	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
1339 	struct sctp_transport *trans_pf = NULL;
1340 
1341 	/* Look for the two most recently used active transports. */
1342 	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
1343 			    transports) {
1344 		/* Skip uninteresting transports. */
1345 		if (trans->state == SCTP_INACTIVE ||
1346 		    trans->state == SCTP_UNCONFIRMED)
1347 			continue;
1348 		/* Keep track of the best PF transport from our
1349 		 * list in case we don't find an active one.
1350 		 */
1351 		if (trans->state == SCTP_PF) {
1352 			trans_pf = sctp_trans_elect_best(trans, trans_pf);
1353 			continue;
1354 		}
1355 		/* For active transports, pick the most recent ones. */
1356 		if (trans_pri == NULL ||
1357 		    ktime_after(trans->last_time_heard,
1358 				trans_pri->last_time_heard)) {
1359 			trans_sec = trans_pri;
1360 			trans_pri = trans;
1361 		} else if (trans_sec == NULL ||
1362 			   ktime_after(trans->last_time_heard,
1363 				       trans_sec->last_time_heard)) {
1364 			trans_sec = trans;
1365 		}
1366 	}
1367 
1368 	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
1369 	 *
1370 	 * By default, an endpoint should always transmit to the primary
1371 	 * path, unless the SCTP user explicitly specifies the
1372 	 * destination transport address (and possibly source transport
1373 	 * address) to use. [If the primary is active but not most recent,
1374 	 * bump the most recently used transport.]
1375 	 */
1376 	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
1377 	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
1378 	     asoc->peer.primary_path != trans_pri) {
1379 		trans_sec = trans_pri;
1380 		trans_pri = asoc->peer.primary_path;
1381 	}
1382 
1383 	/* We did not find anything useful for a possible retransmission
1384 	 * path; either the primary path that we found is the same as
1385 	 * the current one, or we didn't generally find an active one.
1386 	 */
1387 	if (trans_sec == NULL)
1388 		trans_sec = trans_pri;
1389 
1390 	/* If we failed to find a usable transport, just camp on the
1391 	 * active or pick a PF iff it's the better choice.
1392 	 */
1393 	if (trans_pri == NULL) {
1394 		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
1395 		trans_sec = trans_pri;
1396 	}
1397 
1398 	/* Set the active and retran transports. */
1399 	asoc->peer.active_path = trans_pri;
1400 	asoc->peer.retran_path = trans_sec;
1401 }
1402 
1403 struct sctp_transport *
1404 sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1405 				  struct sctp_transport *last_sent_to)
1406 {
1407 	/* If this is the first time packet is sent, use the active path,
1408 	 * else use the retran path. If the last packet was sent over the
1409 	 * retran path, update the retran path and use it.
1410 	 */
1411 	if (last_sent_to == NULL) {
1412 		return asoc->peer.active_path;
1413 	} else {
1414 		if (last_sent_to == asoc->peer.retran_path)
1415 			sctp_assoc_update_retran_path(asoc);
1416 
1417 		return asoc->peer.retran_path;
1418 	}
1419 }
1420 
1421 /* Update the association's pmtu and frag_point by going through all the
1422  * transports. This routine is called when a transport's PMTU has changed.
1423  */
1424 void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1425 {
1426 	struct sctp_transport *t;
1427 	__u32 pmtu = 0;
1428 
1429 	if (!asoc)
1430 		return;
1431 
1432 	/* Get the lowest pmtu of all the transports. */
1433 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
1434 				transports) {
1435 		if (t->pmtu_pending && t->dst) {
1436 			sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
1437 			t->pmtu_pending = 0;
1438 		}
1439 		if (!pmtu || (t->pathmtu < pmtu))
1440 			pmtu = t->pathmtu;
1441 	}
1442 
1443 	if (pmtu) {
1444 		asoc->pathmtu = pmtu;
1445 		asoc->frag_point = sctp_frag_point(asoc, pmtu);
1446 	}
1447 
1448 	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1449 		 asoc->pathmtu, asoc->frag_point);
1450 }
1451 
1452 /* Should we send a SACK to update our peer? */
1453 static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1454 {
1455 	struct net *net = sock_net(asoc->base.sk);
1456 	switch (asoc->state) {
1457 	case SCTP_STATE_ESTABLISHED:
1458 	case SCTP_STATE_SHUTDOWN_PENDING:
1459 	case SCTP_STATE_SHUTDOWN_RECEIVED:
1460 	case SCTP_STATE_SHUTDOWN_SENT:
1461 		if ((asoc->rwnd > asoc->a_rwnd) &&
1462 		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1463 			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1464 			   asoc->pathmtu)))
1465 			return true;
1466 		break;
1467 	default:
1468 		break;
1469 	}
1470 	return false;
1471 }
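/* Editor's note (worked example, assuming the usual defaults of
 * rwnd_upd_shift == 4 and a 212992-byte sk_rcvbuf): the check above suggests a
 * window-update SACK once rwnd exceeds the last advertised a_rwnd by at least
 * max(212992 >> 4, pathmtu) = max(13312, pathmtu) bytes.
 */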
1472 
1473 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
1474 void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1475 {
1476 	struct sctp_chunk *sack;
1477 	struct timer_list *timer;
1478 
1479 	if (asoc->rwnd_over) {
1480 		if (asoc->rwnd_over >= len) {
1481 			asoc->rwnd_over -= len;
1482 		} else {
1483 			asoc->rwnd += (len - asoc->rwnd_over);
1484 			asoc->rwnd_over = 0;
1485 		}
1486 	} else {
1487 		asoc->rwnd += len;
1488 	}
1489 
1490 	/* If we had window pressure, start recovering it
1491 	 * once our rwnd had reached the accumulated pressure
1492 	 * threshold.  The idea is to recover slowly, but up
1493 	 * to the initial advertised window.
1494 	 */
1495 	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1496 		int change = min(asoc->pathmtu, asoc->rwnd_press);
1497 		asoc->rwnd += change;
1498 		asoc->rwnd_press -= change;
1499 	}
1500 
1501 	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
1502 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1503 		 asoc->a_rwnd);
1504 
1505 	/* Send a window update SACK if the rwnd has increased by at least the
1506 	 * minimum of the association's PMTU and half of the receive buffer.
1507 	 * The algorithm used is similar to the one described in
1508 	 * Section 4.2.3.3 of RFC 1122.
1509 	 */
1510 	if (sctp_peer_needs_update(asoc)) {
1511 		asoc->a_rwnd = asoc->rwnd;
1512 
1513 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
1514 			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
1515 			 asoc->a_rwnd);
1516 
1517 		sack = sctp_make_sack(asoc);
1518 		if (!sack)
1519 			return;
1520 
1521 		asoc->peer.sack_needed = 0;
1522 
1523 		sctp_outq_tail(&asoc->outqueue, sack);
1524 
1525 		/* Stop the SACK timer.  */
1526 		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1527 		if (del_timer(timer))
1528 			sctp_association_put(asoc);
1529 	}
1530 }
1531 
1532 /* Decrease asoc's rwnd by len. */
1533 void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1534 {
1535 	int rx_count;
1536 	int over = 0;
1537 
1538 	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1539 		pr_debug("%s: association:%p has asoc->rwnd:%u, "
1540 			 "asoc->rwnd_over:%u!\n", __func__, asoc,
1541 			 asoc->rwnd, asoc->rwnd_over);
1542 
1543 	if (asoc->ep->rcvbuf_policy)
1544 		rx_count = atomic_read(&asoc->rmem_alloc);
1545 	else
1546 		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1547 
1548 	/* If we've reached or overflowed our receive buffer, announce
1549 	 * a 0 rwnd if rwnd would still be positive.  Store the
1550 	 * potential pressure overflow so that the window can be restored
1551 	 * back to original value.
1552 	 */
1553 	if (rx_count >= asoc->base.sk->sk_rcvbuf)
1554 		over = 1;
1555 
1556 	if (asoc->rwnd >= len) {
1557 		asoc->rwnd -= len;
1558 		if (over) {
1559 			asoc->rwnd_press += asoc->rwnd;
1560 			asoc->rwnd = 0;
1561 		}
1562 	} else {
1563 		asoc->rwnd_over = len - asoc->rwnd;
1564 		asoc->rwnd = 0;
1565 	}
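	/* Editor's note (worked example): if rwnd is 1000 and a 1500-byte chunk
	 * is charged, rwnd drops to 0 and rwnd_over records the 500-byte excess;
	 * under buffer pressure (over == 1) the leftover window is instead
	 * parked in rwnd_press so sctp_assoc_rwnd_increase() can restore it
	 * later.
	 */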
1566 
1567 	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1568 		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1569 		 asoc->rwnd_press);
1570 }
1571 
1572 /* Build the bind address list for the association based on info from the
1573  * local endpoint and the remote peer.
1574  */
1575 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1576 				     sctp_scope_t scope, gfp_t gfp)
1577 {
1578 	struct sock *sk = asoc->base.sk;
1579 	int flags;
1580 
1581 	/* Use scoping rules to determine the subset of addresses from
1582 	 * the endpoint.
1583 	 */
1584 	flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1585 	if (!inet_v6_ipv6only(sk))
1586 		flags |= SCTP_ADDR4_ALLOWED;
1587 	if (asoc->peer.ipv4_address)
1588 		flags |= SCTP_ADDR4_PEERSUPP;
1589 	if (asoc->peer.ipv6_address)
1590 		flags |= SCTP_ADDR6_PEERSUPP;
1591 
1592 	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1593 				   &asoc->base.bind_addr,
1594 				   &asoc->ep->base.bind_addr,
1595 				   scope, gfp, flags);
1596 }
1597 
1598 /* Build the association's bind address list from the cookie.  */
1599 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1600 					 struct sctp_cookie *cookie,
1601 					 gfp_t gfp)
1602 {
1603 	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1604 	int var_size3 = cookie->raw_addr_list_len;
1605 	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1606 
1607 	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1608 				      asoc->ep->base.bind_addr.port, gfp);
1609 }
1610 
1611 /* Lookup laddr in the bind address list of an association. */
1612 int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1613 			    const union sctp_addr *laddr)
1614 {
1615 	int found = 0;
1616 
1617 	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1618 	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1619 				 sctp_sk(asoc->base.sk)))
1620 		found = 1;
1621 
1622 	return found;
1623 }
1624 
1625 /* Set an association id for a given association */
1626 int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1627 {
1628 	bool preload = gfpflags_allow_blocking(gfp);
1629 	int ret;
1630 
1631 	/* If the id is already assigned, keep it. */
1632 	if (asoc->assoc_id)
1633 		return 0;
1634 
1635 	if (preload)
1636 		idr_preload(gfp);
1637 	spin_lock_bh(&sctp_assocs_id_lock);
1638 	/* 0 is not a valid assoc_id, must be >= 1 */
1639 	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
1640 	spin_unlock_bh(&sctp_assocs_id_lock);
1641 	if (preload)
1642 		idr_preload_end();
1643 	if (ret < 0)
1644 		return ret;
1645 
1646 	asoc->assoc_id = (sctp_assoc_t)ret;
1647 	return 0;
1648 }
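/* Editor's note: in sctp_assoc_set_id() above, idr_alloc_cyclic() hands out
 * ids cyclically starting from 1, with an end argument of 0 meaning "no upper
 * bound"; the idr_preload()/idr_preload_end() pair is only used when the gfp
 * flags allow blocking, so the allocation itself can run under the spinlock
 * with GFP_NOWAIT.
 */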
1649 
1650 /* Free the ASCONF queue */
1651 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1652 {
1653 	struct sctp_chunk *asconf;
1654 	struct sctp_chunk *tmp;
1655 
1656 	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1657 		list_del_init(&asconf->list);
1658 		sctp_chunk_free(asconf);
1659 	}
1660 }
1661 
1662 /* Free asconf_ack cache */
1663 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1664 {
1665 	struct sctp_chunk *ack;
1666 	struct sctp_chunk *tmp;
1667 
1668 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1669 				transmitted_list) {
1670 		list_del_init(&ack->transmitted_list);
1671 		sctp_chunk_free(ack);
1672 	}
1673 }
1674 
1675 /* Clean up the ASCONF_ACK queue */
1676 void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1677 {
1678 	struct sctp_chunk *ack;
1679 	struct sctp_chunk *tmp;
1680 
1681 	/* We can remove all the entries from the queue up to
1682 	 * the "Peer-Sequence-Number".
1683 	 */
1684 	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1685 				transmitted_list) {
1686 		if (ack->subh.addip_hdr->serial ==
1687 				htonl(asoc->peer.addip_serial))
1688 			break;
1689 
1690 		list_del_init(&ack->transmitted_list);
1691 		sctp_chunk_free(ack);
1692 	}
1693 }
1694 
1695 /* Find the ASCONF_ACK whose serial number matches ASCONF */
1696 struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1697 					const struct sctp_association *asoc,
1698 					__be32 serial)
1699 {
1700 	struct sctp_chunk *ack;
1701 
1702 	/* Walk through the list of cached ASCONF-ACKs and find the
1703 	 * ack chunk whose serial number matches that of the request.
1704 	 */
1705 	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1706 		if (sctp_chunk_pending(ack))
1707 			continue;
1708 		if (ack->subh.addip_hdr->serial == serial) {
1709 			sctp_chunk_hold(ack);
1710 			return ack;
1711 		}
1712 	}
1713 
1714 	return NULL;
1715 }
1716 
1717 void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1718 {
1719 	/* Free any cached ASCONF_ACK chunk. */
1720 	sctp_assoc_free_asconf_acks(asoc);
1721 
1722 	/* Free the ASCONF queue. */
1723 	sctp_assoc_free_asconf_queue(asoc);
1724 
1725 	/* Free any cached ASCONF chunk. */
1726 	if (asoc->addip_last_asconf)
1727 		sctp_chunk_free(asoc->addip_last_asconf);
1728 }
1729