1  /* SCTP kernel implementation
2   * (C) Copyright IBM Corp. 2001, 2004
3   * Copyright (c) 1999-2000 Cisco, Inc.
4   * Copyright (c) 1999-2001 Motorola, Inc.
5   * Copyright (c) 2001 Intel Corp.
6   * Copyright (c) 2001 La Monte H.P. Yarroll
7   *
8   * This file is part of the SCTP kernel implementation
9   *
10   * This module provides the abstraction for an SCTP association.
11   *
12   * This SCTP implementation is free software;
13   * you can redistribute it and/or modify it under the terms of
14   * the GNU General Public License as published by
15   * the Free Software Foundation; either version 2, or (at your option)
16   * any later version.
17   *
18   * This SCTP implementation is distributed in the hope that it
19   * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20   *                 ************************
21   * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22   * See the GNU General Public License for more details.
23   *
24   * You should have received a copy of the GNU General Public License
25   * along with GNU CC; see the file COPYING.  If not, write to
26   * the Free Software Foundation, 59 Temple Place - Suite 330,
27   * Boston, MA 02111-1307, USA.
28   *
29   * Please send any bug reports or fixes you make to the
30   * email address(es):
31   *    lksctp developers <lksctp-developers@lists.sourceforge.net>
32   *
33   * Or submit a bug report through the following website:
34   *    http://www.sf.net/projects/lksctp
35   *
36   * Written or modified by:
37   *    La Monte H.P. Yarroll <piggy@acm.org>
38   *    Karl Knutson          <karl@athena.chicago.il.us>
39   *    Jon Grimm             <jgrimm@us.ibm.com>
40   *    Xingang Guo           <xingang.guo@intel.com>
41   *    Hui Huang             <hui.huang@nokia.com>
42   *    Sridhar Samudrala	    <sri@us.ibm.com>
43   *    Daisy Chang	    <daisyc@us.ibm.com>
44   *    Ryan Layer	    <rmlayer@us.ibm.com>
45   *    Kevin Gao             <kevin.gao@intel.com>
46   *
47   * Any bugs reported to us we will try to fix... any fixes shared will
48   * be incorporated into the next SCTP release.
49   */
50  
51  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
52  
53  #include <linux/types.h>
54  #include <linux/fcntl.h>
55  #include <linux/poll.h>
56  #include <linux/init.h>
57  
58  #include <linux/slab.h>
59  #include <linux/in.h>
60  #include <net/ipv6.h>
61  #include <net/sctp/sctp.h>
62  #include <net/sctp/sm.h>
63  
64  /* Forward declarations for internal functions. */
65  static void sctp_assoc_bh_rcv(struct work_struct *work);
66  static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
67  static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
68  
69  /* 1st Level Abstractions. */
70  
71  /* Initialize a new association from provided memory. */
72  static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
73  					  const struct sctp_endpoint *ep,
74  					  const struct sock *sk,
75  					  sctp_scope_t scope,
76  					  gfp_t gfp)
77  {
78  	struct net *net = sock_net(sk);
79  	struct sctp_sock *sp;
80  	int i;
81  	sctp_paramhdr_t *p;
82  	int err;
83  
84  	/* Retrieve the SCTP per socket area.  */
85  	sp = sctp_sk((struct sock *)sk);
86  
87  	/* Discarding const is appropriate here.  */
88  	asoc->ep = (struct sctp_endpoint *)ep;
89  	sctp_endpoint_hold(asoc->ep);
90  
91  	/* Hold the sock.  */
92  	asoc->base.sk = (struct sock *)sk;
93  	sock_hold(asoc->base.sk);
94  
95  	/* Initialize the common base substructure.  */
96  	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
97  
98  	/* Initialize the object handling fields.  */
99  	atomic_set(&asoc->base.refcnt, 1);
100  	asoc->base.dead = false;
101  
102  	/* Initialize the bind addr area.  */
103  	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
104  
105  	asoc->state = SCTP_STATE_CLOSED;
106  
107  	/* Set these values from the socket values; a conversion from
108  	 * milliseconds to seconds/microseconds must also be done.
109  	 */
110  	asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
111  	asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
112  					* 1000;
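	/* Worked example with an illustrative value: a cookie life of
	 * 60500 ms yields tv_sec = 60500 / 1000 = 60 and
	 * tv_usec = (60500 % 1000) * 1000 = 500000, i.e. 60.5 seconds.
	 */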
113  	asoc->frag_point = 0;
114  	asoc->user_frag = sp->user_frag;
115  
116  	/* Set the association max_retrans and RTO values from the
117  	 * socket values.
118  	 */
119  	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
120  	asoc->pf_retrans  = net->sctp.pf_retrans;
121  
122  	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
123  	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
124  	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
125  
126  	asoc->overall_error_count = 0;
127  
128  	/* Initialize the association's heartbeat interval based on the
129  	 * sock configured value.
130  	 */
131  	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
132  
133  	/* Initialize path max retrans value. */
134  	asoc->pathmaxrxt = sp->pathmaxrxt;
135  
136  	/* Initialize default path MTU. */
137  	asoc->pathmtu = sp->pathmtu;
138  
139  	/* Set association default SACK delay */
140  	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
141  	asoc->sackfreq = sp->sackfreq;
142  
143  	/* Set the association default flags controlling
144  	 * Heartbeat, SACK delay, and Path MTU Discovery.
145  	 */
146  	asoc->param_flags = sp->param_flags;
147  
148  	/* Initialize the maximum number of new data packets that can be sent
149  	 * in a burst.
150  	 */
151  	asoc->max_burst = sp->max_burst;
152  
153  	/* initialize association timers */
154  	asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
155  	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
156  	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
157  	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
158  	asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
159  	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
160  
161  	/* sctpimpguide Section 2.12.2
162  	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
163  	 * recommended value of 5 times 'RTO.Max'.
164  	 */
165  	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
166  		= 5 * asoc->rto_max;
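	/* For example, with the RFC 2960 suggested default of
	 * RTO.Max = 60 seconds, the guard timer works out to
	 * 5 * 60 = 300 seconds (already converted to jiffies above).
	 */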
167  
168  	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
169  	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
170  	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
171  		min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
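	/* sp->autoclose is in seconds, so the multiply by HZ converts it
	 * to jiffies; e.g. an illustrative autoclose of 30 seconds on a
	 * kernel with HZ = 250 arms the timer at 30 * 250 = 7500 jiffies.
	 */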
172  
173  	/* Initializes the timers */
174  	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
175  		setup_timer(&asoc->timers[i], sctp_timer_events[i],
176  				(unsigned long)asoc);
177  
178  	/* Pull default initialization values from the sock options.
179  	 * Note: This assumes that the values have already been
180  	 * validated in the sock.
181  	 */
182  	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
183  	asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
184  	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;
185  
186  	asoc->max_init_timeo =
187  		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
188  
189  	/* Allocate storage for the ssnmap after the inbound and outbound
190  	 * streams have been negotiated during Init.
191  	 */
192  	asoc->ssnmap = NULL;
193  
194  	/* Set the local window size for receive.
195  	 * This is also the rcvbuf space per association.
196  	 * RFC 2960 - A SCTP receiver MUST be able to receive a minimum of
197  	 * 1500 bytes in one SCTP packet.
198  	 */
199  	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
200  		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
201  	else
202  		asoc->rwnd = sk->sk_rcvbuf/2;
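	/* Illustrative numbers: sk_rcvbuf = 212992 gives
	 * rwnd = 106496 bytes, while a tiny sk_rcvbuf = 2048 would be
	 * clamped up to SCTP_DEFAULT_MINWINDOW (1500 bytes).
	 */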
203  
204  	asoc->a_rwnd = asoc->rwnd;
205  
206  	asoc->rwnd_over = 0;
207  	asoc->rwnd_press = 0;
208  
209  	/* Use my own max window until I learn something better.  */
210  	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
211  
212  	/* Set the sndbuf size for transmit.  */
213  	asoc->sndbuf_used = 0;
214  
215  	/* Initialize the receive memory counter */
216  	atomic_set(&asoc->rmem_alloc, 0);
217  
218  	init_waitqueue_head(&asoc->wait);
219  
220  	asoc->c.my_vtag = sctp_generate_tag(ep);
221  	asoc->peer.i.init_tag = 0;     /* INIT needs a vtag of 0. */
222  	asoc->c.peer_vtag = 0;
223  	asoc->c.my_ttag   = 0;
224  	asoc->c.peer_ttag = 0;
225  	asoc->c.my_port = ep->base.bind_addr.port;
226  
227  	asoc->c.initial_tsn = sctp_generate_tsn(ep);
228  
229  	asoc->next_tsn = asoc->c.initial_tsn;
230  
231  	asoc->ctsn_ack_point = asoc->next_tsn - 1;
232  	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
233  	asoc->highest_sacked = asoc->ctsn_ack_point;
234  	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
235  	asoc->unack_data = 0;
236  
237  	/* ADDIP Section 4.1 Asconf Chunk Procedures
238  	 *
239  	 * When an endpoint has an ASCONF signaled change to be sent to the
240  	 * remote endpoint it should do the following:
241  	 * ...
242  	 * A2) a serial number should be assigned to the chunk. The serial
243  	 * number SHOULD be a monotonically increasing number. The serial
244  	 * numbers SHOULD be initialized at the start of the
245  	 * association to the same value as the initial TSN.
246  	 */
247  	asoc->addip_serial = asoc->c.initial_tsn;
248  
249  	INIT_LIST_HEAD(&asoc->addip_chunk_list);
250  	INIT_LIST_HEAD(&asoc->asconf_ack_list);
251  
252  	/* Make an empty list of remote transport addresses.  */
253  	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
254  	asoc->peer.transport_count = 0;
255  
256  	/* RFC 2960 5.1 Normal Establishment of an Association
257  	 *
258  	 * After the reception of the first data chunk in an
259  	 * association the endpoint must immediately respond with a
260  	 * sack to acknowledge the data chunk.  Subsequent
261  	 * acknowledgements should be done as described in Section
262  	 * 6.2.
263  	 *
264  	 * [We implement this by telling a new association that it
265  	 * already received one packet.]
266  	 */
267  	asoc->peer.sack_needed = 1;
268  	asoc->peer.sack_cnt = 0;
269  	asoc->peer.sack_generation = 1;
270  
271  	/* Assume that the peer will tell us if he recognizes ASCONF
272  	 * as part of INIT exchange.
273  	 * The sctp_addip_noauth option is there for backward compatibility
274  	 * and will revert to the old behavior.
275  	 */
276  	asoc->peer.asconf_capable = 0;
277  	if (net->sctp.addip_noauth)
278  		asoc->peer.asconf_capable = 1;
279  	asoc->asconf_addr_del_pending = NULL;
280  	asoc->src_out_of_asoc_ok = 0;
281  	asoc->new_transport = NULL;
282  
283  	/* Create an input queue.  */
284  	sctp_inq_init(&asoc->base.inqueue);
285  	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
286  
287  	/* Create an output queue.  */
288  	sctp_outq_init(asoc, &asoc->outqueue);
289  
290  	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
291  		goto fail_init;
292  
293  	memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));
294  
295  	asoc->need_ecne = 0;
296  
297  	asoc->assoc_id = 0;
298  
299  	/* Assume that peer would support both address types unless we are
300  	 * told otherwise.
301  	 */
302  	asoc->peer.ipv4_address = 1;
303  	if (asoc->base.sk->sk_family == PF_INET6)
304  		asoc->peer.ipv6_address = 1;
305  	INIT_LIST_HEAD(&asoc->asocs);
306  
307  	asoc->autoclose = sp->autoclose;
308  
309  	asoc->default_stream = sp->default_stream;
310  	asoc->default_ppid = sp->default_ppid;
311  	asoc->default_flags = sp->default_flags;
312  	asoc->default_context = sp->default_context;
313  	asoc->default_timetolive = sp->default_timetolive;
314  	asoc->default_rcv_context = sp->default_rcv_context;
315  
316  	/* SCTP_GET_ASSOC_STATS COUNTERS */
317  	memset(&asoc->stats, 0, sizeof(struct sctp_priv_assoc_stats));
318  
319  	/* AUTH related initializations */
320  	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
321  	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
322  	if (err)
323  		goto fail_init;
324  
325  	asoc->active_key_id = ep->active_key_id;
326  	asoc->asoc_shared_key = NULL;
327  
328  	asoc->default_hmac_id = 0;
329  	/* Save the hmacs and chunks list into this association */
330  	if (ep->auth_hmacs_list)
331  		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
332  			ntohs(ep->auth_hmacs_list->param_hdr.length));
333  	if (ep->auth_chunk_list)
334  		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
335  			ntohs(ep->auth_chunk_list->param_hdr.length));
336  
337  	/* Get the AUTH random number for this association */
338  	p = (sctp_paramhdr_t *)asoc->c.auth_random;
339  	p->type = SCTP_PARAM_RANDOM;
340  	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
341  	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
342  
343  	return asoc;
344  
345  fail_init:
346  	sctp_endpoint_put(asoc->ep);
347  	sock_put(asoc->base.sk);
348  	return NULL;
349  }
350  
351  /* Allocate and initialize a new association */
352  struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
353  					 const struct sock *sk,
354  					 sctp_scope_t scope,
355  					 gfp_t gfp)
356  {
357  	struct sctp_association *asoc;
358  
359  	asoc = t_new(struct sctp_association, gfp);
360  	if (!asoc)
361  		goto fail;
362  
363  	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
364  		goto fail_init;
365  
366  	SCTP_DBG_OBJCNT_INC(assoc);
367  	SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc);
368  
369  	return asoc;
370  
371  fail_init:
372  	kfree(asoc);
373  fail:
374  	return NULL;
375  }
376  
377  /* Free this association if possible.  There may still be users, so
378   * the actual deallocation may be delayed.
379   */
380  void sctp_association_free(struct sctp_association *asoc)
381  {
382  	struct sock *sk = asoc->base.sk;
383  	struct sctp_transport *transport;
384  	struct list_head *pos, *temp;
385  	int i;
386  
387  	/* Only real associations count against the endpoint, so
388  	 * don't bother if this is a temporary association.
389  	 */
390  	if (!asoc->temp) {
391  		list_del(&asoc->asocs);
392  
393  		/* Decrement the backlog value for a TCP-style listening
394  		 * socket.
395  		 */
396  		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
397  			sk->sk_ack_backlog--;
398  	}
399  
400  	/* Mark as dead, so other users can know this structure is
401  	 * going away.
402  	 */
403  	asoc->base.dead = true;
404  
405  	/* Dispose of any data lying around in the outqueue. */
406  	sctp_outq_free(&asoc->outqueue);
407  
408  	/* Dispose of any pending messages for the upper layer. */
409  	sctp_ulpq_free(&asoc->ulpq);
410  
411  	/* Dispose of any pending chunks on the inqueue. */
412  	sctp_inq_free(&asoc->base.inqueue);
413  
414  	sctp_tsnmap_free(&asoc->peer.tsn_map);
415  
416  	/* Free ssnmap storage. */
417  	sctp_ssnmap_free(asoc->ssnmap);
418  
419  	/* Clean up the bound address list. */
420  	sctp_bind_addr_free(&asoc->base.bind_addr);
421  
422  	/* Do we need to go through all of our timers and
423  	 * delete them?  To be safe we will try to delete them all, though
424  	 * we could probably make a reasonable guess based
425  	 * on our state about which ones are actually running.
426  	 */
427  	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
428  		if (del_timer(&asoc->timers[i]))
429  			sctp_association_put(asoc);
430  	}
431  
432  	/* Free peer's cached cookie. */
433  	kfree(asoc->peer.cookie);
434  	kfree(asoc->peer.peer_random);
435  	kfree(asoc->peer.peer_chunks);
436  	kfree(asoc->peer.peer_hmacs);
437  
438  	/* Release the transport structures. */
439  	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
440  		transport = list_entry(pos, struct sctp_transport, transports);
441  		list_del_rcu(pos);
442  		sctp_transport_free(transport);
443  	}
444  
445  	asoc->peer.transport_count = 0;
446  
447  	sctp_asconf_queue_teardown(asoc);
448  
449  	/* Free pending address space being deleted */
450  	if (asoc->asconf_addr_del_pending != NULL)
451  		kfree(asoc->asconf_addr_del_pending);
452  
453  	/* AUTH - Free the endpoint shared keys */
454  	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
455  
456  	/* AUTH - Free the association shared key */
457  	sctp_auth_key_put(asoc->asoc_shared_key);
458  
459  	sctp_association_put(asoc);
460  }
461  
462  /* Cleanup and free up an association. */
463  static void sctp_association_destroy(struct sctp_association *asoc)
464  {
465  	SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);
466  
467  	sctp_endpoint_put(asoc->ep);
468  	sock_put(asoc->base.sk);
469  
470  	if (asoc->assoc_id != 0) {
471  		spin_lock_bh(&sctp_assocs_id_lock);
472  		idr_remove(&sctp_assocs_id, asoc->assoc_id);
473  		spin_unlock_bh(&sctp_assocs_id_lock);
474  	}
475  
476  	WARN_ON(atomic_read(&asoc->rmem_alloc));
477  
478  	kfree(asoc);
479  	SCTP_DBG_OBJCNT_DEC(assoc);
480  }
481  
482  /* Change the primary destination address for the peer. */
483  void sctp_assoc_set_primary(struct sctp_association *asoc,
484  			    struct sctp_transport *transport)
485  {
486  	int changeover = 0;
487  
488  	/* it's a changeover only if we already have a primary path
489  	 * that we are changing
490  	 */
491  	if (asoc->peer.primary_path != NULL &&
492  	    asoc->peer.primary_path != transport)
493  		changeover = 1;
494  
495  	asoc->peer.primary_path = transport;
496  
497  	/* Set a default msg_name for events. */
498  	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
499  	       sizeof(union sctp_addr));
500  
501  	/* If the primary path is changing, assume that the
502  	 * user wants to use this new path.
503  	 */
504  	if ((transport->state == SCTP_ACTIVE) ||
505  	    (transport->state == SCTP_UNKNOWN))
506  		asoc->peer.active_path = transport;
507  
508  	/*
509  	 * SFR-CACC algorithm:
510  	 * Upon the receipt of a request to change the primary
511  	 * destination address, on the data structure for the new
512  	 * primary destination, the sender MUST do the following:
513  	 *
514  	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
515  	 * to this destination address earlier. The sender MUST set
516  	 * CYCLING_CHANGEOVER to indicate that this switch is a
517  	 * double switch to the same destination address.
518  	 *
519  	 * Really, only bother if we have data queued or outstanding on
520  	 * the association.
521  	 */
522  	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
523  		return;
524  
525  	if (transport->cacc.changeover_active)
526  		transport->cacc.cycling_changeover = changeover;
527  
528  	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
529  	 * a changeover has occurred.
530  	 */
531  	transport->cacc.changeover_active = changeover;
532  
533  	/* 3) The sender MUST store the next TSN to be sent in
534  	 * next_tsn_at_change.
535  	 */
536  	transport->cacc.next_tsn_at_change = asoc->next_tsn;
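	/* Illustrative scenario: with data outstanding, a switch from
	 * primary A to B sets changeover_active on B; if the user later
	 * switches away and then back to B, B's changeover_active is
	 * still set, so cycling_changeover flags the double switch, and
	 * next_tsn_at_change records the next TSN to be sent each time.
	 */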
537  }
538  
539  /* Remove a transport from an association.  */
540  void sctp_assoc_rm_peer(struct sctp_association *asoc,
541  			struct sctp_transport *peer)
542  {
543  	struct list_head	*pos;
544  	struct sctp_transport	*transport;
545  
546  	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
547  				 " port: %d\n",
548  				 asoc,
549  				 (&peer->ipaddr),
550  				 ntohs(peer->ipaddr.v4.sin_port));
551  
552  	/* If we are to remove the current retran_path, update it
553  	 * to the next peer before removing this peer from the list.
554  	 */
555  	if (asoc->peer.retran_path == peer)
556  		sctp_assoc_update_retran_path(asoc);
557  
558  	/* Remove this peer from the list. */
559  	list_del_rcu(&peer->transports);
560  
561  	/* Get the first transport of asoc. */
562  	pos = asoc->peer.transport_addr_list.next;
563  	transport = list_entry(pos, struct sctp_transport, transports);
564  
565  	/* Update any entries that match the peer to be deleted. */
566  	if (asoc->peer.primary_path == peer)
567  		sctp_assoc_set_primary(asoc, transport);
568  	if (asoc->peer.active_path == peer)
569  		asoc->peer.active_path = transport;
570  	if (asoc->peer.retran_path == peer)
571  		asoc->peer.retran_path = transport;
572  	if (asoc->peer.last_data_from == peer)
573  		asoc->peer.last_data_from = transport;
574  
575  	/* If we remove the transport an INIT was last sent to, set it to
576  	 * NULL. Combined with the update of the retran path above, this
577  	 * will cause the next INIT to be sent to the next available
578  	 * transport, maintaining the cycle.
579  	 */
580  	if (asoc->init_last_sent_to == peer)
581  		asoc->init_last_sent_to = NULL;
582  
583  	/* If we remove the transport an SHUTDOWN was last sent to, set it
584  	 * to NULL. Combined with the update of the retran path above, this
585  	 * will cause the next SHUTDOWN to be sent to the next available
586  	 * transport, maintaining the cycle.
587  	 */
588  	if (asoc->shutdown_last_sent_to == peer)
589  		asoc->shutdown_last_sent_to = NULL;
590  
591  	/* If we remove the transport an ASCONF was last sent to, set it to
592  	 * NULL.
593  	 */
594  	if (asoc->addip_last_asconf &&
595  	    asoc->addip_last_asconf->transport == peer)
596  		asoc->addip_last_asconf->transport = NULL;
597  
598  	/* If we have something on the transmitted list, we have to
599  	 * save it off.  The best place is the active path.
600  	 */
601  	if (!list_empty(&peer->transmitted)) {
602  		struct sctp_transport *active = asoc->peer.active_path;
603  		struct sctp_chunk *ch;
604  
605  		/* Reset the transport of each chunk on this list */
606  		list_for_each_entry(ch, &peer->transmitted,
607  					transmitted_list) {
608  			ch->transport = NULL;
609  			ch->rtt_in_progress = 0;
610  		}
611  
612  		list_splice_tail_init(&peer->transmitted,
613  					&active->transmitted);
614  
615  		/* Start a T3 timer here in case it wasn't running so
616  		 * that these migrated packets have a chance to get
617  		 * retransmitted.
618  		 */
619  		if (!timer_pending(&active->T3_rtx_timer))
620  			if (!mod_timer(&active->T3_rtx_timer,
621  					jiffies + active->rto))
622  				sctp_transport_hold(active);
623  	}
624  
625  	asoc->peer.transport_count--;
626  
627  	sctp_transport_free(peer);
628  }
629  
630  /* Add a transport address to an association.  */
631  struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
632  					   const union sctp_addr *addr,
633  					   const gfp_t gfp,
634  					   const int peer_state)
635  {
636  	struct net *net = sock_net(asoc->base.sk);
637  	struct sctp_transport *peer;
638  	struct sctp_sock *sp;
639  	unsigned short port;
640  
641  	sp = sctp_sk(asoc->base.sk);
642  
643  	/* AF_INET and AF_INET6 share a common port field. */
644  	port = ntohs(addr->v4.sin_port);
645  
646  	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
647  				 " port: %d state:%d\n",
648  				 asoc,
649  				 addr,
650  				 port,
651  				 peer_state);
652  
653  	/* Set the port if it has not been set yet.  */
654  	if (0 == asoc->peer.port)
655  		asoc->peer.port = port;
656  
657  	/* Check to see if this is a duplicate. */
658  	peer = sctp_assoc_lookup_paddr(asoc, addr);
659  	if (peer) {
660  		/* An UNKNOWN state is only set on transports added by
661  		 * user in sctp_connectx() call.  Such transports should be
662  		 * considered CONFIRMED per RFC 4960, Section 5.4.
663  		 */
664  		if (peer->state == SCTP_UNKNOWN) {
665  			peer->state = SCTP_ACTIVE;
666  		}
667  		return peer;
668  	}
669  
670  	peer = sctp_transport_new(net, addr, gfp);
671  	if (!peer)
672  		return NULL;
673  
674  	sctp_transport_set_owner(peer, asoc);
675  
676  	/* Initialize the peer's heartbeat interval based on the
677  	 * association configured value.
678  	 */
679  	peer->hbinterval = asoc->hbinterval;
680  
681  	/* Set the path max_retrans.  */
682  	peer->pathmaxrxt = asoc->pathmaxrxt;
683  
684  	/* And the partial failure retrans threshold */
685  	peer->pf_retrans = asoc->pf_retrans;
686  
687  	/* Initialize the peer's SACK delay timeout based on the
688  	 * association configured value.
689  	 */
690  	peer->sackdelay = asoc->sackdelay;
691  	peer->sackfreq = asoc->sackfreq;
692  
693  	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
694  	 * based on association setting.
695  	 */
696  	peer->param_flags = asoc->param_flags;
697  
698  	sctp_transport_route(peer, NULL, sp);
699  
700  	/* Initialize the pmtu of the transport. */
701  	if (peer->param_flags & SPP_PMTUD_DISABLE) {
702  		if (asoc->pathmtu)
703  			peer->pathmtu = asoc->pathmtu;
704  		else
705  			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
706  	}
707  
708  	/* If this is the first transport addr on this association,
709  	 * initialize the association PMTU to the peer's PMTU.
710  	 * If not and the current association PMTU is higher than the new
711  	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
712  	 */
713  	if (asoc->pathmtu)
714  		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
715  	else
716  		asoc->pathmtu = peer->pathmtu;
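	/* E.g. (illustrative values): an association PMTU of 1500 plus a
	 * new peer whose path only supports 1280 lowers the association
	 * PMTU to min(1280, 1500) = 1280, so that one packet size fits
	 * every path.
	 */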
717  
718  	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
719  			  "%d\n", asoc, asoc->pathmtu);
720  	peer->pmtu_pending = 0;
721  
722  	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
723  
724  	/* The asoc->peer.port might not be meaningful yet, but
725  	 * initialize the packet structure anyway.
726  	 */
727  	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
728  			 asoc->peer.port);
729  
730  	/* 7.2.1 Slow-Start
731  	 *
732  	 * o The initial cwnd before DATA transmission or after a sufficiently
733  	 *   long idle period MUST be set to
734  	 *      min(4*MTU, max(2*MTU, 4380 bytes))
735  	 *
736  	 * o The initial value of ssthresh MAY be arbitrarily high
737  	 *   (for example, implementations MAY use the size of the
738  	 *   receiver advertised window).
739  	 */
740  	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
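	/* Worked examples with illustrative MTUs: pathmtu = 1500 gives
	 * min(6000, max(3000, 4380)) = 4380 bytes, while pathmtu = 512
	 * gives min(2048, max(1024, 4380)) = 2048 bytes.
	 */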
741  
742  	/* At this point, we may not have the receiver's advertised window,
743  	 * so initialize ssthresh to the default value and it will be set
744  	 * later when we process the INIT.
745  	 */
746  	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
747  
748  	peer->partial_bytes_acked = 0;
749  	peer->flight_size = 0;
750  	peer->burst_limited = 0;
751  
752  	/* Set the transport's RTO.initial value */
753  	peer->rto = asoc->rto_initial;
754  	sctp_max_rto(asoc, peer);
755  
756  	/* Set the peer's active state. */
757  	peer->state = peer_state;
758  
759  	/* Attach the remote transport to our asoc.  */
760  	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
761  	asoc->peer.transport_count++;
762  
763  	/* If we do not yet have a primary path, set one.  */
764  	if (!asoc->peer.primary_path) {
765  		sctp_assoc_set_primary(asoc, peer);
766  		asoc->peer.retran_path = peer;
767  	}
768  
769  	if (asoc->peer.active_path == asoc->peer.retran_path &&
770  	    peer->state != SCTP_UNCONFIRMED) {
771  		asoc->peer.retran_path = peer;
772  	}
773  
774  	return peer;
775  }
776  
777  /* Delete a transport address from an association.  */
778  void sctp_assoc_del_peer(struct sctp_association *asoc,
779  			 const union sctp_addr *addr)
780  {
781  	struct list_head	*pos;
782  	struct list_head	*temp;
783  	struct sctp_transport	*transport;
784  
785  	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
786  		transport = list_entry(pos, struct sctp_transport, transports);
787  		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
788  			/* Do book keeping for removing the peer and free it. */
789  			sctp_assoc_rm_peer(asoc, transport);
790  			break;
791  		}
792  	}
793  }
794  
795  /* Lookup a transport by address. */
796  struct sctp_transport *sctp_assoc_lookup_paddr(
797  					const struct sctp_association *asoc,
798  					const union sctp_addr *address)
799  {
800  	struct sctp_transport *t;
801  
802  	/* Cycle through all transports searching for a peer address. */
803  
804  	list_for_each_entry(t, &asoc->peer.transport_addr_list,
805  			transports) {
806  		if (sctp_cmp_addr_exact(address, &t->ipaddr))
807  			return t;
808  	}
809  
810  	return NULL;
811  }
812  
813  /* Remove all transports except a given one */
814  void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
815  				     struct sctp_transport *primary)
816  {
817  	struct sctp_transport	*temp;
818  	struct sctp_transport	*t;
819  
820  	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
821  				 transports) {
822  		/* if the current transport is not the primary one, delete it */
823  		if (t != primary)
824  			sctp_assoc_rm_peer(asoc, t);
825  	}
826  }
827  
828  /* Engage in transport control operations.
829   * Mark the transport up or down and send a notification to the user.
830   * Select and update the new active and retran paths.
831   */
832  void sctp_assoc_control_transport(struct sctp_association *asoc,
833  				  struct sctp_transport *transport,
834  				  sctp_transport_cmd_t command,
835  				  sctp_sn_error_t error)
836  {
837  	struct sctp_transport *t = NULL;
838  	struct sctp_transport *first;
839  	struct sctp_transport *second;
840  	struct sctp_ulpevent *event;
841  	struct sockaddr_storage addr;
842  	int spc_state = 0;
843  	bool ulp_notify = true;
844  
845  	/* Record the transition on the transport.  */
846  	switch (command) {
847  	case SCTP_TRANSPORT_UP:
848  		/* If we are moving from UNCONFIRMED state due
849  		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
850  		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
851  		 */
852  		if (SCTP_UNCONFIRMED == transport->state &&
853  		    SCTP_HEARTBEAT_SUCCESS == error)
854  			spc_state = SCTP_ADDR_CONFIRMED;
855  		else
856  			spc_state = SCTP_ADDR_AVAILABLE;
857  		/* Don't inform ULP about transition from PF to
858  		 * active state and set cwnd to 1, see SCTP
859  		 * Quick failover draft section 5.1, point 5
860  		 */
861  		if (transport->state == SCTP_PF) {
862  			ulp_notify = false;
863  			transport->cwnd = 1;
864  		}
865  		transport->state = SCTP_ACTIVE;
866  		break;
867  
868  	case SCTP_TRANSPORT_DOWN:
869  		/* If the transport was never confirmed, do not transition it
870  		 * to inactive state.  Also, release the cached route since
871  		 * there may be a better route next time.
872  		 */
873  		if (transport->state != SCTP_UNCONFIRMED)
874  			transport->state = SCTP_INACTIVE;
875  		else {
876  			dst_release(transport->dst);
877  			transport->dst = NULL;
878  		}
879  
880  		spc_state = SCTP_ADDR_UNREACHABLE;
881  		break;
882  
883  	case SCTP_TRANSPORT_PF:
884  		transport->state = SCTP_PF;
885  		ulp_notify = false;
886  		break;
887  
888  	default:
889  		return;
890  	}
891  
892  	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
893  	 * user.
894  	 */
895  	if (ulp_notify) {
896  		memset(&addr, 0, sizeof(struct sockaddr_storage));
897  		memcpy(&addr, &transport->ipaddr,
898  		       transport->af_specific->sockaddr_len);
899  		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
900  					0, spc_state, error, GFP_ATOMIC);
901  		if (event)
902  			sctp_ulpq_tail_event(&asoc->ulpq, event);
903  	}
904  
905  	/* Select new active and retran paths. */
906  
907  	/* Look for the two most recently used active transports.
908  	 *
909  	 * This code produces the wrong ordering whenever jiffies
910  	 * rolls over, but we still get usable transports, so we don't
911  	 * worry about it.
912  	 */
913  	first = NULL; second = NULL;
914  
915  	list_for_each_entry(t, &asoc->peer.transport_addr_list,
916  			transports) {
917  
918  		if ((t->state == SCTP_INACTIVE) ||
919  		    (t->state == SCTP_UNCONFIRMED) ||
920  		    (t->state == SCTP_PF))
921  			continue;
922  		if (!first || t->last_time_heard > first->last_time_heard) {
923  			second = first;
924  			first = t;
925  		}
926  		if (!second || t->last_time_heard > second->last_time_heard)
927  			second = t;
928  	}
929  
930  	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
931  	 *
932  	 * By default, an endpoint should always transmit to the
933  	 * primary path, unless the SCTP user explicitly specifies the
934  	 * destination transport address (and possibly source
935  	 * transport address) to use.
936  	 *
937  	 * [If the primary is active but not most recent, bump the most
938  	 * recently used transport.]
939  	 */
940  	if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
941  	     (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
942  	    first != asoc->peer.primary_path) {
943  		second = first;
944  		first = asoc->peer.primary_path;
945  	}
946  
947  	/* If we failed to find a usable transport, just camp on the
948  	 * primary, even if it is inactive.
949  	 */
950  	if (!first) {
951  		first = asoc->peer.primary_path;
952  		second = asoc->peer.primary_path;
953  	}
954  
955  	/* Set the active and retran transports.  */
956  	asoc->peer.active_path = first;
957  	asoc->peer.retran_path = second;
958  }
959  
960  /* Hold a reference to an association. */
961  void sctp_association_hold(struct sctp_association *asoc)
962  {
963  	atomic_inc(&asoc->base.refcnt);
964  }
965  
966  /* Release a reference to an association and cleanup
967   * if there are no more references.
968   */
969  void sctp_association_put(struct sctp_association *asoc)
970  {
971  	if (atomic_dec_and_test(&asoc->base.refcnt))
972  		sctp_association_destroy(asoc);
973  }
974  
975  /* Allocate the next TSN, Transmission Sequence Number, for the given
976   * association.
977   */
978  __u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
979  {
980  	/* From Section 1.6 Serial Number Arithmetic:
981  	 * Transmission Sequence Numbers wrap around when they reach
982  	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
983  	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
984  	 */
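	/* The wraparound needs no special handling here: next_tsn is a
	 * __u32, so returning e.g. 0xffffffff and incrementing leaves
	 * next_tsn at 0, exactly as serial number arithmetic requires.
	 */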
985  	__u32 retval = asoc->next_tsn;
986  	asoc->next_tsn++;
987  	asoc->unack_data++;
988  
989  	return retval;
990  }
991  
992  /* Compare two addresses to see if they match.  Wildcard addresses
993   * only match themselves.
994   */
995  int sctp_cmp_addr_exact(const union sctp_addr *ss1,
996  			const union sctp_addr *ss2)
997  {
998  	struct sctp_af *af;
999  
1000  	af = sctp_get_af_specific(ss1->sa.sa_family);
1001  	if (unlikely(!af))
1002  		return 0;
1003  
1004  	return af->cmp_addr(ss1, ss2);
1005  }
1006  
1007  /* Return an ecne chunk to get prepended to a packet.
1008   * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
1009   * No we don't, but we could/should.
1010   */
1011  struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
1012  {
1013  	struct sctp_chunk *chunk;
1014  
1015  	/* Send ECNE if needed.
1016  	 * Not being able to allocate a chunk here is not deadly.
1017  	 */
1018  	if (asoc->need_ecne)
1019  		chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
1020  	else
1021  		chunk = NULL;
1022  
1023  	return chunk;
1024  }
1025  
1026  /*
1027   * Find which transport this TSN was sent on.
1028   */
1029  struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
1030  					     __u32 tsn)
1031  {
1032  	struct sctp_transport *active;
1033  	struct sctp_transport *match;
1034  	struct sctp_transport *transport;
1035  	struct sctp_chunk *chunk;
1036  	__be32 key = htonl(tsn);
1037  
1038  	match = NULL;
1039  
1040  	/*
1041  	 * FIXME: In general, find a more efficient data structure for
1042  	 * searching.
1043  	 */
1044  
1045  	/*
1046  	 * The general strategy is to search each transport's transmitted
1047  	 * list.   Return which transport this TSN lives on.
1048  	 *
1049  	 * Let's be hopeful and check the active_path first.
1050  	 * Another optimization would be to know if there is only one
1051  	 * outbound path and not have to look for the TSN at all.
1052  	 *
1053  	 */
1054  
1055  	active = asoc->peer.active_path;
1056  
1057  	list_for_each_entry(chunk, &active->transmitted,
1058  			transmitted_list) {
1059  
1060  		if (key == chunk->subh.data_hdr->tsn) {
1061  			match = active;
1062  			goto out;
1063  		}
1064  	}
1065  
1066  	/* If not found, go search all the other transports. */
1067  	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
1068  			transports) {
1069  
1070  		if (transport == active)
1071  			continue;
1072  		list_for_each_entry(chunk, &transport->transmitted,
1073  				transmitted_list) {
1074  			if (key == chunk->subh.data_hdr->tsn) {
1075  				match = transport;
1076  				goto out;
1077  			}
1078  		}
1079  	}
1080  out:
1081  	return match;
1082  }
1083  
1084  /* Is this the association we are looking for? */
1085  struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
1086  					   struct net *net,
1087  					   const union sctp_addr *laddr,
1088  					   const union sctp_addr *paddr)
1089  {
1090  	struct sctp_transport *transport;
1091  
1092  	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
1093  	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
1094  	    net_eq(sock_net(asoc->base.sk), net)) {
1095  		transport = sctp_assoc_lookup_paddr(asoc, paddr);
1096  		if (!transport)
1097  			goto out;
1098  
1099  		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1100  					 sctp_sk(asoc->base.sk)))
1101  			goto out;
1102  	}
1103  	transport = NULL;
1104  
1105  out:
1106  	return transport;
1107  }
1108  
1109  /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
1110  static void sctp_assoc_bh_rcv(struct work_struct *work)
1111  {
1112  	struct sctp_association *asoc =
1113  		container_of(work, struct sctp_association,
1114  			     base.inqueue.immediate);
1115  	struct net *net = sock_net(asoc->base.sk);
1116  	struct sctp_endpoint *ep;
1117  	struct sctp_chunk *chunk;
1118  	struct sctp_inq *inqueue;
1119  	int state;
1120  	sctp_subtype_t subtype;
1121  	int error = 0;
1122  
1123  	/* The association should be held so we should be safe. */
1124  	ep = asoc->ep;
1125  
1126  	inqueue = &asoc->base.inqueue;
1127  	sctp_association_hold(asoc);
1128  	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
1129  		state = asoc->state;
1130  		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1131  
1132  		/* SCTP-AUTH, Section 6.3:
1133  		 *    The receiver has a list of chunk types which it expects
1134  		 *    to be received only after an AUTH-chunk.  This list has
1135  		 *    been sent to the peer during the association setup.  It
1136  		 *    MUST silently discard these chunks if they are not placed
1137  		 *    after an AUTH chunk in the packet.
1138  		 */
1139  		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1140  			continue;
1141  
1142  		/* Remember where the last DATA chunk came from so we
1143  		 * know where to send the SACK.
1144  		 */
1145  		if (sctp_chunk_is_data(chunk))
1146  			asoc->peer.last_data_from = chunk->transport;
1147  		else {
1148  			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1149  			asoc->stats.ictrlchunks++;
1150  			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1151  				asoc->stats.isacks++;
1152  		}
1153  
1154  		if (chunk->transport)
1155  			chunk->transport->last_time_heard = jiffies;
1156  
1157  		/* Run through the state machine. */
1158  		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1159  				   state, ep, asoc, chunk, GFP_ATOMIC);
1160  
1161  		/* Check to see if the association is freed in response to
1162  		 * the incoming chunk.  If so, get out of the while loop.
1163  		 */
1164  		if (asoc->base.dead)
1165  			break;
1166  
1167  		/* If there is an error on chunk, discard this packet. */
1168  		if (error && chunk)
1169  			chunk->pdiscard = 1;
1170  	}
1171  	sctp_association_put(asoc);
1172  }
1173  
1174  /* This routine moves an association from its old sk to a new sk.  */
1175  void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1176  {
1177  	struct sctp_sock *newsp = sctp_sk(newsk);
1178  	struct sock *oldsk = assoc->base.sk;
1179  
1180  	/* Delete the association from the old endpoint's list of
1181  	 * associations.
1182  	 */
1183  	list_del_init(&assoc->asocs);
1184  
1185  	/* Decrement the backlog value for a TCP-style socket. */
1186  	if (sctp_style(oldsk, TCP))
1187  		oldsk->sk_ack_backlog--;
1188  
1189  	/* Release references to the old endpoint and the sock.  */
1190  	sctp_endpoint_put(assoc->ep);
1191  	sock_put(assoc->base.sk);
1192  
1193  	/* Get a reference to the new endpoint.  */
1194  	assoc->ep = newsp->ep;
1195  	sctp_endpoint_hold(assoc->ep);
1196  
1197  	/* Get a reference to the new sock.  */
1198  	assoc->base.sk = newsk;
1199  	sock_hold(assoc->base.sk);
1200  
1201  	/* Add the association to the new endpoint's list of associations.  */
1202  	sctp_endpoint_add_asoc(newsp->ep, assoc);
1203  }
1204  
1205  /* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
1206  void sctp_assoc_update(struct sctp_association *asoc,
1207  		       struct sctp_association *new)
1208  {
1209  	struct sctp_transport *trans;
1210  	struct list_head *pos, *temp;
1211  
1212  	/* Copy in new parameters of peer. */
1213  	asoc->c = new->c;
1214  	asoc->peer.rwnd = new->peer.rwnd;
1215  	asoc->peer.sack_needed = new->peer.sack_needed;
1216  	asoc->peer.i = new->peer.i;
1217  	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1218  			 asoc->peer.i.initial_tsn, GFP_ATOMIC);
1219  
1220  	/* Remove any peer addresses not present in the new association. */
1221  	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1222  		trans = list_entry(pos, struct sctp_transport, transports);
1223  		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1224  			sctp_assoc_rm_peer(asoc, trans);
1225  			continue;
1226  		}
1227  
1228  		if (asoc->state >= SCTP_STATE_ESTABLISHED)
1229  			sctp_transport_reset(trans);
1230  	}
1231  
1232  	/* If the case is A (association restart), use
1233  	 * initial_tsn as next_tsn. If the case is B, use
1234  	 * current next_tsn in case data sent to peer
1235  	 * has been discarded and needs retransmission.
1236  	 */
1237  	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1238  		asoc->next_tsn = new->next_tsn;
1239  		asoc->ctsn_ack_point = new->ctsn_ack_point;
1240  		asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1241  
1242  		/* Reinitialize SSN for both local streams
1243  		 * and peer's streams.
1244  		 */
1245  		sctp_ssnmap_clear(asoc->ssnmap);
1246  
1247  		/* Flush the ULP reassembly and ordered queue.
1248  		 * Any data there will now be stale and will
1249  		 * cause problems.
1250  		 */
1251  		sctp_ulpq_flush(&asoc->ulpq);
1252  
1253  		/* reset the overall association error count so
1254  		 * that the restarted association doesn't get torn
1255  		 * down on the next retransmission timer.
1256  		 */
1257  		asoc->overall_error_count = 0;
1258  
1259  	} else {
1260  		/* Add any peer addresses from the new association. */
1261  		list_for_each_entry(trans, &new->peer.transport_addr_list,
1262  				transports) {
1263  			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1264  				sctp_assoc_add_peer(asoc, &trans->ipaddr,
1265  						    GFP_ATOMIC, trans->state);
1266  		}
1267  
1268  		asoc->ctsn_ack_point = asoc->next_tsn - 1;
1269  		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1270  		if (!asoc->ssnmap) {
1271  			/* Move the ssnmap. */
1272  			asoc->ssnmap = new->ssnmap;
1273  			new->ssnmap = NULL;
1274  		}
1275  
1276  		if (!asoc->assoc_id) {
1277  			/* get a new association id since we don't have one
1278  			 * yet.
1279  			 */
1280  			sctp_assoc_set_id(asoc, GFP_ATOMIC);
1281  		}
1282  	}
1283  
1284  	/* SCTP-AUTH: Save the peer parameters from the new association
1285  	 * and also move the association shared keys over
1286  	 */
1287  	kfree(asoc->peer.peer_random);
1288  	asoc->peer.peer_random = new->peer.peer_random;
1289  	new->peer.peer_random = NULL;
1290  
1291  	kfree(asoc->peer.peer_chunks);
1292  	asoc->peer.peer_chunks = new->peer.peer_chunks;
1293  	new->peer.peer_chunks = NULL;
1294  
1295  	kfree(asoc->peer.peer_hmacs);
1296  	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1297  	new->peer.peer_hmacs = NULL;
1298  
1299  	sctp_auth_key_put(asoc->asoc_shared_key);
1300  	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1301  }
1302  
1303  /* Update the retran path for sending a retransmitted packet.
1304   * Round-robin through the active transports, else round-robin
1305   * through the inactive transports as this is the next best thing
1306   * we can try.
1307   */
1308  void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1309  {
1310  	struct sctp_transport *t, *next;
1311  	struct list_head *head = &asoc->peer.transport_addr_list;
1312  	struct list_head *pos;
1313  
1314  	if (asoc->peer.transport_count == 1)
1315  		return;
1316  
1317  	/* Find the next transport in a round-robin fashion. */
1318  	t = asoc->peer.retran_path;
1319  	pos = &t->transports;
1320  	next = NULL;
1321  
1322  	while (1) {
1323  		/* Skip the head. */
1324  		if (pos->next == head)
1325  			pos = head->next;
1326  		else
1327  			pos = pos->next;
1328  
1329  		t = list_entry(pos, struct sctp_transport, transports);
1330  
1331  		/* We have exhausted the list without finding any
1332  		 * other active transport; fall back to the next
1333  		 * usable transport we remembered, if any.
1334  		 */
1335  		if (t == asoc->peer.retran_path) {
1336  			t = next;
1337  			break;
1338  		}
1339  
1340  		/* Try to find an active transport. */
1341  
1342  		if ((t->state == SCTP_ACTIVE) ||
1343  		    (t->state == SCTP_UNKNOWN)) {
1344  			break;
1345  		} else {
1346  			/* Keep track of the next transport in case
1347  			 * we don't find any active transport.
1348  			 */
1349  			if (t->state != SCTP_UNCONFIRMED && !next)
1350  				next = t;
1351  		}
1352  	}
1353  
1354  	if (t)
1355  		asoc->peer.retran_path = t;
1356  	else
1357  		t = asoc->peer.retran_path;
1358  
1359  	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
1360  				 " %p addr: ",
1361  				 " port: %d\n",
1362  				 asoc,
1363  				 (&t->ipaddr),
1364  				 ntohs(t->ipaddr.v4.sin_port));
1365  }
1366  
1367  /* Choose the transport for sending retransmit packet.  */
1368  struct sctp_transport *sctp_assoc_choose_alter_transport(
1369  	struct sctp_association *asoc, struct sctp_transport *last_sent_to)
1370  {
1371  	/* If this is the first time packet is sent, use the active path,
1372  	 * else use the retran path. If the last packet was sent over the
1373  	 * retran path, update the retran path and use it.
1374  	 */
1375  	if (!last_sent_to)
1376  		return asoc->peer.active_path;
1377  	else {
1378  		if (last_sent_to == asoc->peer.retran_path)
1379  			sctp_assoc_update_retran_path(asoc);
1380  		return asoc->peer.retran_path;
1381  	}
1382  }
1383  
1384  /* Update the association's pmtu and frag_point by going through all the
1385   * transports. This routine is called when a transport's PMTU has changed.
1386   */
1387  void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1388  {
1389  	struct sctp_transport *t;
1390  	__u32 pmtu = 0;
1391  
1392  	if (!asoc)
1393  		return;
1394  
1395  	/* Get the lowest pmtu of all the transports. */
1396  	list_for_each_entry(t, &asoc->peer.transport_addr_list,
1397  				transports) {
1398  		if (t->pmtu_pending && t->dst) {
1399  			sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
1400  			t->pmtu_pending = 0;
1401  		}
1402  		if (!pmtu || (t->pathmtu < pmtu))
1403  			pmtu = t->pathmtu;
1404  	}
1405  
1406  	if (pmtu) {
1407  		asoc->pathmtu = pmtu;
1408  		asoc->frag_point = sctp_frag_point(asoc, pmtu);
1409  	}
1410  
1411  	SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
1412  			  __func__, asoc, asoc->pathmtu, asoc->frag_point);
1413  }
1414  
1415  /* Should we send a SACK to update our peer? */
1416  static inline int sctp_peer_needs_update(struct sctp_association *asoc)
1417  {
1418  	struct net *net = sock_net(asoc->base.sk);
1419  	switch (asoc->state) {
1420  	case SCTP_STATE_ESTABLISHED:
1421  	case SCTP_STATE_SHUTDOWN_PENDING:
1422  	case SCTP_STATE_SHUTDOWN_RECEIVED:
1423  	case SCTP_STATE_SHUTDOWN_SENT:
1424  		if ((asoc->rwnd > asoc->a_rwnd) &&
1425  		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1426  			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1427  			   asoc->pathmtu)))
1428  			return 1;
1429  		break;
1430  	default:
1431  		break;
1432  	}
1433  	return 0;
1434  }
1435  
1436  /* Increase asoc's rwnd by len and send any window update SACK if needed. */
1437  void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1438  {
1439  	struct sctp_chunk *sack;
1440  	struct timer_list *timer;
1441  
1442  	if (asoc->rwnd_over) {
1443  		if (asoc->rwnd_over >= len) {
1444  			asoc->rwnd_over -= len;
1445  		} else {
1446  			asoc->rwnd += (len - asoc->rwnd_over);
1447  			asoc->rwnd_over = 0;
1448  		}
1449  	} else {
1450  		asoc->rwnd += len;
1451  	}
1452  
1453  	/* If we had window pressure, start recovering it
1454  	 * once our rwnd has reached the accumulated pressure
1455  	 * threshold.  The idea is to recover slowly, but up
1456  	 * to the initial advertised window.
1457  	 */
1458  	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1459  		int change = min(asoc->pathmtu, asoc->rwnd_press);
1460  		asoc->rwnd += change;
1461  		asoc->rwnd_press -= change;
1462  	}
1463  
1464  	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
1465  			  "- %u\n", __func__, asoc, len, asoc->rwnd,
1466  			  asoc->rwnd_over, asoc->a_rwnd);
1467  
1468  	/* Send a window update SACK if the rwnd has increased by at least the
1469  	 * minimum of the association's PMTU and half of the receive buffer.
1470  	 * The algorithm used is similar to the one described in
1471  	 * Section 4.2.3.3 of RFC 1122.
1472  	 */
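	/* Concretely, sctp_peer_needs_update() fires once
	 * rwnd - a_rwnd >= max(sk_rcvbuf >> rwnd_upd_shift, pathmtu);
	 * with illustrative values sk_rcvbuf = 200000, rwnd_upd_shift = 4
	 * and pathmtu = 1500, that is max(12500, 1500) = 12500 bytes.
	 */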
1473  	if (sctp_peer_needs_update(asoc)) {
1474  		asoc->a_rwnd = asoc->rwnd;
1475  		SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
1476  				  "rwnd: %u a_rwnd: %u\n", __func__,
1477  				  asoc, asoc->rwnd, asoc->a_rwnd);
1478  		sack = sctp_make_sack(asoc);
1479  		if (!sack)
1480  			return;
1481  
1482  		asoc->peer.sack_needed = 0;
1483  
1484  		sctp_outq_tail(&asoc->outqueue, sack);
1485  
1486  		/* Stop the SACK timer.  */
1487  		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1488  		if (del_timer(timer))
1489  			sctp_association_put(asoc);
1490  	}
1491  }
1492  
1493  /* Decrease asoc's rwnd by len. */
1494  void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1495  {
1496  	int rx_count;
1497  	int over = 0;
1498  
1499  	SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
1500  	SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
1501  
1502  	if (asoc->ep->rcvbuf_policy)
1503  		rx_count = atomic_read(&asoc->rmem_alloc);
1504  	else
1505  		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1506  
1507  	/* If we've reached or overflowed our receive buffer, announce
1508  	 * a 0 rwnd if rwnd would still be positive.  Store the
1509  	 * potential pressure overflow so that the window can be restored
1510  	 * to its original value.
1511  	 */
1512  	if (rx_count >= asoc->base.sk->sk_rcvbuf)
1513  		over = 1;
1514  
1515  	if (asoc->rwnd >= len) {
1516  		asoc->rwnd -= len;
1517  		if (over) {
1518  			asoc->rwnd_press += asoc->rwnd;
1519  			asoc->rwnd = 0;
1520  		}
1521  	} else {
1522  		asoc->rwnd_over = len - asoc->rwnd;
1523  		asoc->rwnd = 0;
1524  	}
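	/* Illustrative numbers: rwnd = 1000 with len = 1500 leaves
	 * rwnd = 0 and rwnd_over = 500; rwnd = 4000 with len = 1500 and
	 * the buffer full (over) leaves rwnd = 0 and adds the remaining
	 * 2500 to rwnd_press for later recovery.
	 */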
1525  	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
1526  			  __func__, asoc, len, asoc->rwnd,
1527  			  asoc->rwnd_over, asoc->rwnd_press);
1528  }
1529  
1530  /* Build the bind address list for the association based on info from the
1531   * local endpoint and the remote peer.
1532   */
1533  int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1534  				     sctp_scope_t scope, gfp_t gfp)
1535  {
1536  	int flags;
1537  
1538  	/* Use scoping rules to determine the subset of addresses from
1539  	 * the endpoint.
1540  	 */
1541  	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1542  	if (asoc->peer.ipv4_address)
1543  		flags |= SCTP_ADDR4_PEERSUPP;
1544  	if (asoc->peer.ipv6_address)
1545  		flags |= SCTP_ADDR6_PEERSUPP;
1546  
1547  	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1548  				   &asoc->base.bind_addr,
1549  				   &asoc->ep->base.bind_addr,
1550  				   scope, gfp, flags);
1551  }
1552  
1553  /* Build the association's bind address list from the cookie.  */
1554  int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1555  					 struct sctp_cookie *cookie,
1556  					 gfp_t gfp)
1557  {
1558  	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1559  	int var_size3 = cookie->raw_addr_list_len;
1560  	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1561  
1562  	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1563  				      asoc->ep->base.bind_addr.port, gfp);
1564  }
1565  
1566  /* Lookup laddr in the bind address list of an association. */
1567  int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1568  			    const union sctp_addr *laddr)
1569  {
1570  	int found = 0;
1571  
1572  	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1573  	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1574  				 sctp_sk(asoc->base.sk)))
1575  		found = 1;
1576  
1577  	return found;
1578  }
1579  
1580  /* Set an association id for a given association */
1581  int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1582  {
1583  	bool preload = gfp & __GFP_WAIT;
1584  	int ret;
1585  
1586  	/* If the id is already assigned, keep it. */
1587  	if (asoc->assoc_id)
1588  		return 0;
1589  
1590  	if (preload)
1591  		idr_preload(gfp);
1592  	spin_lock_bh(&sctp_assocs_id_lock);
1593  	/* 0 is not a valid assoc_id, must be >= 1 */
1594  	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
1595  	spin_unlock_bh(&sctp_assocs_id_lock);
1596  	if (preload)
1597  		idr_preload_end();
1598  	if (ret < 0)
1599  		return ret;
1600  
1601  	asoc->assoc_id = (sctp_assoc_t)ret;
1602  	return 0;
1603  }
1604  
1605  /* Free the ASCONF queue */
1606  static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1607  {
1608  	struct sctp_chunk *asconf;
1609  	struct sctp_chunk *tmp;
1610  
1611  	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1612  		list_del_init(&asconf->list);
1613  		sctp_chunk_free(asconf);
1614  	}
1615  }
1616  
1617  /* Free asconf_ack cache */
1618  static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1619  {
1620  	struct sctp_chunk *ack;
1621  	struct sctp_chunk *tmp;
1622  
1623  	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1624  				transmitted_list) {
1625  		list_del_init(&ack->transmitted_list);
1626  		sctp_chunk_free(ack);
1627  	}
1628  }
1629  
1630  /* Clean up the ASCONF_ACK queue */
1631  void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1632  {
1633  	struct sctp_chunk *ack;
1634  	struct sctp_chunk *tmp;
1635  
1636  	/* We can remove all the entries from the queue up to
1637  	 * the "Peer-Sequence-Number".
1638  	 */
1639  	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1640  				transmitted_list) {
1641  		if (ack->subh.addip_hdr->serial ==
1642  				htonl(asoc->peer.addip_serial))
1643  			break;
1644  
1645  		list_del_init(&ack->transmitted_list);
1646  		sctp_chunk_free(ack);
1647  	}
1648  }
1649  
1650  /* Find the ASCONF_ACK whose serial number matches ASCONF */
1651  struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1652  					const struct sctp_association *asoc,
1653  					__be32 serial)
1654  {
1655  	struct sctp_chunk *ack;
1656  
1657  	/* Walk through the list of cached ASCONF-ACKs and find the
1658  	 * ack chunk whose serial number matches that of the request.
1659  	 */
1660  	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1661  		if (ack->subh.addip_hdr->serial == serial) {
1662  			sctp_chunk_hold(ack);
1663  			return ack;
1664  		}
1665  	}
1666  
1667  	return NULL;
1668  }
1669  
1670  void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1671  {
1672  	/* Free any cached ASCONF_ACK chunk. */
1673  	sctp_assoc_free_asconf_acks(asoc);
1674  
1675  	/* Free the ASCONF queue. */
1676  	sctp_assoc_free_asconf_queue(asoc);
1677  
1678  	/* Free any cached ASCONF chunk. */
1679  	if (asoc->addip_last_asconf)
1680  		sctp_chunk_free(asoc->addip_last_asconf);
1681  }
1682