/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Hui Huang		    <hui.huang@nokia.com>
 *    Dajiang Zhang	    <dajiang.zhang@nokia.com>
 *    Daisy Chang	    <daisyc@us.ibm.com>
 *    Sridhar Samudrala	    <sri@us.ibm.com>
 *    Ardelle Fan	    <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

static int sctp_cmd_interpreter(enum sctp_event event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp);
static int sctp_side_effects(enum sctp_event event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp);

/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk.  */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}
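
/* Editor's note, not from the original source: TSN_lt() above compares
 * TSNs in modulo-2^32 serial-number arithmetic, roughly
 *
 *	TSN_lt(a, b) == ((__s32)((a) - (b)) < 0)
 *
 * so "more recent than the last CWR" stays correct across TSN wraparound.
 */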

/* Helper function to do delayed processing of ECN CWR chunk.  */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary.  We call this at the end of a packet.  */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 struct sctp_cmd_seq *commands)
{
	struct sctp_transport *trans = asoc->peer.last_data_from;
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 * 	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACK's
	 *           : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		__u32 old_a_rwnd = asoc->a_rwnd;

		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack) {
			asoc->a_rwnd = old_a_rwnd;
			goto nomem;
		}

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}
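
/* Added commentary, not in the original source: with the default
 * sackfreq of 2, the delayed-SACK path above acknowledges every second
 * in-order packet immediately:
 *
 *	packet 1: sack_cnt 0 -> 1, sack_cnt >= sackfreq - 1, so
 *	          sack_needed is set and the SACK timer is restarted
 *	packet 2: sack_needed already set, SACK sent, counters reset
 *
 * Out-of-order data (max_tsn_seen != ctsn) bypasses the delay entirely.
 */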

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error;

	/* Check whether a task is in the sock.  */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}
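
/* Reference-counting note (added commentary, not in the original
 * source): an armed timer owns one reference on the transport.  When
 * the sock is busy, mod_timer() returns 0 here because this timer just
 * expired and is no longer pending, so a fresh hold is taken for the
 * re-armed timer; the sctp_transport_put() below then drops the
 * reference owned by the expiration that is currently running.
 */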

/* This is a generic interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					enum sctp_event_timeout timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later.  */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

static void sctp_generate_t1_cookie_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *)data;
	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.   Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	u32 elapsed, timeout;
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Check if we should still send the heartbeat or reschedule */
	elapsed = jiffies - transport->last_time_sent;
	timeout = sctp_transport_timeout(transport);
	if (elapsed < timeout) {
		elapsed = timeout - elapsed;
		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *)data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->proto_unreach_timer,
				jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

/* Handle the timeout of the RE-CONFIG timer. */
void sctp_generate_reconf_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *)data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine.  */
static void sctp_generate_sack_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *)data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	NULL,
	sctp_generate_t1_cookie_event,
	sctp_generate_t1_init_event,
	sctp_generate_t2_shutdown_event,
	NULL,
	sctp_generate_t4_rto_event,
	sctp_generate_t5_shutdown_guard_event,
	NULL,
	NULL,
	sctp_generate_sack_event,
	sctp_generate_autoclose_event,
};
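
/* Added commentary, not in the original source: this table is indexed
 * by enum sctp_event_timeout.  The NULL slots are the timeouts that
 * have no per-association timer here: _NONE, plus T3-RTX, HEARTBEAT
 * and RECONF, whose timers live on struct sctp_transport and are
 * handled by the sctp_generate_*_event() functions above.
 */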


/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	struct net *net = sock_net(asoc->base.sk);

	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (net->sctp.pf_enable &&
	   (transport->state == SCTP_ACTIVE) &&
	   (transport->error_count < transport->pathmaxrxt) &&
	   (transport->error_count > transport->pf_retrans)) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_transport_reset_hb_timer(transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case:  the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}
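
/* Worked example (added commentary, not in the original source): with
 * the RFC 4960 defaults rto_initial = 3s and rto_max = 60s, rule E2
 * backs the timer off as 3s -> 6s -> 12s -> 24s -> 48s -> 60s (capped).
 * For heartbeats, the backoff starts only once a HEARTBEAT has gone
 * unacknowledged (transport->hb_sent).
 */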

/* Worker routine to handle INIT command failure.  */
static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  enum sctp_event event_type,
				  union sctp_subtype subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, chunk,
						GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 struct sctp_init_chunk *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.   This is when we process the INIT-ACK.   If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		sctp_transport_reset_hb_timer(t);
}

static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}


/* Helper function to handle the reception of a HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	struct sctp_sender_hb_info *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING. If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being reached
	 * to shut down the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	if (t->dst)
		sctp_transport_dst_confirm(t);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer.  */
	sctp_transport_reset_hb_timer(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}
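
/* Added commentary, not in the original source: the RTT sample fed to
 * sctp_transport_update_rto() above is simply
 *
 *	rtt = jiffies - hbinfo->sent_at;
 *
 * i.e. the echo of the transmit timestamp carried in the HEARTBEAT,
 * which then updates SRTT/RTTVAR per RFC 4960 section 6.3.1.
 */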


/* Helper function to process the SACK command.  */
static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		struct net *net = sock_net(asoc->base.sk);

		/* There are no more TSNs awaiting SACK.  */
		err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}

static void sctp_cmd_assoc_update(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_association *new)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_chunk *abort;

	if (!sctp_assoc_update(asoc, new))
		return;

	abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
	if (abort) {
		sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
		sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
	}
	sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
	sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
			SCTP_PERR(SCTP_ERROR_RSRC_LOW));
	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
			       struct sctp_association *asoc,
			       enum sctp_state state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			sk->sk_state = SCTP_SS_ESTABLISHED;

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED)) {
			sk->sk_state = SCTP_SS_CLOSING;
			sk->sk_shutdown |= RCV_SHUTDOWN;
		}
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED)) {
		kfree(asoc->peer.cookie);
		asoc->peer.cookie = NULL;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}

	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
	    !sctp_outq_is_empty(&asoc->outqueue))
		sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
}

/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_association_free(asoc);
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use active path instead of primary path just
 * because primary path may be inactive).
 */
static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}

/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		sctp_ulpq_tail_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			struct sctp_chunkhdr *unk_chunk_hdr;

			unk_chunk_hdr = (struct sctp_chunkhdr *)
							err_hdr->variable;
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Process variable FWDTSN chunk information. */
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
				    struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk) {
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
	}
}

/* Helper function to remove the association non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *temp;
	struct list_head *pos;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					    asoc->c.sinit_num_ostreams,
					    asoc->c.sinit_max_instreams,
					    NULL, GFP_ATOMIC);
	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}


static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     enum sctp_event_timeout timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}

}
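
/* Worked example (added commentary, not in the original source): once
 * every transport has been tried at the current timeout
 * (init_sent_count > init_cycle + 1), the T1 timeout doubles, e.g.
 * 3s -> 6s -> 12s -> ..., clamped at max_init_timeo, which bounds how
 * long a single INIT or COOKIE-ECHO retry cycle can take.
 */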

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static void sctp_cmd_send_msg(struct sctp_association *asoc,
			      struct sctp_datamsg *msg, gfp_t gfp)
{
	struct sctp_chunk *chunk;

	list_for_each_entry(chunk, &msg->chunks, frag_list)
		sctp_outq_tail(&asoc->outqueue, chunk, gfp);
}


/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, enum sctp_event event_type,
	       union sctp_subtype subtype, enum sctp_state state,
	       struct sctp_endpoint *ep, struct sctp_association *asoc,
	       void *event_arg, gfp_t gfp)
{
	typedef const char *(printfn_t)(union sctp_subtype);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
	const struct sctp_sm_table_entry *state_fn;
	struct sctp_cmd_seq commands;
	enum sctp_disposition status;
	int error = 0;

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, &asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}

/*****************************************************************
 * This is the master state function side-effect processing function.
 *****************************************************************/
static int sctp_side_effects(enum sctp_event event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, *asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
	case SCTP_DISPOSITION_ABORT:
		/* This should now be a command. */
		*asoc = NULL;
		break;

	case SCTP_DISPOSITION_CONSUME:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter.  */
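/* Usage sketch (added commentary, not in the original source): state
 * functions queue side effects which this interpreter then replays,
 * along the lines of
 *
 *	struct sctp_cmd_seq commands;
 *
 *	sctp_init_cmd_seq(&commands);
 *	sctp_add_cmd_sf(&commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
 *	...
 *	while ((cmd = sctp_next_cmd(&commands)) != NULL)
 *		switch (cmd->verb) { ... }
 */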
static int sctp_cmd_interpreter(enum sctp_event event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp)
{
	struct sctp_sock *sp = sctp_sk(ep->base.sk);
	struct sctp_chunk *chunk = NULL, *new_obj;
	struct sctp_packet *packet;
	struct sctp_sackhdr sackh;
	struct timer_list *timer;
	struct sctp_transport *t;
	unsigned long timeout;
	struct sctp_cmd *cmd;
	int local_cork = 0;
	int error = 0;
	int force;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}

			/* Register with the endpoint.  */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
		       sctp_cmd_assoc_update(commands, asoc, cmd->obj.asoc);
		       break;

		case SCTP_CMD_PURGE_OUTQUEUE:
		       sctp_outq_teardown(&asoc->outqueue);
		       break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			/* Delete the current association.  */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state.  */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN.  */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			/* Move the Cumulative TSN Ack ahead. */
			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);

			/* purge the fragmentation queue */
			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);

			/* Abort any in progress partial delivery. */
			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK.  */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk.  */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk.  */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enable COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios, the following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk.  */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer.  */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer.  */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If the caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer.  */
			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer.  */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet, gfp);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_T1_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T1_RTX);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing.   */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing.  */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_TIMER_START_ONCE:
			timer = &asoc->timers[cmd->obj.to];

			if (timer_pending(timer))
				break;
			/* fall through */

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			BUG_ON(!timeout);

			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
			chunk = cmd->obj.chunk;
			t = sctp_assoc_choose_alter_transport(asoc,
						asoc->init_last_sent_to);
			asoc->init_last_sent_to = t;
			chunk->transport = t;
			t->init_sent_count++;
			/* Set the new transport as primary */
			sctp_assoc_set_primary(asoc, t);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						SCTP_EVENT_TIMEOUT_T1_INIT,
						"INIT");

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			break;

		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						SCTP_EVENT_TIMEOUT_T1_COOKIE,
						"COOKIE");

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					transports) {
				sctp_retransmit_mark(&asoc->outqueue, t,
					    SCTP_RTXR_T1_RTX);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.u32);
			break;

		case SCTP_CMD_INIT_COUNTER_INC:
			asoc->init_err_counter++;
			break;

		case SCTP_CMD_INIT_COUNTER_RESET:
			asoc->init_err_counter = 0;
			asoc->init_cycle = 0;
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->init_sent_count = 0;
			}
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			pr_debug("%s: vtag mismatch!\n", __func__);
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport.  */
			sctp_do_8_2_transport_strike(commands, asoc,
						    cmd->obj.transport, 0);
			break;

		case SCTP_CMD_TRANSPORT_IDLE:
			t = cmd->obj.transport;
			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
			break;

		case SCTP_CMD_TRANSPORT_HB_SENT:
			t = cmd->obj.transport;
			sctp_do_8_2_transport_strike(commands, asoc,
						     t, 1);
			t->hb_sent = 1;
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_transport_reset_hb_timer(t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.be32;
			sackh.a_rwnd = htonl(asoc->peer.rwnd +
					     asoc->outqueue.outstanding_bytes);
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			chunk->subh.sack_hdr = &sackh;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_CHUNK(chunk));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.
			 * Uncork the queue since there might be
			 * responses pending
			 */
			chunk->pdiscard = 1;
			if (asoc) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
					 GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		case SCTP_CMD_CLEAR_INIT_TAG:
			asoc->peer.i.init_tag = 0;
			break;
		case SCTP_CMD_DEL_NON_PRIMARY:
			sctp_cmd_del_non_primary(asoc);
			break;
		case SCTP_CMD_T3_RTX_TIMERS_STOP:
			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
			break;
		case SCTP_CMD_FORCE_PRIM_RETRAN:
			t = asoc->peer.retran_path;
			asoc->peer.retran_path = asoc->peer.primary_path;
			sctp_outq_uncork(&asoc->outqueue, gfp);
			local_cork = 0;
			asoc->peer.retran_path = t;
			break;
		case SCTP_CMD_SET_SK_ERR:
			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
			break;
		case SCTP_CMD_ASSOC_CHANGE:
			sctp_cmd_assoc_change(commands, asoc,
					      cmd->obj.u8);
			break;
		case SCTP_CMD_ADAPTATION_IND:
			sctp_cmd_adaptation_ind(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_SHKEY:
			error = sctp_auth_asoc_init_active_key(asoc,
						GFP_ATOMIC);
			break;
		case SCTP_CMD_UPDATE_INITTAG:
			asoc->peer.i.init_tag = cmd->obj.u32;
			break;
		case SCTP_CMD_SEND_MSG:
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
			break;
		case SCTP_CMD_PURGE_ASCONF_QUEUE:
			sctp_asconf_queue_teardown(asoc);
			break;

		case SCTP_CMD_SET_ASOC:
			if (asoc && local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			asoc = cmd->obj.asoc;
			break;

		default:
			pr_warn("Impossible command: %u\n",
				cmd->verb);
			break;
		}

		if (error) {
			cmd = sctp_next_cmd(commands);
			while (cmd) {
				if (cmd->verb == SCTP_CMD_REPLY)
					sctp_chunk_free(cmd->obj.chunk);
				cmd = sctp_next_cmd(commands);
			}
			break;
		}
	}

	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
	 */
	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
		if (chunk->end_of_packet || chunk->singleton)
			sctp_outq_uncork(&asoc->outqueue, gfp);
	} else if (local_cork)
		sctp_outq_uncork(&asoc->outqueue, gfp);

	if (sp->data_ready_signalled)
		sp->data_ready_signalled = 0;

	return error;
}