// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Dajiang Zhang         <dajiang.zhang@nokia.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

static int sctp_cmd_interpreter(enum sctp_event_type event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp);
static int sctp_side_effects(enum sctp_event_type event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp);

/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk. */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to do delayed processing of ECN CWR chunk. */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary.  We call this at the end of a packet. */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 struct sctp_cmd_seq *commands)
{
	struct sctp_transport *trans = asoc->peer.last_data_from;
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACK's
	 *	     : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed.  Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet. */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet. */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		__u32 old_a_rwnd = asoc->a_rwnd;

		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack) {
			asoc->a_rwnd = old_a_rwnd;
			goto nomem;
		}

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, T3_rtx_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error;

	/* Check whether a task is in the sock. */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* This is a generic interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					enum sctp_event_timeout timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later. */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

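/* Wrappers that adapt each per-association timer below to
 * sctp_generate_timeout_event().
 */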
static void sctp_generate_t1_cookie_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t,
			   timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]);

	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heartbeat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(struct timer_list *t)
{
	struct sctp_transport *transport = from_timer(transport, t, hb_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	u32 elapsed, timeout;
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Check if we should still send the heartbeat or reschedule */
	elapsed = jiffies - transport->last_time_sent;
	timeout = sctp_transport_timeout(transport);
	if (elapsed < timeout) {
		elapsed = timeout - elapsed;
		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, proto_unreach_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->proto_unreach_timer,
			       jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the RE-CONFIG timer. */
void sctp_generate_reconf_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, reconf_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* This happens when the response arrives after the timer is triggered. */
	if (!asoc->strreset_chunk)
		goto out_unlock;

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

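/* Dispatch table for the association timers.  NULL entries mark timeouts
 * whose timers live on the transport (T3-RTX, HEARTBEAT, RECONF) and are
 * armed by their dedicated handlers above.
 */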
sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	[SCTP_EVENT_TIMEOUT_NONE] =		NULL,
	[SCTP_EVENT_TIMEOUT_T1_COOKIE] =	sctp_generate_t1_cookie_event,
	[SCTP_EVENT_TIMEOUT_T1_INIT] =		sctp_generate_t1_init_event,
	[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =	sctp_generate_t2_shutdown_event,
	[SCTP_EVENT_TIMEOUT_T3_RTX] =		NULL,
	[SCTP_EVENT_TIMEOUT_T4_RTO] =		sctp_generate_t4_rto_event,
	[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
					sctp_generate_t5_shutdown_guard_event,
	[SCTP_EVENT_TIMEOUT_HEARTBEAT] =	NULL,
	[SCTP_EVENT_TIMEOUT_RECONF] =		NULL,
	[SCTP_EVENT_TIMEOUT_SACK] =		sctp_generate_sack_event,
	[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =	sctp_generate_autoclose_event,
};


/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (asoc->base.net->sctp.pf_enable &&
	    transport->state == SCTP_ACTIVE &&
	    transport->error_count < transport->pathmaxrxt &&
	    transport->error_count > transport->pf_retrans) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_transport_reset_hb_timer(transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	if (transport->error_count > transport->ps_retrans &&
	    asoc->peer.primary_path == transport &&
	    asoc->peer.active_path != transport)
		sctp_assoc_set_primary(asoc, asoc->peer.active_path);

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case:  the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}

/* Worker routine to handle INIT command failure. */
static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  enum sctp_event_type event_type,
				  union sctp_subtype subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, chunk,
						GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be a real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 struct sctp_init_chunk *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a side effect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers. */
static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * Hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		sctp_transport_reset_hb_timer(t);
}

static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}


/* Helper function to handle the reception of a HEARTBEAT ACK. */
static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	struct sctp_sender_hb_info *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING.  If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being
	 * reached to shutdown the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	if (t->dst)
		sctp_transport_dst_confirm(t);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer. */
	sctp_transport_reset_hb_timer(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}


/* Helper function to process the SACK command. */
static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		/* There are no more TSNs awaiting SACK. */
		err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}

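/* Merge state from a duplicate association into the existing one; if the
 * update cannot be performed, abort the association with a "resource low"
 * error.
 */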
static void sctp_cmd_assoc_update(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_association *new)
{
	struct net *net = asoc->base.net;
	struct sctp_chunk *abort;

	if (!sctp_assoc_update(asoc, new))
		return;

	abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
	if (abort) {
		sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
		sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
	}
	sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
	sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
			SCTP_PERR(SCTP_ERROR_RSRC_LOW));
	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
			       struct sctp_association *asoc,
			       enum sctp_state state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED)) {
			inet_sk_set_state(sk, SCTP_SS_CLOSING);
			sk->sk_shutdown |= RCV_SHUTDOWN;
		}
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED)) {
		kfree(asoc->peer.cookie);
		asoc->peer.cookie = NULL;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}

	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
	    !sctp_outq_is_empty(&asoc->outqueue))
		sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
}

/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_association_free(asoc);
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use the active path instead of the primary path
 * just because the primary path may be inactive).
 */
static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}

/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

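	/* Note: building the remote-error event pulls the current error
	 * TLV off chunk->skb, so skb->data advances toward chunk_end and
	 * this loop terminates.
	 */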
	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			struct sctp_chunkhdr *unk_chunk_hdr;

			unk_chunk_hdr = (struct sctp_chunkhdr *)
							err_hdr->variable;
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Helper function to remove the association's non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *temp;
	struct list_head *pos;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);
	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}

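/* Helper function to generate an authentication event telling the ULP
 * that the peer does not support SCTP AUTH.
 */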
static void sctp_cmd_peer_no_auth(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, GFP_ATOMIC);
	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}


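/* Bump the INIT error counter and, once every transport has been tried at
 * the current timeout, double the T1 timer (bounded by max_init_timeo).
 */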
static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     enum sctp_event_timeout timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}

}

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static void sctp_cmd_send_msg(struct sctp_association *asoc,
			      struct sctp_datamsg *msg, gfp_t gfp)
{
	struct sctp_chunk *chunk;

	list_for_each_entry(chunk, &msg->chunks, frag_list)
		sctp_outq_tail(&asoc->outqueue, chunk, gfp);

	asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
}


/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
	       union sctp_subtype subtype, enum sctp_state state,
	       struct sctp_endpoint *ep, struct sctp_association *asoc,
	       void *event_arg, gfp_t gfp)
{
	typedef const char *(printfn_t)(union sctp_subtype);
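	/* Subtype pretty-printers for the debug macros, indexed by event
	 * type (event types start at 1, hence the leading NULL).
	 */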
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn __attribute__ ((unused)) = table[event_type];
	const struct sctp_sm_table_entry *state_fn;
	struct sctp_cmd_seq commands;
	enum sctp_disposition status;
	int error = 0;

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, &asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(enum sctp_event_type event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, *asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
	case SCTP_DISPOSITION_ABORT:
		/* This should now be a command. */
		*asoc = NULL;
		break;

	case SCTP_DISPOSITION_CONSUME:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter. */
static int sctp_cmd_interpreter(enum sctp_event_type event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp)
{
	struct sctp_sock *sp = sctp_sk(ep->base.sk);
	struct sctp_chunk *chunk = NULL, *new_obj;
	struct sctp_packet *packet;
	struct sctp_sackhdr sackh;
	struct timer_list *timer;
	struct sctp_transport *t;
	unsigned long timeout;
	struct sctp_cmd *cmd;
	int local_cork = 0;
	int error = 0;
	int force;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association. */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}

			/* Register with the endpoint. */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_cmd_assoc_update(commands, asoc, cmd->obj.asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			/* Delete the current association. */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state. */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN. */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			asoc->stream.si->handle_ftsn(&asoc->ulpq,
						     cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK. */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk. */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk. */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enabling COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios, the following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk. */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer. */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			asoc->stream.si->ulpevent_data(&asoc->ulpq,
						       cmd->obj.chunk,
						       GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer. */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			asoc->stream.si->enqueue_event(&asoc->ulpq,
						       cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If a caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer. */
			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer. */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet, gfp);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_T1_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T1_RTX);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing. */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing. */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_TIMER_START_ONCE:
			timer = &asoc->timers[cmd->obj.to];

			if (timer_pending(timer))
				break;
			fallthrough;

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			BUG_ON(!timeout);

			/* SCTP has a hard time with timer starts.  Because
			 * we process timer starts as side effects, it can be
			 * hard to tell if we have already started a timer or
			 * not, which leads to BUG halts when we call
			 * add_timer.  So here, if the timer is not already
			 * pending, we take a reference and then mod the
			 * timer to the shorter of the two expiration times.
			 */
			if (!timer_pending(timer))
				sctp_association_hold(asoc);
			timer_reduce(timer, jiffies + timeout);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
			chunk = cmd->obj.chunk;
			t = sctp_assoc_choose_alter_transport(asoc,
						asoc->init_last_sent_to);
			asoc->init_last_sent_to = t;
			chunk->transport = t;
			t->init_sent_count++;
			/* Set the new transport as primary */
			sctp_assoc_set_primary(asoc, t);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_INIT,
						 "INIT");

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			break;

		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_COOKIE,
						 "COOKIE");

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				sctp_retransmit_mark(&asoc->outqueue, t,
						     SCTP_RTXR_T1_RTX);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.u16);
			break;

		case SCTP_CMD_INIT_COUNTER_INC:
			asoc->init_err_counter++;
			break;

		case SCTP_CMD_INIT_COUNTER_RESET:
			asoc->init_err_counter = 0;
			asoc->init_cycle = 0;
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->init_sent_count = 0;
			}
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			pr_debug("%s: vtag mismatch!\n", __func__);
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport. */
			sctp_do_8_2_transport_strike(commands, asoc,
						     cmd->obj.transport, 0);
			break;

		case SCTP_CMD_TRANSPORT_IDLE:
			t = cmd->obj.transport;
			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
			break;

		case SCTP_CMD_TRANSPORT_HB_SENT:
			t = cmd->obj.transport;
			sctp_do_8_2_transport_strike(commands, asoc,
						     t, 1);
			t->hb_sent = 1;
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_transport_reset_hb_timer(t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.be32;
			sackh.a_rwnd = htonl(asoc->peer.rwnd +
					     asoc->outqueue.outstanding_bytes);
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			chunk->subh.sack_hdr = &sackh;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_CHUNK(chunk));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.
			 * Uncork the queue since there might be
			 * responses pending.
			 */
			chunk->pdiscard = 1;
			if (asoc) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			asoc->stream.si->renege_events(&asoc->ulpq,
						       cmd->obj.chunk,
						       GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		case SCTP_CMD_CLEAR_INIT_TAG:
			asoc->peer.i.init_tag = 0;
			break;
		case SCTP_CMD_DEL_NON_PRIMARY:
			sctp_cmd_del_non_primary(asoc);
			break;
		case SCTP_CMD_T3_RTX_TIMERS_STOP:
			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
			break;
		case SCTP_CMD_FORCE_PRIM_RETRAN:
			t = asoc->peer.retran_path;
			asoc->peer.retran_path = asoc->peer.primary_path;
			sctp_outq_uncork(&asoc->outqueue, gfp);
			local_cork = 0;
			asoc->peer.retran_path = t;
			break;
		case SCTP_CMD_SET_SK_ERR:
			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
			break;
		case SCTP_CMD_ASSOC_CHANGE:
			sctp_cmd_assoc_change(commands, asoc,
					      cmd->obj.u8);
			break;
		case SCTP_CMD_ADAPTATION_IND:
			sctp_cmd_adaptation_ind(commands, asoc);
			break;
		case SCTP_CMD_PEER_NO_AUTH:
			sctp_cmd_peer_no_auth(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_SHKEY:
			error = sctp_auth_asoc_init_active_key(asoc,
						GFP_ATOMIC);
			break;
		case SCTP_CMD_UPDATE_INITTAG:
			asoc->peer.i.init_tag = cmd->obj.u32;
			break;
		case SCTP_CMD_SEND_MSG:
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
			break;
		case SCTP_CMD_PURGE_ASCONF_QUEUE:
			sctp_asconf_queue_teardown(asoc);
			break;

		case SCTP_CMD_SET_ASOC:
			if (asoc && local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			asoc = cmd->obj.asoc;
			break;

		default:
			pr_warn("Impossible command: %u\n",
				cmd->verb);
			break;
		}

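		/* A command failed: free any remaining queued replies and
		 * stop interpreting; the error is returned to the caller.
		 */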
		if (error) {
			cmd = sctp_next_cmd(commands);
			while (cmd) {
				if (cmd->verb == SCTP_CMD_REPLY)
					sctp_chunk_free(cmd->obj.chunk);
				cmd = sctp_next_cmd(commands);
			}
			break;
		}
	}

	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
	 */
	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
		if (chunk->end_of_packet || chunk->singleton)
			sctp_outq_uncork(&asoc->outqueue, gfp);
	} else if (local_cork)
		sctp_outq_uncork(&asoc->outqueue, gfp);

	if (sp->data_ready_signalled)
		sp->data_ready_signalled = 0;

	return error;
}