1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #ifdef __FreeBSD__
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 271230 2014-09-07 18:05:37Z tuexen $");
36 #endif
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctputil.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_input.h>
46 #include <netinet/sctp_auth.h>
47 #include <netinet/sctp_indata.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_timer.h>
51 #include <netinet/sctp_crc32.h>
52 #if defined(INET) || defined(INET6)
53 #if !defined(__Userspace_os_Windows)
54 #include <netinet/udp.h>
55 #endif
56 #endif
57 #if defined(__FreeBSD__)
58 #include <sys/smp.h>
59 #endif
60 
61 #if defined(__APPLE__)
62 #define APPLE_FILE_NO 2
63 #endif
64 
65 
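/*
 * The static handlers below implement the receive-side processing of
 * SCTP control chunks (INIT, INIT-ACK, HEARTBEAT-ACK, ABORT, SHUTDOWN,
 * SHUTDOWN-ACK, ERROR and COOKIE-ECHO).
 */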
66 static void
67 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
68 {
69 	struct sctp_nets *net;
70 
71 	/* This not only stops all cookie timers,
72 	 * it also stops any INIT timers. This
73 	 * will make sure that the timers are stopped in
74 	 * all collision cases.
75 	 */
76 	SCTP_TCB_LOCK_ASSERT(stcb);
77 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
78 		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
79 			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
80 					stcb->sctp_ep,
81 					stcb,
82 					net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_1);
83 		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
84 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
85 					stcb->sctp_ep,
86 					stcb,
87 					net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_2);
88 		}
89 	}
90 }
91 
92 /* INIT handler */
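/*
 * sctp_handle_init() sanity-checks an incoming INIT (chunk length,
 * non-zero initiate tag, a_rwnd >= SCTP_MIN_RWND, non-zero stream counts
 * and the AUTH parameters), aborting on malformed input, and then
 * answers with an INIT-ACK, or with a SHUTDOWN-ACK if the association
 * is already in SHUTDOWN-ACK-SENT.
 */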
93 static void
94 sctp_handle_init(struct mbuf *m, int iphlen, int offset,
95                  struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
96                  struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
97                  struct sctp_tcb *stcb, int *abort_no_unlock,
98 #if defined(__FreeBSD__)
99                  uint8_t use_mflowid, uint32_t mflowid,
100 #endif
101                  uint32_t vrf_id, uint16_t port)
102 {
103 	struct sctp_init *init;
104 	struct mbuf *op_err;
105 
106 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
107 		(void *)stcb);
108 	if (stcb == NULL) {
109 		SCTP_INP_RLOCK(inp);
110 	}
111 	/* validate length */
112 	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
113 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
114 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
115 #if defined(__FreeBSD__)
116 		                       use_mflowid, mflowid,
117 #endif
118 				       vrf_id, port);
119 		if (stcb)
120 			*abort_no_unlock = 1;
121 		goto outnow;
122 	}
123 	/* validate parameters */
124 	init = &cp->init;
125 	if (init->initiate_tag == 0) {
126 		/* protocol error... send abort */
127 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
128 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
129 #if defined(__FreeBSD__)
130 		                       use_mflowid, mflowid,
131 #endif
132 				       vrf_id, port);
133 		if (stcb)
134 			*abort_no_unlock = 1;
135 		goto outnow;
136 	}
137 	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
138 		/* invalid parameter... send abort */
139 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
140 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
141 #if defined(__FreeBSD__)
142 		                       use_mflowid, mflowid,
143 #endif
144 				       vrf_id, port);
145 		if (stcb)
146 			*abort_no_unlock = 1;
147 		goto outnow;
148 	}
149 	if (init->num_inbound_streams == 0) {
150 		/* protocol error... send abort */
151 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
152 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
153 #if defined(__FreeBSD__)
154 		                       use_mflowid, mflowid,
155 #endif
156 				       vrf_id, port);
157 		if (stcb)
158 			*abort_no_unlock = 1;
159 		goto outnow;
160 	}
161 	if (init->num_outbound_streams == 0) {
162 		/* protocol error... send abort */
163 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
164 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
165 #if defined(__FreeBSD__)
166 		                       use_mflowid, mflowid,
167 #endif
168 				       vrf_id, port);
169 		if (stcb)
170 			*abort_no_unlock = 1;
171 		goto outnow;
172 	}
173 	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
174 					   offset + ntohs(cp->ch.chunk_length))) {
175 		/* auth parameter(s) error... send abort */
176 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
177 		                             "Problem with AUTH parameters");
178 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
179 #if defined(__FreeBSD__)
180 		                       use_mflowid, mflowid,
181 #endif
182 		                       vrf_id, port);
183 		if (stcb)
184 			*abort_no_unlock = 1;
185 		goto outnow;
186 	}
187 	/* We are only accepting if we have a socket with positive so_qlimit.*/
188 	if ((stcb == NULL) &&
189 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
190 	     (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
191 	     (inp->sctp_socket == NULL) ||
192 	     (inp->sctp_socket->so_qlimit == 0))) {
193 		/*
194 		 * FIX ME ?? What about TCP model and we have a
195 		 * match/restart case? Actually no fix is needed.
196 		 * the lookup will always find the existing assoc so stcb
197 		 * would not be NULL. It may be questionable to do this
198 		 * since we COULD just send back the INIT-ACK and hope that
199 		 * the app did accept()'s by the time the COOKIE was sent. But
200 		 * there is a price to pay for COOKIE generation and I don't
201 		 * want to pay it on the chance that the app will actually do
202 		 * some accepts(). The App just loses and should NOT be in
203 		 * this state :-)
204 		 */
205 		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
206 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
207 			                             "No listener");
208 			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
209 #if defined(__FreeBSD__)
210 			                use_mflowid, mflowid,
211 #endif
212 			                vrf_id, port);
213 		}
214 		goto outnow;
215 	}
216 	if ((stcb != NULL) &&
217 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
218 		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
219 		sctp_send_shutdown_ack(stcb, NULL);
220 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
221 	} else {
222 		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
223 		sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, src, dst,
224 		                       sh, cp,
225 #if defined(__FreeBSD__)
226 		                       use_mflowid, mflowid,
227 #endif
228 		                       vrf_id, port,
229 		                       ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
230 	}
231  outnow:
232 	if (stcb == NULL) {
233 		SCTP_INP_RUNLOCK(inp);
234 	}
235 }
236 
237 /*
238  * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
239  */
240 
241 int
242 sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
243 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
244 	SCTP_UNUSED
245 #endif
246 )
247 {
248 	int unsent_data = 0;
249 	unsigned int i;
250 	struct sctp_stream_queue_pending *sp;
251 	struct sctp_association *asoc;
252 
253 	/* This function returns the number of streams that have
254 	 * true unsent data on them. Note that as it looks through
255 	 * it will clean up any places that have old data that
256 	 * has been sent but left at top of stream queue.
257 	 */
258 	asoc = &stcb->asoc;
259 	SCTP_TCB_SEND_LOCK(stcb);
260 	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
261 		/* Check to see if some data queued */
262 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
263 			/*sa_ignore FREED_MEMORY*/
264 			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
265 			if (sp == NULL) {
266 				continue;
267 			}
268 			if ((sp->msg_is_complete) &&
269 			    (sp->length == 0)  &&
270 			    (sp->sender_all_done)) {
271 				/* We are doing deferred cleanup. Last
272 				 * time through when we took all the data
273 				 * the sender_all_done was not set.
274 				 */
275 				if (sp->put_last_out == 0) {
276 					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
277 					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
278 					            sp->sender_all_done,
279 					            sp->length,
280 					            sp->msg_is_complete,
281 					            sp->put_last_out);
282 				}
283 				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
284 				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
285 				if (sp->net) {
286 					sctp_free_remote_addr(sp->net);
287 					sp->net = NULL;
288 				}
289 				if (sp->data) {
290 					sctp_m_freem(sp->data);
291 					sp->data = NULL;
292 				}
293 				sctp_free_a_strmoq(stcb, sp, so_locked);
294 			} else {
295 				unsent_data++;
296 				break;
297 			}
298 		}
299 	}
300 	SCTP_TCB_SEND_UNLOCK(stcb);
301 	return (unsent_data);
302 }
303 
304 static int
305 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
306 {
307 	struct sctp_init *init;
308 	struct sctp_association *asoc;
309 	struct sctp_nets *lnet;
310 	unsigned int i;
311 
312 	init = &cp->init;
313 	asoc = &stcb->asoc;
314 	/* save off parameters */
315 	asoc->peer_vtag = ntohl(init->initiate_tag);
316 	asoc->peers_rwnd = ntohl(init->a_rwnd);
317 	/* init tsn's */
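	/*
	 * Prime the mapping array and the ASCONF sequence to one before
	 * the peer's initial TSN, so the first TSN/serial received from
	 * the peer is treated as the next expected value.
	 */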
318 	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
319 
320 	if (!TAILQ_EMPTY(&asoc->nets)) {
321 		/* update any ssthresh's that may have a default */
322 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
323 			lnet->ssthresh = asoc->peers_rwnd;
324 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
325 				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
326 			}
327 
328 		}
329 	}
330 	SCTP_TCB_SEND_LOCK(stcb);
331 	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
332 		unsigned int newcnt;
333 		struct sctp_stream_out *outs;
334 		struct sctp_stream_queue_pending *sp, *nsp;
335 		struct sctp_tmit_chunk *chk, *nchk;
336 
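		/*
		 * The peer offered fewer inbound streams than we
		 * pre-opened, so drop queued data for the now-invalid
		 * stream ids and notify the ULP of the failed sends.
		 */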
337 		/* abandon the upper streams */
338 		newcnt = ntohs(init->num_inbound_streams);
339 		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
340 			if (chk->rec.data.stream_number >= newcnt) {
341 				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
342 				asoc->send_queue_cnt--;
343 				if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
344 					asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
345 #ifdef INVARIANTS
346 				} else {
347 					panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
348 #endif
349 				}
350 				if (chk->data != NULL) {
351 					sctp_free_bufspace(stcb, asoc, chk, 1);
352 					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
353 					                0, chk, SCTP_SO_NOT_LOCKED);
354 					if (chk->data) {
355 						sctp_m_freem(chk->data);
356 						chk->data = NULL;
357 					}
358 				}
359 				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
360 				/*sa_ignore FREED_MEMORY*/
361 			}
362 		}
363 		if (asoc->strmout) {
364 			for (i = newcnt; i < asoc->pre_open_streams; i++) {
365 				outs = &asoc->strmout[i];
366 				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
367 					TAILQ_REMOVE(&outs->outqueue, sp, next);
368 					asoc->stream_queue_cnt--;
369 					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
370 					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
371 					if (sp->data) {
372 						sctp_m_freem(sp->data);
373 						sp->data = NULL;
374 					}
375 					if (sp->net) {
376 						sctp_free_remote_addr(sp->net);
377 						sp->net = NULL;
378 					}
379 					/* Free the chunk */
380 					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
381 					/*sa_ignore FREED_MEMORY*/
382 				}
383 			}
384 		}
385 		/* cut back the count */
386 		asoc->pre_open_streams = newcnt;
387 	}
388 	SCTP_TCB_SEND_UNLOCK(stcb);
389 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
390 
391 	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
392 	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
393 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
394 		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
395 	}
396 	/* This is the next one we expect */
397 	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
398 
399 	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
400 	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
401 
402 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
403 	/* open the requested streams */
404 
405 	if (asoc->strmin != NULL) {
406 		/* Free the old ones */
407 		struct sctp_queued_to_read *ctl, *nctl;
408 
409 		for (i = 0; i < asoc->streamincnt; i++) {
410 			TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) {
411 				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
412 				sctp_free_remote_addr(ctl->whoFrom);
413 				ctl->whoFrom = NULL;
414 				sctp_m_freem(ctl->data);
415 				ctl->data = NULL;
416 				sctp_free_a_readq(stcb, ctl);
417 			}
418 		}
419 		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
420 	}
421 	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
422 		asoc->streamincnt = ntohs(init->num_outbound_streams);
423 	} else {
424 		asoc->streamincnt = asoc->max_inbound_streams;
425 	}
426 	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
427 		    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
428 	if (asoc->strmin == NULL) {
429 		/* we didn't get memory for the streams! */
430 		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
431 		return (-1);
432 	}
433 	for (i = 0; i < asoc->streamincnt; i++) {
434 		asoc->strmin[i].stream_no = i;
435 		asoc->strmin[i].last_sequence_delivered = 0xffff;
436 		TAILQ_INIT(&asoc->strmin[i].inqueue);
437 		asoc->strmin[i].delivery_started = 0;
438 	}
439 	/*
440 	 * load_address_from_init will put the addresses into the
441 	 * association when the COOKIE is processed or the INIT-ACK is
442 	 * processed. Both types of COOKIEs, existing and new, call this
443 	 * routine. It will remove addresses that are no longer in the
444 	 * association (for the restarting case where addresses are
445 	 * removed). Up front when the INIT arrives we will discard it if it
446 	 * is a restart and new addresses have been added.
447 	 */
448 	/* sa_ignore MEMLEAK */
449 	return (0);
450 }
451 
452 /*
453  * INIT-ACK message processing/consumption returns value < 0 on error
454  */
455 static int
456 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
457                       struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
458                       struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
459                       struct sctp_nets *net, int *abort_no_unlock,
460 #if defined(__FreeBSD__)
461 		      uint8_t use_mflowid, uint32_t mflowid,
462 #endif
463                       uint32_t vrf_id)
464 {
465 	struct sctp_association *asoc;
466 	struct mbuf *op_err;
467 	int retval, abort_flag;
468 	uint32_t initack_limit;
469 	int nat_friendly = 0;
470 
471 	/* First verify that we have no illegal param's */
472 	abort_flag = 0;
473 
474 	op_err = sctp_arethere_unrecognized_parameters(m,
475 						       (offset + sizeof(struct sctp_init_chunk)),
476 						       &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
477 	if (abort_flag) {
478 		/* Send an abort and notify peer */
479 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
480 		*abort_no_unlock = 1;
481 		return (-1);
482 	}
483 	asoc = &stcb->asoc;
484 	asoc->peer_supports_nat = (uint8_t)nat_friendly;
485 	/* process the peer's parameters in the INIT-ACK */
486 	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
487 	if (retval < 0) {
488 		return (retval);
489 	}
490 	initack_limit = offset + ntohs(cp->ch.chunk_length);
491 	/* load all addresses */
492 	if ((retval = sctp_load_addresses_from_init(stcb, m,
493 	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
494 	    src, dst, NULL))) {
495 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
496 		                             "Problem with address parameters");
497 		SCTPDBG(SCTP_DEBUG_INPUT1,
498 			"Load addresses from INIT causes an abort %d\n",
499 			retval);
500 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
501 		                       src, dst, sh, op_err,
502 #if defined(__FreeBSD__)
503 		                       use_mflowid, mflowid,
504 #endif
505 		                       vrf_id, net->port);
506 		*abort_no_unlock = 1;
507 		return (-1);
508 	}
509 	/* if the peer doesn't support asconf, flush the asconf queue */
510 	if (asoc->asconf_supported == 0) {
511 		struct sctp_asconf_addr *param, *nparam;
512 
513 		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
514 			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
515 			SCTP_FREE(param, SCTP_M_ASC_ADDR);
516 		}
517 	}
518 
519 	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
520 	    stcb->asoc.local_hmacs);
521 	if (op_err) {
522 		sctp_queue_op_err(stcb, op_err);
523 		/* queuing will steal away the mbuf chain to the out queue */
524 		op_err = NULL;
525 	}
526 	/* extract the cookie and queue it to "echo" it back... */
527 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
528 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
529 			       stcb->asoc.overall_error_count,
530 			       0,
531 			       SCTP_FROM_SCTP_INPUT,
532 			       __LINE__);
533 	}
534 	stcb->asoc.overall_error_count = 0;
535 	net->error_count = 0;
536 
537 	/*
538 	 * Cancel the INIT timer. We do this first before queueing the
539 	 * cookie. We always cancel at the primary to ensure that we are
540 	 * canceling the timer started by the INIT which always goes to the
541 	 * primary.
542 	 */
543 	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
544 	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT+SCTP_LOC_4);
545 
546 	/* calculate the RTO */
547 	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
548 				      SCTP_RTT_FROM_NON_DATA);
549 
550 	retval = sctp_send_cookie_echo(m, offset, stcb, net);
551 	if (retval < 0) {
552 		/*
553 		 * No cookie, we probably should send an op error. But in any
554 		 * case, if there is no cookie in the INIT-ACK, we can
555 		 * abandon the peer, it's broken.
556 		 */
557 		if (retval == -3) {
558 			/* We abort with an error of missing mandatory param */
559 			op_err = sctp_generate_cause(SCTP_CAUSE_MISSING_PARAM, "");
560 			if (op_err) {
561 				/*
562 				 * Expand beyond to include the mandatory
563 				 * param cookie
564 				 */
565 				struct sctp_inv_mandatory_param *mp;
566 
567 				SCTP_BUF_LEN(op_err) =
568 				    sizeof(struct sctp_inv_mandatory_param);
569 				mp = mtod(op_err,
570 				    struct sctp_inv_mandatory_param *);
571 				/* Subtract the reserved param */
572 				mp->length =
573 				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
574 				mp->num_param = htonl(1);
575 				mp->param = htons(SCTP_STATE_COOKIE);
576 				mp->resv = 0;
577 			}
578 			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
579 			                       src, dst, sh, op_err,
580 #if defined(__FreeBSD__)
581 			                       use_mflowid, mflowid,
582 #endif
583 			                       vrf_id, net->port);
584 			*abort_no_unlock = 1;
585 		}
586 		return (retval);
587 	}
588 
589 	return (0);
590 }
591 
592 static void
593 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
594     struct sctp_tcb *stcb, struct sctp_nets *net)
595 {
596 	union sctp_sockstore store;
597 	struct sctp_nets *r_net, *f_net;
598 	struct timeval tv;
599 	int req_prim = 0;
600 	uint16_t old_error_counter;
601 
602 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
603 		/* Invalid length */
604 		return;
605 	}
606 
607 	memset(&store, 0, sizeof(store));
608 	switch (cp->heartbeat.hb_info.addr_family) {
609 #ifdef INET
610 	case AF_INET:
611 		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
612 			store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
613 #ifdef HAVE_SIN_LEN
614 			store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
615 #endif
616 			store.sin.sin_port = stcb->rport;
617 			memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
618 			       sizeof(store.sin.sin_addr));
619 		} else {
620 			return;
621 		}
622 		break;
623 #endif
624 #ifdef INET6
625 	case AF_INET6:
626 		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
627 			store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
628 #ifdef HAVE_SIN6_LEN
629 			store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
630 #endif
631 			store.sin6.sin6_port = stcb->rport;
632 			memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
633 		} else {
634 			return;
635 		}
636 		break;
637 #endif
638 #if defined(__Userspace__)
639 	case AF_CONN:
640 		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) {
641 			store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family;
642 #ifdef HAVE_SCONN_LEN
643 			store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len;
644 #endif
645 			store.sconn.sconn_port = stcb->rport;
646 			memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *));
647 		} else {
648 			return;
649 		}
650 		break;
651 #endif
652 	default:
653 		return;
654 	}
655 	r_net = sctp_findnet(stcb, &store.sa);
656 	if (r_net == NULL) {
657 		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
658 		return;
659 	}
660 	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
661 	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
662 	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
663 		/*
664 		 * If it's a HB and its random value is correct, we can
665 		 * confirm the destination.
666 		 */
667 		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
668 		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
669 			stcb->asoc.primary_destination = r_net;
670 			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
671 			f_net = TAILQ_FIRST(&stcb->asoc.nets);
672 			if (f_net != r_net) {
673 				/* first one on the list is NOT the primary
674 			 * sctp_cmpaddr() is much more efficient if
675 				 * the primary is the first on the list, make it
676 				 * so.
677 				 */
678 				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
679 				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
680 			}
681 			req_prim = 1;
682 		}
683 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
684 		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
685 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
686 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
687 	}
688 	old_error_counter = r_net->error_count;
689 	r_net->error_count = 0;
690 	r_net->hb_responded = 1;
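	/*
	 * The HEARTBEAT-ACK echoes the timestamp we placed in hb_info,
	 * so the elapsed time yields an RTT sample for this path.
	 */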
691 	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
692 	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
693 	/* Now lets do a RTO with this */
694 	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy,
695 					SCTP_RTT_FROM_NON_DATA);
696 	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
697 		r_net->dest_state |= SCTP_ADDR_REACHABLE;
698 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
699 				0, (void *)r_net, SCTP_SO_NOT_LOCKED);
700 	}
701 	if (r_net->dest_state & SCTP_ADDR_PF) {
702 		r_net->dest_state &= ~SCTP_ADDR_PF;
703 		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
704 	}
705 	if (old_error_counter > 0) {
706 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
707 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
708 	}
709 	if (r_net == stcb->asoc.primary_destination) {
710 		if (stcb->asoc.alternate) {
711 			/* release the alternate, primary is good */
712 			sctp_free_remote_addr(stcb->asoc.alternate);
713 			stcb->asoc.alternate = NULL;
714 		}
715 	}
716 	/* Mobility adaptation */
717 	if (req_prim) {
718 		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
719 		                                 SCTP_MOBILITY_BASE) ||
720 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
721 		                                SCTP_MOBILITY_FASTHANDOFF)) &&
722 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
723 		                                SCTP_MOBILITY_PRIM_DELETED)) {
724 
725 			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER+SCTP_LOC_7);
726 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
727 					SCTP_MOBILITY_FASTHANDOFF)) {
728 				sctp_assoc_immediate_retrans(stcb,
729 					stcb->asoc.primary_destination);
730 			}
731 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
732 					SCTP_MOBILITY_BASE)) {
733 				sctp_move_chunks_from_net(stcb,
734 					stcb->asoc.deleted_primary);
735 			}
736 			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
737 					stcb->asoc.deleted_primary);
738 		}
739 	}
740 }
741 
742 static int
743 sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
744 {
745 	/* return 0 means we want you to proceed with the abort
746 	 * non-zero means no abort processing
747 	*/
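	/*
	 * On a colliding-state error we restart the handshake with a
	 * freshly selected verification tag and rehash the TCB, so the
	 * new tag can distinguish this association from the colliding one.
	 */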
748 	struct sctpasochead *head;
749 
750 	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
751 		/* generate a new vtag and send init */
752 		LIST_REMOVE(stcb, sctp_asocs);
753 		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
754 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
755 		/* put it in the bucket in the vtag hash of assoc's for the system */
756 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
757 		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
758 		return (1);
759 	}
760 	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
761 		/* treat like a case where the cookie expired i.e.:
762 		* - dump current cookie.
763 		* - generate a new vtag.
764 		* - resend init.
765 		*/
766 		/* generate a new vtag and send init */
767 		LIST_REMOVE(stcb, sctp_asocs);
768 		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
769 		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
770 		sctp_stop_all_cookie_timers(stcb);
771 		sctp_toss_old_cookies(stcb, &stcb->asoc);
772 		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport,  1);
773 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
774 		/* put it in the bucket in the vtag hash of assoc's for the system */
775 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
776 		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
777 		return (1);
778 	}
779 	return (0);
780 }
781 
782 static int
783 sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
784 			      struct sctp_nets *net)
785 {
786 	/* return 0 means we want you to proceed with the abort
787 	 * non-zero means no abort processing
788 	 */
789 	if (stcb->asoc.auth_supported == 0) {
790 		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
791 		return (0);
792 	}
793 	sctp_asconf_send_nat_state_update(stcb, net);
794 	return (1);
795 }
796 
797 
798 static void
799 sctp_handle_abort(struct sctp_abort_chunk *abort,
800     struct sctp_tcb *stcb, struct sctp_nets *net)
801 {
802 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
803 	struct socket *so;
804 #endif
805 	uint16_t len;
806 	uint16_t error;
807 
808 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
809 	if (stcb == NULL)
810 		return;
811 
812 	len = ntohs(abort->ch.chunk_length);
813 	if (len > sizeof (struct sctp_chunkhdr)) {
814 		/* Need to check the cause codes for our
815 		 * two magic nat aborts which don't kill the assoc
816 		 * necessarily.
817 		 */
818 		struct sctp_missing_nat_state *natc;
819 
820 		natc = (struct sctp_missing_nat_state *)(abort + 1);
821 		error = ntohs(natc->cause);
822 		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
823 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
824 			                           abort->ch.chunk_flags);
825 			if (sctp_handle_nat_colliding_state(stcb)) {
826 				return;
827 			}
828 		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
829 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
830 			                           abort->ch.chunk_flags);
831 			if (sctp_handle_nat_missing_state(stcb, net)) {
832 				return;
833 			}
834 		}
835 	} else {
836 		error = 0;
837 	}
838 	/* stop any receive timers */
839 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_6);
840 	/* notify user of the abort and clean up... */
841 	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
842 	/* free the tcb */
843 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
844 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
845 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
846 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
847 	}
848 #ifdef SCTP_ASOCLOG_OF_TSNS
849 	sctp_print_out_track_log(stcb);
850 #endif
851 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
852 	so = SCTP_INP_SO(stcb->sctp_ep);
853 	atomic_add_int(&stcb->asoc.refcnt, 1);
854 	SCTP_TCB_UNLOCK(stcb);
855 	SCTP_SOCKET_LOCK(so, 1);
856 	SCTP_TCB_LOCK(stcb);
857 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
858 #endif
859 	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
860 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
861 			      SCTP_FROM_SCTP_INPUT+SCTP_LOC_6);
862 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
863 	SCTP_SOCKET_UNLOCK(so, 1);
864 #endif
865 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
866 }
867 
868 static void
869 sctp_start_net_timers(struct sctp_tcb *stcb)
870 {
871 	uint32_t cnt_hb_sent;
872 	struct sctp_nets *net;
873 
874 	cnt_hb_sent = 0;
875 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
876 		/* For each network start:
877 		 * 1) A pmtu timer.
878 		 * 2) A HB timer
879 		 * 3) If the dest is unconfirmed, send
880 		 *    a HB as well, provided fewer than max_hb_burst have
881 		 *    been sent.
882 		 */
883 		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
884 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
885 		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
886 		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
887 			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
888 			cnt_hb_sent++;
889 		}
890 	}
891 	if (cnt_hb_sent) {
892 		sctp_chunk_output(stcb->sctp_ep, stcb,
893 				  SCTP_OUTPUT_FROM_COOKIE_ACK,
894 				  SCTP_SO_NOT_LOCKED);
895 	}
896 }
897 
898 
899 static void
900 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
901     struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
902 {
903 	struct sctp_association *asoc;
904 	int some_on_streamwheel;
905 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
906 	struct socket *so;
907 #endif
908 
909 	SCTPDBG(SCTP_DEBUG_INPUT2,
910 		"sctp_handle_shutdown: handling SHUTDOWN\n");
911 	if (stcb == NULL)
912 		return;
913 	asoc = &stcb->asoc;
914 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
915 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
916 		return;
917 	}
918 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
919 		/* Shutdown NOT the expected size */
920 		return;
921 	} else {
922 		sctp_update_acked(stcb, cp, abort_flag);
923 		if (*abort_flag) {
924 			return;
925 		}
926 	}
927 	if (asoc->control_pdapi) {
928 		/* With a normal shutdown
929 		 * we assume the end of last record.
930 		 */
931 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
932 		asoc->control_pdapi->end_added = 1;
933 		asoc->control_pdapi->pdapi_aborted = 1;
934 		asoc->control_pdapi = NULL;
935 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
936 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
937 		so = SCTP_INP_SO(stcb->sctp_ep);
938 		atomic_add_int(&stcb->asoc.refcnt, 1);
939 		SCTP_TCB_UNLOCK(stcb);
940 		SCTP_SOCKET_LOCK(so, 1);
941 		SCTP_TCB_LOCK(stcb);
942 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
943 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
944 			/* assoc was freed while we were unlocked */
945 			SCTP_SOCKET_UNLOCK(so, 1);
946 			return;
947 		}
948 #endif
949 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
950 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
951 		SCTP_SOCKET_UNLOCK(so, 1);
952 #endif
953 	}
954 	/* goto SHUTDOWN_RECEIVED state to block new requests */
955 	if (stcb->sctp_socket) {
956 		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
957 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
958 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
959 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
960 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
961 			/* notify upper layer that peer has initiated a shutdown */
962 			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
963 
964 			/* reset time */
965 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
966 		}
967 	}
968 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
969 		/*
970 		 * stop the shutdown timer, since we WILL move to
971 		 * SHUTDOWN-ACK-SENT.
972 		 */
973 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_8);
974 	}
975 	/* Now is there unsent data on a stream somewhere? */
976 	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
977 
978 	if (!TAILQ_EMPTY(&asoc->send_queue) ||
979 	    !TAILQ_EMPTY(&asoc->sent_queue) ||
980 	    some_on_streamwheel) {
981 		/* By returning we will push more data out */
982 		return;
983 	} else {
984 		/* no outstanding data to send, so move on... */
985 		/* send SHUTDOWN-ACK */
986 		/* move to SHUTDOWN-ACK-SENT state */
987 		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
988 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
989 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
990 		}
991 		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
992 		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
993 		sctp_stop_timers_for_shutdown(stcb);
994 		sctp_send_shutdown_ack(stcb, net);
995 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
996 				 stcb, net);
997 	}
998 }
999 
1000 static void
1001 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
1002                          struct sctp_tcb *stcb,
1003                          struct sctp_nets *net)
1004 {
1005 	struct sctp_association *asoc;
1006 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1007 	struct socket *so;
1008 
1009 	so = SCTP_INP_SO(stcb->sctp_ep);
1010 #endif
1011 	SCTPDBG(SCTP_DEBUG_INPUT2,
1012 		"sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
1013 	if (stcb == NULL)
1014 		return;
1015 
1016 	asoc = &stcb->asoc;
1017 	/* process according to association state */
1018 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
1019 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
1020 		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
1021 		sctp_send_shutdown_complete(stcb, net, 1);
1022 		SCTP_TCB_UNLOCK(stcb);
1023 		return;
1024 	}
1025 	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
1026 	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
1027 		/* unexpected SHUTDOWN-ACK... so ignore... */
1028 		SCTP_TCB_UNLOCK(stcb);
1029 		return;
1030 	}
1031 	if (asoc->control_pdapi) {
1032 		/* With a normal shutdown
1033 		 * we assume the end of last record.
1034 		 */
1035 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1036 		asoc->control_pdapi->end_added = 1;
1037 		asoc->control_pdapi->pdapi_aborted = 1;
1038 		asoc->control_pdapi = NULL;
1039 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1040 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1041 		atomic_add_int(&stcb->asoc.refcnt, 1);
1042 		SCTP_TCB_UNLOCK(stcb);
1043 		SCTP_SOCKET_LOCK(so, 1);
1044 		SCTP_TCB_LOCK(stcb);
1045 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1046 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1047 			/* assoc was freed while we were unlocked */
1048 			SCTP_SOCKET_UNLOCK(so, 1);
1049 			return;
1050 		}
1051 #endif
1052 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1053 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1054 		SCTP_SOCKET_UNLOCK(so, 1);
1055 #endif
1056 	}
1057 #ifdef INVARIANTS
1058 	if (!TAILQ_EMPTY(&asoc->send_queue) ||
1059 	    !TAILQ_EMPTY(&asoc->sent_queue) ||
1060 	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
1061 		panic("Queues are not empty when handling SHUTDOWN-ACK");
1062 	}
1063 #endif
1064 	/* stop the timer */
1065 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_9);
1066 	/* send SHUTDOWN-COMPLETE */
1067 	sctp_send_shutdown_complete(stcb, net, 0);
1068 	/* notify upper layer protocol */
1069 	if (stcb->sctp_socket) {
1070 		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1071 		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1072 			stcb->sctp_socket->so_snd.sb_cc = 0;
1073 		}
1074 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
1075 	}
1076 	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
1077 	/* free the TCB but first save off the ep */
1078 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1079 	atomic_add_int(&stcb->asoc.refcnt, 1);
1080 	SCTP_TCB_UNLOCK(stcb);
1081 	SCTP_SOCKET_LOCK(so, 1);
1082 	SCTP_TCB_LOCK(stcb);
1083 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
1084 #endif
1085 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1086 			      SCTP_FROM_SCTP_INPUT+SCTP_LOC_10);
1087 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1088 	SCTP_SOCKET_UNLOCK(so, 1);
1089 #endif
1090 }
1091 
1092 /*
1093  * Skip past the param header and then we will find the chunk that caused the
1094  * problem. There are two possibilities, ASCONF or FWD-TSN; other than that,
1095  * our peer must be broken.
1096  */
1097 static void
1098 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
1099     struct sctp_nets *net)
1100 {
1101 	struct sctp_chunkhdr *chk;
1102 
1103 	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
1104 	switch (chk->chunk_type) {
1105 	case SCTP_ASCONF_ACK:
1106 	case SCTP_ASCONF:
1107 		sctp_asconf_cleanup(stcb, net);
1108 		break;
1109 	case SCTP_FORWARD_CUM_TSN:
1110 		stcb->asoc.prsctp_supported = 0;
1111 		break;
1112 	default:
1113 		SCTPDBG(SCTP_DEBUG_INPUT2,
1114 			"Peer does not support chunk type %d(%x)??\n",
1115 			chk->chunk_type, (uint32_t) chk->chunk_type);
1116 		break;
1117 	}
1118 }
1119 
1120 /*
1121  * Skip past the param header and then we will find the param that caused the
1122  * problem.  There are a number of params in an ASCONF OR the PR-SCTP param;
1123  * these will turn off specific features.
1124  * XXX: Is this the right thing to do?
1125  */
1126 static void
1127 sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1128 {
1129 	struct sctp_paramhdr *pbad;
1130 
1131 	pbad = phdr + 1;
1132 	switch (ntohs(pbad->param_type)) {
1133 		/* pr-sctp draft */
1134 	case SCTP_PRSCTP_SUPPORTED:
1135 		stcb->asoc.prsctp_supported = 0;
1136 		break;
1137 	case SCTP_SUPPORTED_CHUNK_EXT:
1138 		break;
1139 		/* draft-ietf-tsvwg-addip-sctp */
1140 	case SCTP_HAS_NAT_SUPPORT:
1141 	        stcb->asoc.peer_supports_nat = 0;
1142 	        break;
1143 	case SCTP_ADD_IP_ADDRESS:
1144 	case SCTP_DEL_IP_ADDRESS:
1145 	case SCTP_SET_PRIM_ADDR:
1146 		stcb->asoc.asconf_supported = 0;
1147 		break;
1148 	case SCTP_SUCCESS_REPORT:
1149 	case SCTP_ERROR_CAUSE_IND:
1150 		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1151 		SCTPDBG(SCTP_DEBUG_INPUT2,
1152 			"Turning off ASCONF to this strange peer\n");
1153 		stcb->asoc.asconf_supported = 0;
1154 		break;
1155 	default:
1156 		SCTPDBG(SCTP_DEBUG_INPUT2,
1157 			"Peer does not support param type %d(%x)??\n",
1158 			pbad->param_type, (uint32_t) pbad->param_type);
1159 		break;
1160 	}
1161 }
1162 
1163 static int
1164 sctp_handle_error(struct sctp_chunkhdr *ch,
1165     struct sctp_tcb *stcb, struct sctp_nets *net)
1166 {
1167 	int chklen;
1168 	struct sctp_paramhdr *phdr;
1169 	uint16_t error, error_type;
1170 	uint16_t error_len;
1171 	struct sctp_association *asoc;
1172 	int adjust;
1173 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1174 	struct socket *so;
1175 #endif
1176 
1177 	/* parse through all of the errors and process */
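	/*
	 * Walk the chain of error causes; each cause is advanced over in
	 * 4-byte padded units (SCTP_SIZE32) and only the first cause code
	 * is reported to the ULP.
	 */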
1178 	asoc = &stcb->asoc;
1179 	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
1180 	    sizeof(struct sctp_chunkhdr));
1181 	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
1182 	error = 0;
1183 	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
1184 		/* Process an Error Cause */
1185 		error_type = ntohs(phdr->param_type);
1186 		error_len = ntohs(phdr->param_length);
1187 		if ((error_len > chklen) || (error_len == 0)) {
1188 			/* invalid param length for this param */
1189 			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
1190 				chklen, error_len);
1191 			return (0);
1192 		}
1193 		if (error == 0) {
1194 			/* report the first error cause */
1195 			error = error_type;
1196 		}
1197 		switch (error_type) {
1198 		case SCTP_CAUSE_INVALID_STREAM:
1199 		case SCTP_CAUSE_MISSING_PARAM:
1200 		case SCTP_CAUSE_INVALID_PARAM:
1201 		case SCTP_CAUSE_NO_USER_DATA:
1202 			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
1203 				error_type);
1204 			break;
1205 		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1206 		        SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
1207 				ch->chunk_flags);
1208 			if (sctp_handle_nat_colliding_state(stcb)) {
1209 			  return (0);
1210 			}
1211 			break;
1212 		case SCTP_CAUSE_NAT_MISSING_STATE:
1213 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
1214 			                           ch->chunk_flags);
1215 			if (sctp_handle_nat_missing_state(stcb, net)) {
1216 			  return (0);
1217 			}
1218 			break;
1219 		case SCTP_CAUSE_STALE_COOKIE:
1220 			/*
1221 			 * We only act if we have echoed a cookie and are
1222 			 * waiting.
1223 			 */
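			/*
			 * The Stale Cookie cause carries the measure of
			 * staleness; remember twice that value so the next
			 * INIT can request a Cookie Preservative, and give
			 * up once max_init_times is exceeded.
			 */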
1224 			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
1225 				int *p;
1226 
1227 				p = (int *)((caddr_t)phdr + sizeof(*phdr));
1228 				/* Save the time doubled */
1229 				asoc->cookie_preserve_req = ntohl(*p) << 1;
1230 				asoc->stale_cookie_count++;
1231 				if (asoc->stale_cookie_count >
1232 				    asoc->max_init_times) {
1233 					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
1234 					/* now free the asoc */
1235 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1236 					so = SCTP_INP_SO(stcb->sctp_ep);
1237 					atomic_add_int(&stcb->asoc.refcnt, 1);
1238 					SCTP_TCB_UNLOCK(stcb);
1239 					SCTP_SOCKET_LOCK(so, 1);
1240 					SCTP_TCB_LOCK(stcb);
1241 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1242 #endif
1243 					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1244 							      SCTP_FROM_SCTP_INPUT+SCTP_LOC_11);
1245 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1246 					SCTP_SOCKET_UNLOCK(so, 1);
1247 #endif
1248 					return (-1);
1249 				}
1250 				/* blast back to INIT state */
1251 				sctp_toss_old_cookies(stcb, &stcb->asoc);
1252 				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1253 				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1254 				sctp_stop_all_cookie_timers(stcb);
1255 				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1256 			}
1257 			break;
1258 		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1259 			/*
1260 			 * Nothing we can do here, we don't do hostname
1261 			 * addresses so if the peer does not like my IPv6
1262 			 * (or IPv4 for that matter) it does not matter. If
1263 			 * they don't support that type of address, they can
1264 			 * NOT possibly get that packet type... i.e. with no
1265 			 * IPv6 you can't receive an IPv6 packet, so we can
1266 			 * safely ignore this one. If we ever added support
1267 			 * for HOSTNAME Addresses, then we would need to do
1268 			 * something here.
1269 			 */
1270 			break;
1271 		case SCTP_CAUSE_UNRECOG_CHUNK:
1272 			sctp_process_unrecog_chunk(stcb, phdr, net);
1273 			break;
1274 		case SCTP_CAUSE_UNRECOG_PARAM:
1275 			sctp_process_unrecog_param(stcb, phdr);
1276 			break;
1277 		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1278 			/*
1279 			 * We ignore this since the timer will drive out a
1280 			 * new cookie anyway and their timer will drive us
1281 			 * to send a SHUTDOWN_COMPLETE. We can't send one
1282 			 * here since we don't have their tag.
1283 			 */
1284 			break;
1285 		case SCTP_CAUSE_DELETING_LAST_ADDR:
1286 		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1287 		case SCTP_CAUSE_DELETING_SRC_ADDR:
1288 			/*
1289 			 * We should NOT get these here, but in a
1290 			 * ASCONF-ACK.
1291 			 */
1292 			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1293 				error_type);
1294 			break;
1295 		case SCTP_CAUSE_OUT_OF_RESC:
1296 			/*
1297 			 * And what, pray tell do we do with the fact that
1298 			 * the peer is out of resources? Not really sure we
1299 			 * could do anything but abort. I suspect this
1300 			 * should have come WITH an abort instead of in an
1301 			 * OP-ERROR.
1302 			 */
1303 			break;
1304 		default:
1305 			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1306 				error_type);
1307 			break;
1308 		}
1309 		adjust = SCTP_SIZE32(error_len);
1310 		chklen -= adjust;
1311 		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1312 	}
1313 	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, error, ch, SCTP_SO_NOT_LOCKED);
1314 	return (0);
1315 }
1316 
1317 static int
1318 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1319                      struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
1320                      struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1321                      struct sctp_nets *net, int *abort_no_unlock,
1322 #if defined(__FreeBSD__)
1323                      uint8_t use_mflowid, uint32_t mflowid,
1324 #endif
1325                      uint32_t vrf_id)
1326 {
1327 	struct sctp_init_ack *init_ack;
1328 	struct mbuf *op_err;
1329 
1330 	SCTPDBG(SCTP_DEBUG_INPUT2,
1331 		"sctp_handle_init_ack: handling INIT-ACK\n");
1332 
1333 	if (stcb == NULL) {
1334 		SCTPDBG(SCTP_DEBUG_INPUT2,
1335 			"sctp_handle_init_ack: TCB is null\n");
1336 		return (-1);
1337 	}
1338 	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1339 		/* Invalid length */
1340 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1341 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1342 		                       src, dst, sh, op_err,
1343 #if defined(__FreeBSD__)
1344 		                       use_mflowid, mflowid,
1345 #endif
1346 		                       vrf_id, net->port);
1347 		*abort_no_unlock = 1;
1348 		return (-1);
1349 	}
1350 	init_ack = &cp->init;
1351 	/* validate parameters */
1352 	if (init_ack->initiate_tag == 0) {
1353 		/* protocol error... send an abort */
1354 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1355 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1356 		                       src, dst, sh, op_err,
1357 #if defined(__FreeBSD__)
1358 		                       use_mflowid, mflowid,
1359 #endif
1360 		                       vrf_id, net->port);
1361 		*abort_no_unlock = 1;
1362 		return (-1);
1363 	}
1364 	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1365 		/* protocol error... send an abort */
1366 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1367 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1368 		                       src, dst, sh, op_err,
1369 #if defined(__FreeBSD__)
1370 		                       use_mflowid, mflowid,
1371 #endif
1372 		                       vrf_id, net->port);
1373 		*abort_no_unlock = 1;
1374 		return (-1);
1375 	}
1376 	if (init_ack->num_inbound_streams == 0) {
1377 		/* protocol error... send an abort */
1378 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1379 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1380 		                       src, dst, sh, op_err,
1381 #if defined(__FreeBSD__)
1382 		                       use_mflowid, mflowid,
1383 #endif
1384 		                       vrf_id, net->port);
1385 		*abort_no_unlock = 1;
1386 		return (-1);
1387 	}
1388 	if (init_ack->num_outbound_streams == 0) {
1389 		/* protocol error... send an abort */
1390 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1391 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1392 		                       src, dst, sh, op_err,
1393 #if defined(__FreeBSD__)
1394 		                       use_mflowid, mflowid,
1395 #endif
1396 		                       vrf_id, net->port);
1397 		*abort_no_unlock = 1;
1398 		return (-1);
1399 	}
1400 	/* process according to association state... */
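	/*
	 * Only COOKIE-WAIT fully consumes the INIT-ACK; in the other
	 * states the chunk is discarded, matching the RFC 4960 rule that
	 * an INIT ACK received outside COOKIE-WAIT is ignored.
	 */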
1401 	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1402 	case SCTP_STATE_COOKIE_WAIT:
1403 		/* this is the expected state for this chunk */
1404 		/* process the INIT-ACK parameters */
1405 		if (stcb->asoc.primary_destination->dest_state &
1406 		    SCTP_ADDR_UNCONFIRMED) {
1407 			/*
1408 			 * The primary is where we sent the INIT, we can
1409 			 * always consider it confirmed when the INIT-ACK is
1410 			 * returned. Do this before we load addresses
1411 			 * though.
1412 			 */
1413 			stcb->asoc.primary_destination->dest_state &=
1414 			    ~SCTP_ADDR_UNCONFIRMED;
1415 			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1416 			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1417 		}
1418 		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
1419 		                          net, abort_no_unlock,
1420 #if defined(__FreeBSD__)
1421 		                          use_mflowid, mflowid,
1422 #endif
1423 		                          vrf_id) < 0) {
1424 			/* error in parsing parameters */
1425 			return (-1);
1426 		}
1427 		/* update our state */
1428 		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1429 		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1430 
1431 		/* reset the RTO calc */
1432 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1433 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1434 				       stcb->asoc.overall_error_count,
1435 				       0,
1436 				       SCTP_FROM_SCTP_INPUT,
1437 				       __LINE__);
1438 		}
1439 		stcb->asoc.overall_error_count = 0;
1440 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1441 		/*
1442 		 * collapse the init timer back in case of an exponential
1443 		 * backoff
1444 		 */
1445 		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1446 		    stcb, net);
1447 		/*
1448 		 * the send at the end of the inbound data processing will
1449 		 * cause the cookie to be sent
1450 		 */
1451 		break;
1452 	case SCTP_STATE_SHUTDOWN_SENT:
1453 		/* incorrect state... discard */
1454 		break;
1455 	case SCTP_STATE_COOKIE_ECHOED:
1456 		/* incorrect state... discard */
1457 		break;
1458 	case SCTP_STATE_OPEN:
1459 		/* incorrect state... discard */
1460 		break;
1461 	case SCTP_STATE_EMPTY:
1462 	case SCTP_STATE_INUSE:
1463 	default:
1464 		/* incorrect state... discard */
1465 		return (-1);
1466 		break;
1467 	}
1468 	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1469 	return (0);
1470 }
1471 
1472 static struct sctp_tcb *
1473 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1474     struct sockaddr *src, struct sockaddr *dst,
1475     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1476     struct sctp_inpcb *inp, struct sctp_nets **netp,
1477     struct sockaddr *init_src, int *notification,
1478     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1479 #if defined(__FreeBSD__)
1480     uint8_t use_mflowid, uint32_t mflowid,
1481 #endif
1482     uint32_t vrf_id, uint16_t port);
1483 
1484 
1485 /*
1486  * handle a state cookie for an existing association
1487  * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
1488  *    note: this is a "split" mbuf and the cookie signature does not exist
1489  * offset: offset into mbuf to the cookie-echo chunk
1490  */
1491 static struct sctp_tcb *
1492 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1493     struct sockaddr *src, struct sockaddr *dst,
1494     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1495     struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1496     struct sockaddr *init_src, int *notification,
1497     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1498 #if defined(__FreeBSD__)
1499     uint8_t use_mflowid, uint32_t mflowid,
1500 #endif
1501     uint32_t vrf_id, uint16_t port)
1502 {
1503 	struct sctp_association *asoc;
1504 	struct sctp_init_chunk *init_cp, init_buf;
1505 	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1506 	struct sctp_nets *net;
1507 	struct mbuf *op_err;
1508 	int init_offset, initack_offset, i;
1509 	int retval;
1510 	int spec_flag = 0;
1511 	uint32_t how_indx;
1512 #if defined(SCTP_DETAILED_STR_STATS)
1513 	int j;
1514 #endif
1515 
1516 	net = *netp;
1517 	/* I know that the TCB is non-NULL from the caller */
1518 	asoc = &stcb->asoc;
1519 	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1520 		if (asoc->cookie_how[how_indx] == 0)
1521 			break;
1522 	}
1523 	if (how_indx < sizeof(asoc->cookie_how)) {
1524 		asoc->cookie_how[how_indx] = 1;
1525 	}
1526 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1527 		/* SHUTDOWN came in after sending INIT-ACK */
1528 		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1529 		op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
1530 		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
1531 #if defined(__FreeBSD__)
1532 		                   use_mflowid, mflowid,
1533 #endif
1534 		                   vrf_id, net->port);
1535 		if (how_indx < sizeof(asoc->cookie_how))
1536 			asoc->cookie_how[how_indx] = 2;
1537 		return (NULL);
1538 	}
1539 	/*
1540 	 * find and validate the INIT chunk in the cookie (peer's info); the
1541 	 * INIT should start after the cookie-echo header struct (chunk
1542 	 * header, state cookie header struct)
1543 	 */
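	/* note: this assignment also advances 'offset' past the cookie-echo header */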
1544 	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1545 
1546 	init_cp = (struct sctp_init_chunk *)
1547 		sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1548 			      (uint8_t *) & init_buf);
1549 	if (init_cp == NULL) {
1550 		/* could not pull an INIT chunk in cookie */
1551 		return (NULL);
1552 	}
1553 	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1554 		return (NULL);
1555 	}
1556 	/*
1557 	 * find and validate the INIT-ACK chunk in the cookie (my info); the
1558 	 * INIT-ACK follows the INIT chunk
1559 	 */
1560 	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
1561 	initack_cp = (struct sctp_init_ack_chunk *)
1562 		sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1563 			      (uint8_t *) & initack_buf);
1564 	if (initack_cp == NULL) {
1565 		/* could not pull INIT-ACK chunk in cookie */
1566 		return (NULL);
1567 	}
1568 	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1569 		return (NULL);
1570 	}
1571 	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1572 	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1573 		/*
1574 		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1575 		 * to get into the OPEN state
1576 		 */
1577 		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1578 			/*-
1579 			 * Oops, this means that we somehow generated two identical
1580 			 * vtags. I.e., we did:
1581 			 *  Us               Peer
1582 			 *   <---INIT(tag=a)------
1583 			 *   ----INIT-ACK(tag=t)-->
1584 			 *   ----INIT(tag=t)------> *1
1585 			 *   <---INIT-ACK(tag=a)---
1586 			 *   <----CE(tag=t)-------- *2
1587 			 *
1588 			 * At point *1 we should be generating a different
1589 			 * tag t'. Which means we would throw away the CE and send
1590 			 * ours instead. Basically this is case C (throw away side).
1591 			 */
1592 			if (how_indx < sizeof(asoc->cookie_how))
1593 				asoc->cookie_how[how_indx] = 17;
1594 			return (NULL);
1595 
1596 		}
1597 		switch (SCTP_GET_STATE(asoc)) {
1598 			case SCTP_STATE_COOKIE_WAIT:
1599 			case SCTP_STATE_COOKIE_ECHOED:
1600 				/*
1601 				 * INIT was sent but got a COOKIE_ECHO with the
1602 				 * correct tags... just accept it, but we must
1603 				 * process the init so that we can make sure we
1604 				 * have the right seq no's.
1605 				 */
1606 				/* First we must process the INIT !! */
1607 				retval = sctp_process_init(init_cp, stcb);
1608 				if (retval < 0) {
1609 					if (how_indx < sizeof(asoc->cookie_how))
1610 						asoc->cookie_how[how_indx] = 3;
1611 					return (NULL);
1612 				}
1613 				/* we have already processed the INIT so no problem */
1614 				sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1615 						net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_12);
1616 				sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_13);
1617 				/* update current state */
1618 				if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1619 					SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1620 				else
1621 					SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1622 
1623 				SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1624 				if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1625 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1626 							 stcb->sctp_ep, stcb, asoc->primary_destination);
1627 				}
1628 				SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1629 				sctp_stop_all_cookie_timers(stcb);
1630 				if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1631 				     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1632 				    (inp->sctp_socket->so_qlimit == 0)
1633 					) {
1634 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1635 					struct socket *so;
1636 #endif
1637 					/*
1638 					 * Here is where the collision case lands if
1639 					 * we did a connect() and a complete
1640 					 * INIT/INIT-ACK/COOKIE exchange finished
1641 					 * before the INIT-ACK came back.
1642 					 */
1643 					stcb->sctp_ep->sctp_flags |=
1644 						SCTP_PCB_FLAGS_CONNECTED;
1645 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1646 					so = SCTP_INP_SO(stcb->sctp_ep);
1647 					atomic_add_int(&stcb->asoc.refcnt, 1);
1648 					SCTP_TCB_UNLOCK(stcb);
1649 					SCTP_SOCKET_LOCK(so, 1);
1650 					SCTP_TCB_LOCK(stcb);
1651 					atomic_add_int(&stcb->asoc.refcnt, -1);
1652 					if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1653 						SCTP_SOCKET_UNLOCK(so, 1);
1654 						return (NULL);
1655 					}
1656 #endif
1657 					soisconnected(stcb->sctp_socket);
1658 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1659 					SCTP_SOCKET_UNLOCK(so, 1);
1660 #endif
1661 				}
1662 				/* notify upper layer */
1663 				*notification = SCTP_NOTIFY_ASSOC_UP;
1664 				/*
1665 				 * since we did not send a HB make sure we
1666 				 * don't double things
1667 				 */
1668 				net->hb_responded = 1;
1669 				net->RTO = sctp_calculate_rto(stcb, asoc, net,
1670 							      &cookie->time_entered,
1671 							      sctp_align_unsafe_makecopy,
1672 							      SCTP_RTT_FROM_NON_DATA);
1673 
1674 				if (stcb->asoc.sctp_autoclose_ticks &&
1675 				    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1676 					sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1677 							 inp, stcb, NULL);
1678 				}
1679 				break;
1680 			default:
1681 				/*
1682 				 * we're in the OPEN state (or beyond), so
1683 				 * peer must have simply lost the COOKIE-ACK
1684 				 */
1685 				break;
1686 		}	/* end switch */
1687 		sctp_stop_all_cookie_timers(stcb);
1688 		/*
1689 		 * We ignore the return code here.. not sure if we should
1690 		 * somehow abort.. but we do have an existing asoc. This
1691 		 * really should not fail.
1692 		 */
1693 		if (sctp_load_addresses_from_init(stcb, m,
1694 						  init_offset + sizeof(struct sctp_init_chunk),
1695 						  initack_offset, src, dst, init_src)) {
1696 			if (how_indx < sizeof(asoc->cookie_how))
1697 				asoc->cookie_how[how_indx] = 4;
1698 			return (NULL);
1699 		}
1700 		/* respond with a COOKIE-ACK */
1701 		sctp_toss_old_cookies(stcb, asoc);
1702 		sctp_send_cookie_ack(stcb);
1703 		if (how_indx < sizeof(asoc->cookie_how))
1704 			asoc->cookie_how[how_indx] = 5;
1705 		return (stcb);
1706 	}
1707 
1708 	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1709 	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1710 	    cookie->tie_tag_my_vtag == 0 &&
1711 	    cookie->tie_tag_peer_vtag == 0) {
1712 		/*
1713 		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1714 		 */
1715 		if (how_indx < sizeof(asoc->cookie_how))
1716 			asoc->cookie_how[how_indx] = 6;
1717 		return (NULL);
1718 	}
1719 	/* If the peer supports NAT and the association is already
1720 	 * established, send back an ABORT(colliding state).
1721 	 */
1722 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN)  &&
1723 	    (asoc->peer_supports_nat) &&
1724 	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1725 	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1726 	     (asoc->peer_vtag == 0)))) {
1727 		/* Special case - the peer supports NAT. We may have
1728 		 * handed out the same tag on two INITs since one of them
1729 		 * was never established, i.e. we get an INIT from host-1
1730 		 * behind the NAT and respond with tag-a, then get an INIT
1731 		 * from host-2 behind the NAT and hand out tag-a again.
1732 		 * Then we bring up host-1's (or 2's) assoc, and the cookie
1733 		 * arrives from host-2 (or 1). Now we have colliding state and
1734 		 * must send an abort here with the colliding state indication.
1735 		 */
1736 		op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
1737 		sctp_send_abort(m, iphlen,  src, dst, sh, 0, op_err,
1738 #if defined(__FreeBSD__)
1739 		                use_mflowid, mflowid,
1740 #endif
1741 		                vrf_id, port);
1742 		return (NULL);
1743 	}
1744 	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1745 	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1746 	     (asoc->peer_vtag == 0))) {
1747 		/*
1748 		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1749 		 * should be ok, re-accept peer info
1750 		 */
1751 		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1752 			/* Extension of case C.
1753 			 * If we hit this, then the random number
1754 			 * generator returned the same vtag when we
1755 			 * first sent our INIT-ACK and when we later sent
1756 			 * our INIT. The side with the seq numbers that are
1757 			 * different will be the one that normally would
1758 			 * have hit case C. This in effect "extends" our vtags
1759 			 * in this collision case to be 64 bits. The same collision
1760 			 * could occur, i.e. both the vtag and the seq number come
1761 			 * out the same twice in a row, but that is much less
1762 			 * likely. If it did happen we would proceed through and
1763 			 * bring up the assoc; we may end up with the wrong stream
1764 			 * setup however, which would be bad, but there is
1765 			 * no way to tell until we send on a stream that does
1766 			 * not exist :-)
1767 			 */
1768 			if (how_indx < sizeof(asoc->cookie_how))
1769 				asoc->cookie_how[how_indx] = 7;
1770 
1771 			return (NULL);
1772 		}
1773 		if (how_indx < sizeof(asoc->cookie_how))
1774 			asoc->cookie_how[how_indx] = 8;
1775 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_14);
1776 		sctp_stop_all_cookie_timers(stcb);
1777 		/*
1778 		 * since we did not send a HB make sure we don't double
1779 		 * things
1780 		 */
1781 		net->hb_responded = 1;
1782 		if (stcb->asoc.sctp_autoclose_ticks &&
1783 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1784 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1785 					 NULL);
1786 		}
1787 		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1788 		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1789 
1790 		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1791 			/* Ok, the peer probably discarded our
1792 			 * data (if we echoed a cookie+data). So anything
1793 			 * on the sent_queue should be marked for
1794 			 * retransmit. We may not get something to
1795 			 * kick us, so it COULD still take a timeout
1796 			 * to move these, but it can't hurt to mark them.
1797 			 */
1798 			struct sctp_tmit_chunk *chk;
1799 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1800 				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1801 					chk->sent = SCTP_DATAGRAM_RESEND;
1802 					sctp_flight_size_decrease(chk);
1803 					sctp_total_flight_decrease(stcb, chk);
1804 					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1805 					spec_flag++;
1806 				}
1807 			}
1808 
1809 		}
1810 		/* process the INIT info (peer's info) */
1811 		retval = sctp_process_init(init_cp, stcb);
1812 		if (retval < 0) {
1813 			if (how_indx < sizeof(asoc->cookie_how))
1814 				asoc->cookie_how[how_indx] = 9;
1815 			return (NULL);
1816 		}
1817 		if (sctp_load_addresses_from_init(stcb, m,
1818 						  init_offset + sizeof(struct sctp_init_chunk),
1819 						  initack_offset, src, dst, init_src)) {
1820 			if (how_indx < sizeof(asoc->cookie_how))
1821 				asoc->cookie_how[how_indx] = 10;
1822 			return (NULL);
1823 		}
1824 		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1825 		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1826 			*notification = SCTP_NOTIFY_ASSOC_UP;
1827 
1828 			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1829 			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1830 			    (inp->sctp_socket->so_qlimit == 0)) {
1831 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1832 				struct socket *so;
1833 #endif
1834 				stcb->sctp_ep->sctp_flags |=
1835 					SCTP_PCB_FLAGS_CONNECTED;
1836 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1837 				so = SCTP_INP_SO(stcb->sctp_ep);
1838 				atomic_add_int(&stcb->asoc.refcnt, 1);
1839 				SCTP_TCB_UNLOCK(stcb);
1840 				SCTP_SOCKET_LOCK(so, 1);
1841 				SCTP_TCB_LOCK(stcb);
1842 				atomic_add_int(&stcb->asoc.refcnt, -1);
1843 				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1844 					SCTP_SOCKET_UNLOCK(so, 1);
1845 					return (NULL);
1846 				}
1847 #endif
1848 				soisconnected(stcb->sctp_socket);
1849 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1850 				SCTP_SOCKET_UNLOCK(so, 1);
1851 #endif
1852 			}
1853 			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1854 				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1855 			else
1856 				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1857 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1858 		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1859 			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1860 		} else {
1861 			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1862 		}
1863 		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1864 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1865 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1866 					 stcb->sctp_ep, stcb, asoc->primary_destination);
1867 		}
1868 		sctp_stop_all_cookie_timers(stcb);
1869 		sctp_toss_old_cookies(stcb, asoc);
1870 		sctp_send_cookie_ack(stcb);
1871 		if (spec_flag) {
1872 			/* Only done if we have retransmissions marked. What
1873 			 * this call does is get only the COOKIE-ACK out;
1874 			 * then, when we return, the normal call to
1875 			 * sctp_chunk_output will get the retransmissions out
1876 			 * behind this.
1877 			 */
1878 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1879 		}
1880 		if (how_indx < sizeof(asoc->cookie_how))
1881 			asoc->cookie_how[how_indx] = 11;
1882 
1883 		return (stcb);
1884 	}
1885 	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1886 	     ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1887 	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1888 	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1889 	    cookie->tie_tag_peer_vtag != 0) {
1890 		struct sctpasochead *head;
1891 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1892 		struct socket *so;
1893 #endif
1894 
1895 		if (asoc->peer_supports_nat) {
1896 			/* This is a gross gross hack.
1897 			 * Just call the cookie_new code since we
1898 			 * are allowing a duplicate association.
1899 			 * I hope this works...
1900 			 */
1901 			return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
1902 			                                sh, cookie, cookie_len,
1903 			                                inp, netp, init_src, notification,
1904 			                                auth_skipped, auth_offset, auth_len,
1905 #if defined(__FreeBSD__)
1906 			                                use_mflowid, mflowid,
1907 #endif
1908 			                                vrf_id, port));
1909 		}
1910 		/*
1911 		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1912 		 */
1913 		/* temp code */
1914 		if (how_indx < sizeof(asoc->cookie_how))
1915 			asoc->cookie_how[how_indx] = 12;
1916 		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_15);
1917 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_16);
1918 
1919 		/* notify upper layer */
1920 		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1921 		atomic_add_int(&stcb->asoc.refcnt, 1);
1922 		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1923 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1924 		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1925 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1926 		}
1927 		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1928 			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1929 		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1930 			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1931 		}
1932 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1933 			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1934 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1935 					 stcb->sctp_ep, stcb, asoc->primary_destination);
1936 
1937 		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1938 			/* move to OPEN state, if not in SHUTDOWN_SENT */
1939 			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1940 		}
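		/* peer restart: re-seed our sequence-number state from the INIT-ACK carried in the cookie */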
1941 		asoc->pre_open_streams =
1942 			ntohs(initack_cp->init.num_outbound_streams);
1943 		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1944 		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1945 		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1946 
1947 		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1948 
1949 		asoc->str_reset_seq_in = asoc->init_seq_number;
1950 
1951 		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1952 		if (asoc->mapping_array) {
1953 			memset(asoc->mapping_array, 0,
1954 			       asoc->mapping_array_size);
1955 		}
1956 		if (asoc->nr_mapping_array) {
1957 			memset(asoc->nr_mapping_array, 0,
1958 			    asoc->mapping_array_size);
1959 		}
1960 		SCTP_TCB_UNLOCK(stcb);
1961 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1962 		so = SCTP_INP_SO(stcb->sctp_ep);
1963 		SCTP_SOCKET_LOCK(so, 1);
1964 #endif
1965 		SCTP_INP_INFO_WLOCK();
1966 		SCTP_INP_WLOCK(stcb->sctp_ep);
1967 		SCTP_TCB_LOCK(stcb);
1968 		atomic_add_int(&stcb->asoc.refcnt, -1);
1969 		/* send up all the data */
1970 		SCTP_TCB_SEND_LOCK(stcb);
1971 
1972 		sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED);
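		/* clear per-stream output bookkeeping so the restarted association starts its streams fresh */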
1973 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1974 			stcb->asoc.strmout[i].chunks_on_queues = 0;
1975 #if defined(SCTP_DETAILED_STR_STATS)
1976 			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1977 				asoc->strmout[i].abandoned_sent[j] = 0;
1978 				asoc->strmout[i].abandoned_unsent[j] = 0;
1979 			}
1980 #else
1981 			asoc->strmout[i].abandoned_sent[0] = 0;
1982 			asoc->strmout[i].abandoned_unsent[0] = 0;
1983 #endif
1984 			stcb->asoc.strmout[i].stream_no = i;
1985 			stcb->asoc.strmout[i].next_sequence_send = 0;
1986 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1987 		}
1988 		/* process the INIT-ACK info (my info) */
1989 		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1990 		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1991 
1992 		/* pull from vtag hash */
1993 		LIST_REMOVE(stcb, sctp_asocs);
1994 		/* re-insert to new vtag position */
1995 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1996 								    SCTP_BASE_INFO(hashasocmark))];
1997 		/*
1998 		 * put it in the bucket in the vtag hash of assoc's for the
1999 		 * system
2000 		 */
2001 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
2002 
2003 		SCTP_TCB_SEND_UNLOCK(stcb);
2004 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2005 		SCTP_INP_INFO_WUNLOCK();
2006 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2007 		SCTP_SOCKET_UNLOCK(so, 1);
2008 #endif
2009 		asoc->total_flight = 0;
2010 		asoc->total_flight_count = 0;
2011 		/* process the INIT info (peer's info) */
2012 		retval = sctp_process_init(init_cp, stcb);
2013 		if (retval < 0) {
2014 			if (how_indx < sizeof(asoc->cookie_how))
2015 				asoc->cookie_how[how_indx] = 13;
2016 
2017 			return (NULL);
2018 		}
2019 		/*
2020 		 * since we did not send a HB make sure we don't double
2021 		 * things
2022 		 */
2023 		net->hb_responded = 1;
2024 
2025 		if (sctp_load_addresses_from_init(stcb, m,
2026 						  init_offset + sizeof(struct sctp_init_chunk),
2027 						  initack_offset, src, dst, init_src)) {
2028 			if (how_indx < sizeof(asoc->cookie_how))
2029 				asoc->cookie_how[how_indx] = 14;
2030 
2031 			return (NULL);
2032 		}
2033 		/* respond with a COOKIE-ACK */
2034 		sctp_stop_all_cookie_timers(stcb);
2035 		sctp_toss_old_cookies(stcb, asoc);
2036 		sctp_send_cookie_ack(stcb);
2037 		if (how_indx < sizeof(asoc->cookie_how))
2038 			asoc->cookie_how[how_indx] = 15;
2039 
2040 		return (stcb);
2041 	}
2042 	if (how_indx < sizeof(asoc->cookie_how))
2043 		asoc->cookie_how[how_indx] = 16;
2044 	/* all other cases... */
2045 	return (NULL);
2046 }
2047 
2048 
2049 /*
2050  * handle a state cookie for a new association
2051  * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
2052  *    note: this is a "split" mbuf and the cookie signature does not exist
2053  * offset: offset into mbuf to the cookie-echo chunk
2054  * length: length of the cookie chunk; to: where the init was from; returns a new TCB
2055  */
2056 static struct sctp_tcb *
2057 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
2058     struct sockaddr *src, struct sockaddr *dst,
2059     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
2060     struct sctp_inpcb *inp, struct sctp_nets **netp,
2061     struct sockaddr *init_src, int *notification,
2062     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2063 #if defined(__FreeBSD__)
2064     uint8_t use_mflowid, uint32_t mflowid,
2065 #endif
2066     uint32_t vrf_id, uint16_t port)
2067 {
2068 	struct sctp_tcb *stcb;
2069 	struct sctp_init_chunk *init_cp, init_buf;
2070 	struct sctp_init_ack_chunk *initack_cp, initack_buf;
2071 	union sctp_sockstore store;
2072 	struct sctp_association *asoc;
2073 	int init_offset, initack_offset, initack_limit;
2074 	int retval;
2075 	int error = 0;
2076 	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
2077 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2078 	struct socket *so;
2079 
2080 	so = SCTP_INP_SO(inp);
2081 #endif
2082 
2083 	/*
2084 	 * find and validate the INIT chunk in the cookie (peer's info); the
2085 	 * INIT should start after the cookie-echo header struct (chunk
2086 	 * header, state cookie header struct)
2087 	 */
2088 	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
2089 	init_cp = (struct sctp_init_chunk *)
2090 	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
2091 	    (uint8_t *) & init_buf);
2092 	if (init_cp == NULL) {
2093 		/* could not pull an INIT chunk in cookie */
2094 		SCTPDBG(SCTP_DEBUG_INPUT1,
2095 			"process_cookie_new: could not pull INIT chunk hdr\n");
2096 		return (NULL);
2097 	}
2098 	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
2099 		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
2100 		return (NULL);
2101 	}
2102 	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
2103 	/*
2104 	 * find and validate the INIT-ACK chunk in the cookie (my info); the
2105 	 * INIT-ACK follows the INIT chunk
2106 	 */
2107 	initack_cp = (struct sctp_init_ack_chunk *)
2108 	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
2109 	    (uint8_t *) & initack_buf);
2110 	if (initack_cp == NULL) {
2111 		/* could not pull INIT-ACK chunk in cookie */
2112 		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
2113 		return (NULL);
2114 	}
2115 	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
2116 		return (NULL);
2117 	}
2118 	/*
2119 	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
2120 	 * "initack_limit" value.  This is because the chk_length field
2121 	 * includes the length of the cookie, but the cookie is omitted when
2122 	 * the INIT and INIT_ACK are tacked onto the cookie...
2123 	 */
2124 	initack_limit = offset + cookie_len;
2125 
2126 	/*
2127 	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
2128 	 * and populate it
2129 	 */
2130 
2131 	/*
2132 	 * Here we do a trick: we pass NULL for the proc/thread argument.
2133 	 * We do this since in effect we only use the p argument when
2134 	 * the socket is unbound and we must do an implicit bind.
2135 	 * Since we are getting a cookie, we cannot be unbound.
2136 	 */
2137 	stcb = sctp_aloc_assoc(inp, init_src, &error,
2138 			       ntohl(initack_cp->init.initiate_tag), vrf_id,
2139 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
2140 			       (struct thread *)NULL
2141 #elif defined(__Windows__)
2142 			       (PKTHREAD)NULL
2143 #else
2144 			       (struct proc *)NULL
2145 #endif
2146 			       );
2147 	if (stcb == NULL) {
2148 		struct mbuf *op_err;
2149 
2150 		/* memory problem? */
2151 		SCTPDBG(SCTP_DEBUG_INPUT1,
2152 			"process_cookie_new: no room for another TCB!\n");
2153 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2154 		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2155 		                       src, dst, sh, op_err,
2156 #if defined(__FreeBSD__)
2157 		                       use_mflowid, mflowid,
2158 #endif
2159 		                       vrf_id, port);
2160 		return (NULL);
2161 	}
2162 	/* get the correct sctp_nets */
2163 	if (netp)
2164 		*netp = sctp_findnet(stcb, init_src);
2165 
2166 	asoc = &stcb->asoc;
2167 	/* get scope variables out of cookie */
2168 	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
2169 	asoc->scope.site_scope = cookie->site_scope;
2170 	asoc->scope.local_scope = cookie->local_scope;
2171 	asoc->scope.loopback_scope = cookie->loopback_scope;
2172 
2173 #if defined(__Userspace__)
2174 	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2175 	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal) ||
2176 	    (asoc->scope.conn_addr_legal != cookie->conn_addr_legal)) {
2177 #else
2178 	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2179 	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
2180 #endif
2181 		struct mbuf *op_err;
2182 
2183 		/*
2184 		 * Houston we have a problem. The EP changed while the
2185 		 * cookie was in flight. Only recourse is to abort the
2186 		 * association.
2187 		 */
2188 		atomic_add_int(&stcb->asoc.refcnt, 1);
2189 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2190 		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2191 				       src, dst, sh, op_err,
2192 #if defined(__FreeBSD__)
2193 		                       use_mflowid, mflowid,
2194 #endif
2195 		                       vrf_id, port);
2196 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2197 		SCTP_TCB_UNLOCK(stcb);
2198 		SCTP_SOCKET_LOCK(so, 1);
2199 		SCTP_TCB_LOCK(stcb);
2200 #endif
2201 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2202 				      SCTP_FROM_SCTP_INPUT+SCTP_LOC_16);
2203 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2204 		SCTP_SOCKET_UNLOCK(so, 1);
2205 #endif
2206 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2207 		return (NULL);
2208 	}
2209 	/* process the INIT-ACK info (my info) */
2210 	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2211 	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2212 	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
2213 	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2214 	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2215 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2216 	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2217 	asoc->str_reset_seq_in = asoc->init_seq_number;
2218 
2219 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2220 
2221 	/* process the INIT info (peer's info) */
2222 	if (netp)
2223 		retval = sctp_process_init(init_cp, stcb);
2224 	else
2225 		retval = 0;
2226 	if (retval < 0) {
2227 		atomic_add_int(&stcb->asoc.refcnt, 1);
2228 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2229 		SCTP_TCB_UNLOCK(stcb);
2230 		SCTP_SOCKET_LOCK(so, 1);
2231 		SCTP_TCB_LOCK(stcb);
2232 #endif
2233 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT+SCTP_LOC_16);
2234 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2235 		SCTP_SOCKET_UNLOCK(so, 1);
2236 #endif
2237 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2238 		return (NULL);
2239 	}
2240 	/* load all addresses */
2241 	if (sctp_load_addresses_from_init(stcb, m,
2242 	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
2243 	    src, dst, init_src)) {
2244 		atomic_add_int(&stcb->asoc.refcnt, 1);
2245 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2246 		SCTP_TCB_UNLOCK(stcb);
2247 		SCTP_SOCKET_LOCK(so, 1);
2248 		SCTP_TCB_LOCK(stcb);
2249 #endif
2250 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT+SCTP_LOC_17);
2251 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2252 		SCTP_SOCKET_UNLOCK(so, 1);
2253 #endif
2254 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2255 		return (NULL);
2256 	}
2257 	/*
2258 	 * verify any preceding AUTH chunk that was skipped
2259 	 */
2260 	/* pull the local authentication parameters from the cookie/init-ack */
2261 	sctp_auth_get_cookie_params(stcb, m,
2262 	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2263 	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
2264 	if (auth_skipped) {
2265 		struct sctp_auth_chunk *auth;
2266 
2267 		auth = (struct sctp_auth_chunk *)
2268 		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
2269 		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
2270 			/* auth HMAC failed, dump the assoc and packet */
2271 			SCTPDBG(SCTP_DEBUG_AUTH1,
2272 				"COOKIE-ECHO: AUTH failed\n");
2273 			atomic_add_int(&stcb->asoc.refcnt, 1);
2274 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2275 			SCTP_TCB_UNLOCK(stcb);
2276 			SCTP_SOCKET_LOCK(so, 1);
2277 			SCTP_TCB_LOCK(stcb);
2278 #endif
2279 			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT+SCTP_LOC_18);
2280 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2281 			SCTP_SOCKET_UNLOCK(so, 1);
2282 #endif
2283 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2284 			return (NULL);
2285 		} else {
2286 			/* remaining chunks checked... good to go */
2287 			stcb->asoc.authenticated = 1;
2288 		}
2289 	}
2290 	/* update current state */
2291 	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2292 	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2293 	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2294 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2295 				 stcb->sctp_ep, stcb, asoc->primary_destination);
2296 	}
2297 	sctp_stop_all_cookie_timers(stcb);
2298 	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
2299 	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2300 
2301 	/*
2302 	 * if we're doing ASCONFs, check to see if we have any new local
2303 	 * addresses that need to get added to the peer (e.g. addresses
2304 	 * changed while the cookie echo was in flight).  This needs to be
2305 	 * done after we go to the OPEN state to do the correct asconf
2306 	 * processing. Otherwise, make sure we have the correct addresses in
2307 	 * our lists.
2308 	 */
2309 
2310 	/* warning, we re-use the sockstore union (sin, sin6, sconn) here! */
2311 	/* pull in local_address (our "from" address) */
2312 	switch (cookie->laddr_type) {
2313 #ifdef INET
2314 	case SCTP_IPV4_ADDRESS:
2315 		/* source addr is IPv4 */
2316 		memset(&store.sin, 0, sizeof(struct sockaddr_in));
2317 		store.sin.sin_family = AF_INET;
2318 #ifdef HAVE_SIN_LEN
2319 		store.sin.sin_len = sizeof(struct sockaddr_in);
2320 #endif
2321 		store.sin.sin_addr.s_addr = cookie->laddress[0];
2322 		break;
2323 #endif
2324 #ifdef INET6
2325 	case SCTP_IPV6_ADDRESS:
2326 		/* source addr is IPv6 */
2327 		memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
2328 		store.sin6.sin6_family = AF_INET6;
2329 #ifdef HAVE_SIN6_LEN
2330 		store.sin6.sin6_len = sizeof(struct sockaddr_in6);
2331 #endif
2332 		store.sin6.sin6_scope_id = cookie->scope_id;
2333 		memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
2334 		break;
2335 #endif
2336 #if defined(__Userspace__)
2337 	case SCTP_CONN_ADDRESS:
2338 		/* source addr is conn */
2339 		memset(&store.sconn, 0, sizeof(struct sockaddr_conn));
2340 		store.sconn.sconn_family = AF_CONN;
2341 #ifdef HAVE_SCONN_LEN
2342 		store.sconn.sconn_len = sizeof(struct sockaddr_conn);
2343 #endif
2344 		memcpy(&store.sconn.sconn_addr, cookie->laddress, sizeof(void *));
2345 		break;
2346 #endif
2347 	default:
2348 		atomic_add_int(&stcb->asoc.refcnt, 1);
2349 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2350 		SCTP_TCB_UNLOCK(stcb);
2351 		SCTP_SOCKET_LOCK(so, 1);
2352 		SCTP_TCB_LOCK(stcb);
2353 #endif
2354 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT+SCTP_LOC_19);
2355 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2356 		SCTP_SOCKET_UNLOCK(so, 1);
2357 #endif
2358 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2359 		return (NULL);
2360 	}
2361 
2362 	/* set up to notify upper layer */
2363 	*notification = SCTP_NOTIFY_ASSOC_UP;
2364 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2365 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2366 	    (inp->sctp_socket->so_qlimit == 0)) {
2367 		/*
2368 		 * This is an endpoint that called connect(); how it got a
2369 		 * cookie that is NEW is a bit of a mystery. It must be that
2370 		 * the INIT was sent, but before it got there a complete
2371 		 * INIT/INIT-ACK/COOKIE exchange arrived. Of course it then
2372 		 * should have gone to the other code path, not here; oh well,
2373 		 * a bit of protection is worth having.
2374 		 */
2375 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2376 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2377 		atomic_add_int(&stcb->asoc.refcnt, 1);
2378 		SCTP_TCB_UNLOCK(stcb);
2379 		SCTP_SOCKET_LOCK(so, 1);
2380 		SCTP_TCB_LOCK(stcb);
2381 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2382 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2383 			SCTP_SOCKET_UNLOCK(so, 1);
2384 			return (NULL);
2385 		}
2386 #endif
2387 		soisconnected(stcb->sctp_socket);
2388 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2389 		SCTP_SOCKET_UNLOCK(so, 1);
2390 #endif
2391 	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2392 	    (inp->sctp_socket->so_qlimit)) {
2393 		/*
2394 		 * We don't want to do anything with this one, since it is
2395 		 * the listening socket. The timer will get started for
2396 		 * accepted connections in the caller.
2397 		 */
2398 		;
2399 	}
2400 	/* since we did not send a HB make sure we don't double things */
2401 	if ((netp) && (*netp))
2402 		(*netp)->hb_responded = 1;
2403 
2404 	if (stcb->asoc.sctp_autoclose_ticks &&
2405 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2406 		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2407 	}
2408 	/* calculate the RTT */
2409 	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2410 	if ((netp) && (*netp)) {
2411 		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
2412 						  &cookie->time_entered, sctp_align_unsafe_makecopy,
2413 						  SCTP_RTT_FROM_NON_DATA);
2414 	}
2415 	/* respond with a COOKIE-ACK */
2416 	sctp_send_cookie_ack(stcb);
2417 
2418 	/*
2419 	 * check the address lists for any ASCONFs that need to be sent
2420 	 * AFTER the cookie-ack is sent
2421 	 */
2422 	sctp_check_address_list(stcb, m,
2423 	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2424 	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2425 	    &store.sa, cookie->local_scope, cookie->site_scope,
2426 	    cookie->ipv4_scope, cookie->loopback_scope);
2427 
2428 
2429 	return (stcb);
2430 }
2431 
2432 /*
2433  * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
2434  * we NEED to make sure we are not already using the vtag. If so we
2435  * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG (no middle box bit!):
2436 	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2437 							    SCTP_BASE_INFO(hashasocmark))];
2438 	LIST_FOREACH(stcb, head, sctp_asocs) {
2439 	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2440 		       -- SEND ABORT - TRY AGAIN --
2441 		}
2442 	}
2443 */
2444 
2445 /*
2446  * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2447  * existing (non-NULL) TCB
2448  */
2449 static struct mbuf *
2450 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2451     struct sockaddr *src, struct sockaddr *dst,
2452     struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2453     struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2454     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2455     struct sctp_tcb **locked_tcb,
2456 #if defined(__FreeBSD__)
2457     uint8_t use_mflowid, uint32_t mflowid,
2458 #endif
2459     uint32_t vrf_id, uint16_t port)
2460 {
2461 	struct sctp_state_cookie *cookie;
2462 	struct sctp_tcb *l_stcb = *stcb;
2463 	struct sctp_inpcb *l_inp;
2464 	struct sockaddr *to;
2465 	struct sctp_pcb *ep;
2466 	struct mbuf *m_sig;
2467 	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2468 	uint8_t *sig;
2469 	uint8_t cookie_ok = 0;
2470 	unsigned int sig_offset, cookie_offset;
2471 	unsigned int cookie_len;
2472 	struct timeval now;
2473 	struct timeval time_expires;
2474 	int notification = 0;
2475 	struct sctp_nets *netl;
2476 	int had_a_existing_tcb = 0;
2477 	int send_int_conf = 0;
2478 #ifdef INET
2479 	struct sockaddr_in sin;
2480 #endif
2481 #ifdef INET6
2482 	struct sockaddr_in6 sin6;
2483 #endif
2484 #if defined(__Userspace__)
2485 	struct sockaddr_conn sconn;
2486 #endif
2487 
2488 	SCTPDBG(SCTP_DEBUG_INPUT2,
2489 		"sctp_handle_cookie: handling COOKIE-ECHO\n");
2490 
2491 	if (inp_p == NULL) {
2492 		return (NULL);
2493 	}
2494 	cookie = &cp->cookie;
2495 	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2496 	cookie_len = ntohs(cp->ch.chunk_length);
2497 
2498 	if ((cookie->peerport != sh->src_port) ||
2499 	    (cookie->myport != sh->dest_port) ||
2500 	    (cookie->my_vtag != sh->v_tag)) {
2501 		/*
2502 		 * invalid ports or bad tag.  Note that we always leave the
2503 		 * v_tag in the header in network order and when we stored
2504 		 * it in the my_vtag slot we also left it in network order.
2505 		 * This maintains the match even though it may be in the
2506 		 * opposite byte order of the machine :->
2507 		 */
2508 		return (NULL);
2509 	}
2510 	if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2511 	    sizeof(struct sctp_init_chunk) +
2512 	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2513 		/* cookie too small */
2514 		return (NULL);
2515 	}
2516 	/*
2517 	 * split off the signature into its own mbuf (since it should not be
2518 	 * calculated in the sctp_hmac_m() call).
2519 	 */
2520 	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2521 	m_sig = m_split(m, sig_offset, M_NOWAIT);
2522 	if (m_sig == NULL) {
2523 		/* out of memory or ?? */
2524 		return (NULL);
2525 	}
2526 #ifdef SCTP_MBUF_LOGGING
2527 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2528 		struct mbuf *mat;
2529 
2530 		for (mat = m_sig; mat; mat = SCTP_BUF_NEXT(mat)) {
2531 			if (SCTP_BUF_IS_EXTENDED(mat)) {
2532 				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2533 			}
2534 		}
2535 	}
2536 #endif
2537 
2538 	/*
2539 	 * compute the signature/digest for the cookie
2540 	 */
2541 	ep = &(*inp_p)->sctp_ep;
2542 	l_inp = *inp_p;
2543 	if (l_stcb) {
2544 		SCTP_TCB_UNLOCK(l_stcb);
2545 	}
2546 	SCTP_INP_RLOCK(l_inp);
2547 	if (l_stcb) {
2548 		SCTP_TCB_LOCK(l_stcb);
2549 	}
2550 	/* which cookie is it? */
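	/* cookies minted before the most recent secret change are verified with the previous secret */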
2551 	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2552 	    (ep->current_secret_number != ep->last_secret_number)) {
2553 		/* it's the old cookie */
2554 		(void)sctp_hmac_m(SCTP_HMAC,
2555 		    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2556 		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2557 	} else {
2558 		/* it's the current cookie */
2559 		(void)sctp_hmac_m(SCTP_HMAC,
2560 		    (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
2561 		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2562 	}
2563 	/* get the signature */
2564 	SCTP_INP_RUNLOCK(l_inp);
2565 	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2566 	if (sig == NULL) {
2567 		/* couldn't find signature */
2568 		sctp_m_freem(m_sig);
2569 		return (NULL);
2570 	}
2571 	/* compare the received digest with the computed digest */
2572 	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2573 		/* try the old cookie? */
2574 		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2575 		    (ep->current_secret_number != ep->last_secret_number)) {
2576 			/* compute digest with old */
2577 			(void)sctp_hmac_m(SCTP_HMAC,
2578 			    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2579 			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2580 			/* compare */
2581 			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2582 				cookie_ok = 1;
2583 		}
2584 	} else {
2585 		cookie_ok = 1;
2586 	}
2587 
2588 	/*
2589 	 * Now before we continue we must reconstruct our mbuf so that
2590 	 * normal processing of any other chunks will work.
2591 	 */
2592 	{
2593 		struct mbuf *m_at;
2594 
2595 		m_at = m;
2596 		while (SCTP_BUF_NEXT(m_at) != NULL) {
2597 			m_at = SCTP_BUF_NEXT(m_at);
2598 		}
2599 		SCTP_BUF_NEXT(m_at) = m_sig;
2600 	}
2601 
2602 	if (cookie_ok == 0) {
2603 		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2604 		SCTPDBG(SCTP_DEBUG_INPUT2,
2605 			"offset = %u, cookie_offset = %u, sig_offset = %u\n",
2606 			(uint32_t) offset, cookie_offset, sig_offset);
2607 		return (NULL);
2608 	}
2609 
2610 	/*
2611 	 * check the cookie timestamps to be sure it's not stale
2612 	 */
2613 	(void)SCTP_GETTIME_TIMEVAL(&now);
2614 	/* Expire time is in Ticks, so we convert to seconds */
2615 	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2616 	time_expires.tv_usec = cookie->time_entered.tv_usec;
2617 	/* TODO sctp_constants.h needs alternative time macros when
2618 	 * _KERNEL is undefined.
2619 	 */
2620 #ifndef __FreeBSD__
2621 	if (timercmp(&now, &time_expires, >))
2622 #else
2623 	if (timevalcmp(&now, &time_expires, >))
2624 #endif
2625 	{
2626 		/* cookie is stale! */
2627 		struct mbuf *op_err;
2628 		struct sctp_stale_cookie_msg *scm;
2629 		uint32_t tim;
2630 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2631 					       0, M_NOWAIT, 1, MT_DATA);
2632 		if (op_err == NULL) {
2633 			/* FOOBAR */
2634 			return (NULL);
2635 		}
2636 		/* Set the len */
2637 		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2638 		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2639 		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2640 		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2641 		    (sizeof(uint32_t))));
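		/* report how far past its lifetime the cookie is, in microseconds */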
2642 		/* seconds to usec */
2643 		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2644 		/* add in usec */
2645 		if (tim == 0)
2646 			tim = now.tv_usec - cookie->time_entered.tv_usec;
2647 		scm->time_usec = htonl(tim);
2648 		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
2649 #if defined(__FreeBSD__)
2650 		                   use_mflowid, mflowid,
2651 #endif
2652 		                   vrf_id, port);
2653 		return (NULL);
2654 	}
2655 	/*
2656 	 * Now we must check, using the lookup address, whether we have an
2657 	 * existing asoc. This will only happen if we were in the COOKIE-WAIT
2658 	 * state and an INIT collided with us and somewhere the peer sent the
2659 	 * cookie on another address besides the single address our assoc
2660 	 * had for him. In this case we will have one of the tie-tags set at
2661 	 * least AND the address field in the cookie can be used to look it
2662 	 * up.
2663 	 */
2664 	to = NULL;
2665 	switch (cookie->addr_type) {
2666 #ifdef INET6
2667 	case SCTP_IPV6_ADDRESS:
2668 		memset(&sin6, 0, sizeof(sin6));
2669 		sin6.sin6_family = AF_INET6;
2670 #ifdef HAVE_SIN6_LEN
2671 		sin6.sin6_len = sizeof(sin6);
2672 #endif
2673 		sin6.sin6_port = sh->src_port;
2674 		sin6.sin6_scope_id = cookie->scope_id;
2675 		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2676 		    sizeof(sin6.sin6_addr.s6_addr));
2677 		to = (struct sockaddr *)&sin6;
2678 		break;
2679 #endif
2680 #ifdef INET
2681 	case SCTP_IPV4_ADDRESS:
2682 		memset(&sin, 0, sizeof(sin));
2683 		sin.sin_family = AF_INET;
2684 #ifdef HAVE_SIN_LEN
2685 		sin.sin_len = sizeof(sin);
2686 #endif
2687 		sin.sin_port = sh->src_port;
2688 		sin.sin_addr.s_addr = cookie->address[0];
2689 		to = (struct sockaddr *)&sin;
2690 		break;
2691 #endif
2692 #if defined(__Userspace__)
2693 	case SCTP_CONN_ADDRESS:
2694 		memset(&sconn, 0, sizeof(struct sockaddr_conn));
2695 		sconn.sconn_family = AF_CONN;
2696 #ifdef HAVE_SCONN_LEN
2697 		sconn.sconn_len = sizeof(struct sockaddr_conn);
2698 #endif
2699 		sconn.sconn_port = sh->src_port;
2700 		memcpy(&sconn.sconn_addr, cookie->address, sizeof(void *));
2701 		to = (struct sockaddr *)&sconn;
2702 		break;
2703 #endif
2704 	default:
2705 		/* This should not happen */
2706 		return (NULL);
2707 	}
2708 	if ((*stcb == NULL) && to) {
2709 		/* Yep, let's check */
2710 		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
2711 		if (*stcb == NULL) {
2712 			/*
2713 			 * We should have only got back the same inp. If we
2714 			 * got back a different ep we have a problem. The
2715 			 * original findep got back l_inp; now it disagrees.
2716 			 */
2717 			if (l_inp != *inp_p) {
2718 				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2719 			}
2720 		} else {
2721 			if (*locked_tcb == NULL) {
2722 				/* In this case we found the assoc only
2723 				 * after we locked the create lock. This means
2724 				 * we are in a colliding case and we must make
2725 				 * sure that we unlock the tcb if it's one of the
2726 				 * cases where we throw away the incoming packets.
2727 				 */
2728 				*locked_tcb = *stcb;
2729 
2730 				/* We must also increment the inp ref count
2731 				 * since the ref_count flag was set when we
2732 				 * did not find the TCB; now that we found it,
2733 				 * the refcount is reduced, so we must raise it
2734 				 * back up to balance it all :-)
2735 				 */
2736 				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2737 				if ((*stcb)->sctp_ep != l_inp) {
2738 					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2739 						    (void *)(*stcb)->sctp_ep, (void *)l_inp);
2740 				}
2741 			}
2742 		}
2743 	}
2744 	if (to == NULL) {
2745 		return (NULL);
2746 	}
2747 
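	/* the signature (split off above) is not part of the cookie proper */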
2748 	cookie_len -= SCTP_SIGNATURE_SIZE;
2749 	if (*stcb == NULL) {
2750 		/* this is the "normal" case... get a new TCB */
2751 		*stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
2752 		                                cookie, cookie_len, *inp_p,
2753 		                                netp, to, &notification,
2754 		                                auth_skipped, auth_offset, auth_len,
2755 #if defined(__FreeBSD__)
2756 		                                use_mflowid, mflowid,
2757 #endif
2758 		                                vrf_id, port);
2759 	} else {
2760 		/* this is abnormal... cookie-echo on existing TCB */
2761 		had_a_existing_tcb = 1;
2762 		*stcb = sctp_process_cookie_existing(m, iphlen, offset,
2763 		                                     src, dst, sh,
2764 						     cookie, cookie_len, *inp_p, *stcb, netp, to,
2765 						     &notification, auth_skipped, auth_offset, auth_len,
2766 #if defined(__FreeBSD__)
2767 		                                     use_mflowid, mflowid,
2768 #endif
2769 		                                     vrf_id, port);
2770 	}
2771 
2772 	if (*stcb == NULL) {
2773 		/* still no TCB... must be bad cookie-echo */
2774 		return (NULL);
2775 	}
2776 #if defined(__FreeBSD__)
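	/* if the inbound packet carried a flow id, tag the chosen net with it */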
2777 	if ((*netp != NULL) && (use_mflowid != 0)) {
2778 		(*netp)->flowid = mflowid;
2779 #ifdef INVARIANTS
2780 		(*netp)->flowidset = 1;
2781 #endif
2782 	}
2783 #endif
2784 	/*
2785 	 * Ok, we built an association so confirm the address we sent the
2786 	 * INIT-ACK to.
2787 	 */
2788 	netl = sctp_findnet(*stcb, to);
2789 	/*
2790 	 * This code should in theory NOT run, but just in case:
2791 	 */
2792 	if (netl == NULL) {
2793 		/* TSNH! Huh, why do I need to add this address here? */
2794 		if (sctp_add_remote_addr(*stcb, to, NULL, SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
2795 			return (NULL);
2796 		}
2797 		netl = sctp_findnet(*stcb, to);
2798 	}
2799 	if (netl) {
2800 		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2801 			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2802 			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2803 			    netl);
2804 			send_int_conf = 1;
2805 		}
2806 	}
2807 	sctp_start_net_timers(*stcb);
2808 	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2809 		if (!had_a_existing_tcb ||
2810 		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2811 			/*
2812 			 * If we have a NEW cookie or the connect never
2813 			 * reached the connected state during collision we
2814 			 * must do the TCP accept thing.
2815 			 */
2816 			struct socket *so, *oso;
2817 			struct sctp_inpcb *inp;
2818 
2819 			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2820 				/*
2821 				 * For a restart we will keep the same
2822 				 * socket, no need to do anything. I THINK!!
2823 				 */
2824 				sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2825 				if (send_int_conf) {
2826 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2827 					                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2828 				}
2829 				return (m);
2830 			}
2831 			oso = (*inp_p)->sctp_socket;
2832 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000)
2833 			/*
2834 			 * We do this to keep the sockets side happy during
2835 			 * the sonewconn() call ONLY.
2836 			 */
2837 			NET_LOCK_GIANT();
2838 #endif
2839 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2840 			SCTP_TCB_UNLOCK((*stcb));
2841 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
2842 			CURVNET_SET(oso->so_vnet);
2843 #endif
2844 #if defined(__APPLE__)
2845 			SCTP_SOCKET_LOCK(oso, 1);
2846 #endif
2847 			so = sonewconn(oso, 0
2848 #if defined(__APPLE__)
2849 			    ,NULL
2850 #endif
2851 #ifdef __Panda__
2852 			     ,NULL , (*inp_p)->def_vrf_id
2853 #endif
2854 			    );
2855 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000)
2856 			NET_UNLOCK_GIANT();
2857 #endif
2858 #if defined(__APPLE__)
2859 			SCTP_SOCKET_UNLOCK(oso, 1);
2860 #endif
2861 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
2862 			CURVNET_RESTORE();
2863 #endif
2864 			SCTP_TCB_LOCK((*stcb));
2865 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2866 
2867 			if (so == NULL) {
2868 				struct mbuf *op_err;
2869 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2870 				struct socket *pcb_so;
2871 #endif
2872 				/* Too many sockets */
2873 				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2874 				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2875 				sctp_abort_association(*inp_p, NULL, m, iphlen,
2876 						       src, dst, sh, op_err,
2877 #if defined(__FreeBSD__)
2878 				                       use_mflowid, mflowid,
2879 #endif
2880 				                       vrf_id, port);
2881 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2882 				pcb_so = SCTP_INP_SO(*inp_p);
2883 				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2884 				SCTP_TCB_UNLOCK((*stcb));
2885 				SCTP_SOCKET_LOCK(pcb_so, 1);
2886 				SCTP_TCB_LOCK((*stcb));
2887 				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2888 #endif
2889 				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT+SCTP_LOC_20);
2890 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2891 				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2892 #endif
2893 				return (NULL);
2894 			}
2895 			inp = (struct sctp_inpcb *)so->so_pcb;
2896 			SCTP_INP_INCR_REF(inp);
2897 			/*
2898 			 * We add the unbound flag here so that
2899 			 * if we get an soabort() before we get the
2900 			 * move_pcb done, we will properly clean up.
2901 			 */
2902 			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2903 			    SCTP_PCB_FLAGS_CONNECTED |
2904 			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2905 			    SCTP_PCB_FLAGS_UNBOUND |
2906 			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2907 			    SCTP_PCB_FLAGS_DONT_WAKE);
2908 			inp->sctp_features = (*inp_p)->sctp_features;
2909 			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2910 			inp->sctp_socket = so;
2911 			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2912 			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
2913 			inp->ecn_supported = (*inp_p)->ecn_supported;
2914 			inp->prsctp_supported = (*inp_p)->prsctp_supported;
2915 			inp->auth_supported = (*inp_p)->auth_supported;
2916 			inp->asconf_supported = (*inp_p)->asconf_supported;
2917 			inp->reconfig_supported = (*inp_p)->reconfig_supported;
2918 			inp->nrsack_supported = (*inp_p)->nrsack_supported;
2919 			inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
2920 			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2921 			inp->sctp_context = (*inp_p)->sctp_context;
2922 			inp->local_strreset_support = (*inp_p)->local_strreset_support;
2923 			inp->inp_starting_point_for_iterator = NULL;
2924 #if defined(__Userspace__)
2925 			inp->ulp_info = (*inp_p)->ulp_info;
2926 			inp->recv_callback = (*inp_p)->recv_callback;
2927 			inp->send_callback = (*inp_p)->send_callback;
2928 			inp->send_sb_threshold = (*inp_p)->send_sb_threshold;
2929 #endif
2930 			/*
2931 			 * copy in the authentication parameters from the
2932 			 * original endpoint
2933 			 */
2934 			if (inp->sctp_ep.local_hmacs)
2935 				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2936 			inp->sctp_ep.local_hmacs =
2937 			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2938 			if (inp->sctp_ep.local_auth_chunks)
2939 				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2940 			inp->sctp_ep.local_auth_chunks =
2941 			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2942 
2943 			/*
2944 			 * Now we must move it from one hash table to
2945 			 * another and get the tcb in the right place.
2946 			 */
2947 
2948 			/* This is where the one-to-one style socket is put
2949 			 * into the accept state, waiting for accept().
2950 			 */
2951 			if (*stcb) {
2952 				(*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
2953 			}
2954 			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2955 
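			/*
			 * Move any queued notifications/data that already landed on
			 * the old (listening) endpoint over to the newly created
			 * inp/socket.  The TCB lock is dropped across this call, with
			 * a reference held on the association, and re-taken afterwards.
			 */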
2956 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2957 			SCTP_TCB_UNLOCK((*stcb));
2958 
2959 #if defined(__FreeBSD__)
2960 			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2961 			    0);
2962 #else
2963 			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
2964 #endif
2965 			SCTP_TCB_LOCK((*stcb));
2966 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2967 
2968 
2969 			/* now we must check to see if we were aborted while
2970 			 * the move was going on and the lock/unlock happened.
2971 			 */
2972 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2973 				/* yep it was, we leave the
2974 				 * assoc attached to the socket since
2975 				 * the sctp_inpcb_free() call will send
2976 				 * an abort for us.
2977 				 */
2978 				SCTP_INP_DECR_REF(inp);
2979 				return (NULL);
2980 			}
2981 			SCTP_INP_DECR_REF(inp);
2982 			/* Switch over to the new guy */
2983 			*inp_p = inp;
2984 			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2985 			if (send_int_conf) {
2986 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2987 				                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2988 			}
2989 
2990 			/* Pull it from the incomplete queue and wake the guy */
2991 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2992 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2993 			SCTP_TCB_UNLOCK((*stcb));
2994 			SCTP_SOCKET_LOCK(so, 1);
2995 #endif
2996 			soisconnected(so);
2997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2998 			SCTP_TCB_LOCK((*stcb));
2999 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
3000 			SCTP_SOCKET_UNLOCK(so, 1);
3001 #endif
3002 			return (m);
3003 		}
3004 	}
3005 	if (notification) {
3006 		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3007 	}
3008 	if (send_int_conf) {
3009 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
3010 		                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
3011 	}
3012 	return (m);
3013 }
3014 
3015 static void
3016 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
3017     struct sctp_tcb *stcb, struct sctp_nets *net)
3018 {
3019 	/* cp must not be used, others call this without a c-ack :-) */
3020 	struct sctp_association *asoc;
3021 
3022 	SCTPDBG(SCTP_DEBUG_INPUT2,
3023 		"sctp_handle_cookie_ack: handling COOKIE-ACK\n");
3024 	if ((stcb == NULL) || (net == NULL)) {
3025 		return;
3026 	}
3027 
3028 	asoc = &stcb->asoc;
3029 
3030 	sctp_stop_all_cookie_timers(stcb);
3031 	/* process according to association state */
3032 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
3033 		/* state change only needed when I am in right state */
3034 		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
3035 		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
3036 		sctp_start_net_timers(stcb);
3037 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
3038 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
3039 					 stcb->sctp_ep, stcb, asoc->primary_destination);
3040 
3041 		}
3042 		/* update RTO */
3043 		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
3044 		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
3045 		if (asoc->overall_error_count == 0) {
3046 			net->RTO = sctp_calculate_rto(stcb, asoc, net,
3047 					             &asoc->time_entered, sctp_align_safe_nocopy,
3048 						      SCTP_RTT_FROM_NON_DATA);
3049 		}
3050 		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
3051 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3052 		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3053 		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3054 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3055 			struct socket *so;
3056 
3057 #endif
3058 			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
3059 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3060 			so = SCTP_INP_SO(stcb->sctp_ep);
3061 			atomic_add_int(&stcb->asoc.refcnt, 1);
3062 			SCTP_TCB_UNLOCK(stcb);
3063 			SCTP_SOCKET_LOCK(so, 1);
3064 			SCTP_TCB_LOCK(stcb);
3065 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3066 #endif
3067 			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
3068 				soisconnected(stcb->sctp_socket);
3069 			}
3070 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3071 			SCTP_SOCKET_UNLOCK(so, 1);
3072 #endif
3073 		}
3074 		/*
3075 		 * since we did not send a HB make sure we don't double
3076 		 * things
3077 		 */
3078 		net->hb_responded = 1;
3079 
3080 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3081 			/* We don't need to do the asconf thing,
3082 			 * nor hb or autoclose if the socket is closed.
3083 			 */
3084 			goto closed_socket;
3085 		}
3086 
3087 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
3088 		    stcb, net);
3089 
3090 
3091 		if (stcb->asoc.sctp_autoclose_ticks &&
3092 		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
3093 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
3094 			    stcb->sctp_ep, stcb, NULL);
3095 		}
3096 		/*
3097 		 * send ASCONF if parameters are pending and ASCONFs are
3098 		 * allowed (eg. addresses changed when init/cookie echo were
3099 		 * in flight)
3100 		 */
3101 		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
3102 		    (stcb->asoc.asconf_supported == 1) &&
3103 		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
3104 #ifdef SCTP_TIMER_BASED_ASCONF
3105 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
3106 					 stcb->sctp_ep, stcb,
3107 					 stcb->asoc.primary_destination);
3108 #else
3109 			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
3110 					 SCTP_ADDR_NOT_LOCKED);
3111 #endif
3112 		}
3113 	}
3114 closed_socket:
3115 	/* Toss the cookie if I can */
3116 	sctp_toss_old_cookies(stcb, asoc);
3117 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3118 		/* Restart the timer if we have pending data */
3119 		struct sctp_tmit_chunk *chk;
3120 
3121 		chk = TAILQ_FIRST(&asoc->sent_queue);
3122 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
3123 	}
3124 }
3125 
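/*
 * Process an incoming ECN-Echo (ECNE) chunk.  Both the old (shorter) ECNE
 * format and the newer one carrying num_pkts_since_cwr are accepted.  We try
 * to find the destination (net) the reported TSN was sent on, let the
 * pluggable CC module reduce cwnd at most once per window (tracked via
 * cwr_window_tsn), and always answer with a CWR; the REDUCE_OVERRIDE flag is
 * set when the TSN could not be matched to a chunk or a destination.
 */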
3126 static void
3127 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
3128 		     struct sctp_tcb *stcb)
3129 {
3130 	struct sctp_nets *net;
3131 	struct sctp_tmit_chunk *lchk;
3132 	struct sctp_ecne_chunk bkup;
3133 	uint8_t override_bit;
3134 	uint32_t tsn, window_data_tsn;
3135 	int len;
3136 	unsigned int pkt_cnt;
3137 
3138 	len = ntohs(cp->ch.chunk_length);
3139 	if ((len != sizeof(struct sctp_ecne_chunk)) &&
3140 	    (len != sizeof(struct old_sctp_ecne_chunk))) {
3141 		return;
3142 	}
3143 	if (len == sizeof(struct old_sctp_ecne_chunk)) {
3144 		/* It's the old format */
3145 		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
3146 		bkup.num_pkts_since_cwr = htonl(1);
3147 		cp = &bkup;
3148 	}
3149 	SCTP_STAT_INCR(sctps_recvecne);
3150 	tsn = ntohl(cp->tsn);
3151 	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
3152 	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
3153 	if (lchk == NULL) {
3154 		window_data_tsn = stcb->asoc.sending_seq - 1;
3155 	} else {
3156 		window_data_tsn = lchk->rec.data.TSN_seq;
3157 	}
3158 
3159 	/* Find where it was sent to if possible. */
3160 	net = NULL;
3161 	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
3162 		if (lchk->rec.data.TSN_seq == tsn) {
3163 			net = lchk->whoTo;
3164 			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
3165 			break;
3166 		}
3167 		if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) {
3168 			break;
3169 		}
3170 	}
3171 	if (net == NULL) {
3172 		/*
3173 		 * What to do? A previous send of a
3174 		 * CWR was possibly lost. See how old it is; we
3175 		 * may have it marked on the actual net.
3176 		 */
3177 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3178 			if (tsn == net->last_cwr_tsn) {
3179 				/* Found him, send it off */
3180 				break;
3181 			}
3182 		}
3183 		if (net == NULL) {
3184 			/*
3185 			 * If we reach here, we need to send a special
3186 			 * CWR that says hey, we did this a long time
3187 			 * ago and you lost the response.
3188 			 */
3189 			net = TAILQ_FIRST(&stcb->asoc.nets);
3190 			if (net == NULL) {
3191 				/* TSNH */
3192 				return;
3193 			}
3194 			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
3195 		} else {
3196 			override_bit = 0;
3197 		}
3198 	} else {
3199 		override_bit = 0;
3200 	}
3201 	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
3202 	    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
3203 		/* JRS - Use the congestion control given in the pluggable CC module */
3204 		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
3205 		/*
3206 		 * We reduce once every RTT, so we will only lower cwnd again
3207 		 * at the next sending seq, i.e. the window_data_tsn.
3208 		 */
3209 		net->cwr_window_tsn = window_data_tsn;
3210 		net->ecn_ce_pkt_cnt += pkt_cnt;
3211 		net->lost_cnt = pkt_cnt;
3212 		net->last_cwr_tsn = tsn;
3213 	} else {
3214 		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
3215 		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
3216 		    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
3217 			/*
3218 			 * Another loss in the same window; update how
3219 			 * many marks/packets we have lost.
3220 			 */
3221 			int cnt = 1;
3222 			if (pkt_cnt > net->lost_cnt) {
3223 				/* Should be the case */
3224 				cnt = (pkt_cnt - net->lost_cnt);
3225 				net->ecn_ce_pkt_cnt += cnt;
3226 			}
3227 			net->lost_cnt = pkt_cnt;
3228 			net->last_cwr_tsn = tsn;
3229 			/*
3230 			 * Most CC functions will ignore this call, since we are still
3231 			 * within the window of the initial CE the peer saw.
3232 			 */
3233 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
3234 		}
3235 	}
3236 	/*
3237 	 * We always send a CWR this way: if our previous one was lost, our
3238 	 * peer will get an update, and if it is not yet time to reduce
3239 	 * again, the peer still gets the CWR. Note we set the override when
3240 	 * we could not find the TSN on a chunk or the destination network.
3241 	 */
3242 	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
3243 }
3244 
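/*
 * Process an incoming CWR chunk.  Any ECN-Echo queued on the control send
 * queue whose TSN is covered by the CWR's TSN is removed.  If the
 * REDUCE_OVERRIDE flag is set, ECNEs for any destination are matched;
 * otherwise only the first covered ECNE queued to the arriving net is
 * removed.
 */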
3245 static void
3246 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3247 {
3248 	/*
3249 	 * Here we get a CWR from the peer. We must look in the outqueue and
3250 	 * check whether we have an ECNE in the control chunk part that it
3251 	 * covers. If so, remove it.
3252 	 */
3253 	struct sctp_tmit_chunk *chk;
3254 	struct sctp_ecne_chunk *ecne;
3255 	int override;
3256 	uint32_t cwr_tsn;
3257 	cwr_tsn = ntohl(cp->tsn);
3258 
3259 	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3260 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
3261 		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3262 			continue;
3263 		}
3264 		if ((override == 0) && (chk->whoTo != net)) {
3265 			/* Must be from the right src unless override is set */
3266 			continue;
3267 		}
3268 		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3269 		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3270 			/* this covers this ECNE, we can remove it */
3271 			stcb->asoc.ecn_echo_cnt_onq--;
3272 			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3273 			    sctp_next);
3274 			if (chk->data) {
3275 				sctp_m_freem(chk->data);
3276 				chk->data = NULL;
3277 			}
3278 			stcb->asoc.ctrl_queue_cnt--;
3279 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3280 			if (override == 0) {
3281 				break;
3282 			}
3283 		}
3284 	}
3285 }
3286 
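/*
 * Process a SHUTDOWN-COMPLETE chunk.  It is only acted upon in the
 * SHUTDOWN-ACK-SENT state; otherwise it is ignored.  When accepted, the ULP
 * is notified that the association is down, the SHUTDOWN-ACK timer is
 * stopped and the association is freed.
 */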
3287 static void
3288 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
3289     struct sctp_tcb *stcb, struct sctp_nets *net)
3290 {
3291 	struct sctp_association *asoc;
3292 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3293 	struct socket *so;
3294 #endif
3295 
3296 	SCTPDBG(SCTP_DEBUG_INPUT2,
3297 		"sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
3298 	if (stcb == NULL)
3299 		return;
3300 
3301 	asoc = &stcb->asoc;
3302 	/* process according to association state */
3303 	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
3304 		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
3305 		SCTPDBG(SCTP_DEBUG_INPUT2,
3306 			"sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
3307 		SCTP_TCB_UNLOCK(stcb);
3308 		return;
3309 	}
3310 	/* notify upper layer protocol */
3311 	if (stcb->sctp_socket) {
3312 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3313 	}
3314 #ifdef INVARIANTS
3315 	if (!TAILQ_EMPTY(&asoc->send_queue) ||
3316 	    !TAILQ_EMPTY(&asoc->sent_queue) ||
3317 	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
3318 		panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
3319 	}
3320 #endif
3321 	/* stop the timer */
3322 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_22);
3323 	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
3324 	/* free the TCB */
3325 	SCTPDBG(SCTP_DEBUG_INPUT2,
3326 		"sctp_handle_shutdown_complete: calls free-asoc\n");
3327 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3328 	so = SCTP_INP_SO(stcb->sctp_ep);
3329 	atomic_add_int(&stcb->asoc.refcnt, 1);
3330 	SCTP_TCB_UNLOCK(stcb);
3331 	SCTP_SOCKET_LOCK(so, 1);
3332 	SCTP_TCB_LOCK(stcb);
3333 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
3334 #endif
3335 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT+SCTP_LOC_23);
3336 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3337 	SCTP_SOCKET_UNLOCK(so, 1);
3338 #endif
3339 	return;
3340 }
3341 
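/*
 * Handle a single chunk descriptor reported in a PACKET-DROPPED chunk.  For
 * DATA the matching chunk on the sent queue is marked for retransmission
 * (flight size adjusted, send timer restarted); for control chunks the
 * appropriate chunk (ASCONF, INIT, SACK, HB, SHUTDOWN, COOKIE-ECHO, ...) is
 * re-sent.  Returns -1 if the reported data bytes do not match our copy,
 * 0 otherwise.
 */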
3342 static int
3343 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
3344     struct sctp_nets *net, uint8_t flg)
3345 {
3346 	switch (desc->chunk_type) {
3347 	case SCTP_DATA:
3348 		/* find the TSN to resend (possibly) */
3349 	{
3350 		uint32_t tsn;
3351 		struct sctp_tmit_chunk *tp1;
3352 
3353 		tsn = ntohl(desc->tsn_ifany);
3354 		TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3355 			if (tp1->rec.data.TSN_seq == tsn) {
3356 				/* found it */
3357 				break;
3358 			}
3359 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) {
3360 				/* not found */
3361 				tp1 = NULL;
3362 				break;
3363 			}
3364 		}
3365 		if (tp1 == NULL) {
3366 			/*
3367 			 * Do it the other way, i.e. without paying
3368 			 * attention to queue seq order.
3369 			 */
3370 			SCTP_STAT_INCR(sctps_pdrpdnfnd);
3371 			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3372 				if (tp1->rec.data.TSN_seq == tsn) {
3373 					/* found it */
3374 					break;
3375 				}
3376 			}
3377 		}
3378 		if (tp1 == NULL) {
3379 			SCTP_STAT_INCR(sctps_pdrptsnnf);
3380 		}
3381 		if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
3382 			uint8_t *ddp;
3383 
3384 			if (((flg & SCTP_BADCRC) == 0) &&
3385 			    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
3386 				return (0);
3387 			}
3388 			if ((stcb->asoc.peers_rwnd == 0) &&
3389 			    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
3390 				SCTP_STAT_INCR(sctps_pdrpdiwnp);
3391 				return (0);
3392 			}
3393 			if (stcb->asoc.peers_rwnd == 0 &&
3394 			    (flg & SCTP_FROM_MIDDLE_BOX)) {
3395 				SCTP_STAT_INCR(sctps_pdrpdizrw);
3396 				return (0);
3397 			}
3398 			ddp = (uint8_t *) (mtod(tp1->data, caddr_t) +
3399 					   sizeof(struct sctp_data_chunk));
3400 			{
3401 				unsigned int iii;
3402 
3403 				for (iii = 0; iii < sizeof(desc->data_bytes);
3404 				     iii++) {
3405 					if (ddp[iii] != desc->data_bytes[iii]) {
3406 						SCTP_STAT_INCR(sctps_pdrpbadd);
3407 						return (-1);
3408 					}
3409 				}
3410 			}
3411 
3412 			if (tp1->do_rtt) {
3413 				/*
3414 				 * this chunk had an RTO calculation
3415 				 * pending on it; cancel it
3416 				 */
3417 				if (tp1->whoTo->rto_needed == 0) {
3418 					tp1->whoTo->rto_needed = 1;
3419 				}
3420 				tp1->do_rtt = 0;
3421 			}
3422 			SCTP_STAT_INCR(sctps_pdrpmark);
3423 			if (tp1->sent != SCTP_DATAGRAM_RESEND)
3424 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3425 			/*
3426 			 * mark it as if we were doing a FR, since
3427 			 * we will be getting gap ack reports behind
3428 			 * the info from the router.
3429 			 */
3430 			tp1->rec.data.doing_fast_retransmit = 1;
3431 			/*
3432 			 * mark the tsn with what sequences can
3433 			 * cause a new FR.
3434 			 */
3435 			if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
3436 				tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
3437 			} else {
3438 				tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
3439 			}
3440 
3441 			/* restart the timer */
3442 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3443 					stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT+SCTP_LOC_24);
3444 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3445 					 stcb, tp1->whoTo);
3446 
3447 			/* fix counts and things */
3448 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3449 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
3450 					       tp1->whoTo->flight_size,
3451 					       tp1->book_size,
3452 					       (uintptr_t)stcb,
3453 					       tp1->rec.data.TSN_seq);
3454 			}
3455 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3456 				sctp_flight_size_decrease(tp1);
3457 				sctp_total_flight_decrease(stcb, tp1);
3458 			}
3459 			tp1->sent = SCTP_DATAGRAM_RESEND;
3460 		} {
3461 			/* audit code */
3462 			unsigned int audit;
3463 
3464 			audit = 0;
3465 			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3466 				if (tp1->sent == SCTP_DATAGRAM_RESEND)
3467 					audit++;
3468 			}
3469 			TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
3470 				      sctp_next) {
3471 				if (tp1->sent == SCTP_DATAGRAM_RESEND)
3472 					audit++;
3473 			}
3474 			if (audit != stcb->asoc.sent_queue_retran_cnt) {
3475 				SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
3476 					    audit, stcb->asoc.sent_queue_retran_cnt);
3477 #ifndef SCTP_AUDITING_ENABLED
3478 				stcb->asoc.sent_queue_retran_cnt = audit;
3479 #endif
3480 			}
3481 		}
3482 	}
3483 	break;
3484 	case SCTP_ASCONF:
3485 	{
3486 		struct sctp_tmit_chunk *asconf;
3487 
3488 		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
3489 			      sctp_next) {
3490 			if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
3491 				break;
3492 			}
3493 		}
3494 		if (asconf) {
3495 			if (asconf->sent != SCTP_DATAGRAM_RESEND)
3496 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3497 			asconf->sent = SCTP_DATAGRAM_RESEND;
3498 			asconf->snd_count--;
3499 		}
3500 	}
3501 	break;
3502 	case SCTP_INITIATION:
3503 		/* resend the INIT */
3504 		stcb->asoc.dropped_special_cnt++;
3505 		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
3506 			/*
3507 			 * If we can get it in within a few attempts we do
3508 			 * this; otherwise we let the timer fire.
3509 			 */
3510 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
3511 					stcb, net, SCTP_FROM_SCTP_INPUT+SCTP_LOC_25);
3512 			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
3513 		}
3514 		break;
3515 	case SCTP_SELECTIVE_ACK:
3516 	case SCTP_NR_SELECTIVE_ACK:
3517 		/* resend the sack */
3518 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
3519 		break;
3520 	case SCTP_HEARTBEAT_REQUEST:
3521 		/* resend a demand HB */
3522 		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
3523 			/* Only retransmit if we KNOW we won't destroy the tcb */
3524 			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
3525 		}
3526 		break;
3527 	case SCTP_SHUTDOWN:
3528 		sctp_send_shutdown(stcb, net);
3529 		break;
3530 	case SCTP_SHUTDOWN_ACK:
3531 		sctp_send_shutdown_ack(stcb, net);
3532 		break;
3533 	case SCTP_COOKIE_ECHO:
3534 	{
3535 		struct sctp_tmit_chunk *cookie;
3536 
3537 		cookie = NULL;
3538 		TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
3539 			      sctp_next) {
3540 			if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
3541 				break;
3542 			}
3543 		}
3544 		if (cookie) {
3545 			if (cookie->sent != SCTP_DATAGRAM_RESEND)
3546 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3547 			cookie->sent = SCTP_DATAGRAM_RESEND;
3548 			sctp_stop_all_cookie_timers(stcb);
3549 		}
3550 	}
3551 	break;
3552 	case SCTP_COOKIE_ACK:
3553 		sctp_send_cookie_ack(stcb);
3554 		break;
3555 	case SCTP_ASCONF_ACK:
3556 		/* resend last asconf ack */
3557 		sctp_send_asconf_ack(stcb);
3558 		break;
3559 	case SCTP_FORWARD_CUM_TSN:
3560 		send_forward_tsn(stcb, &stcb->asoc);
3561 		break;
3562 		/* can't do anything with these */
3563 	case SCTP_PACKET_DROPPED:
3564 	case SCTP_INITIATION_ACK:	/* this should not happen */
3565 	case SCTP_HEARTBEAT_ACK:
3566 	case SCTP_ABORT_ASSOCIATION:
3567 	case SCTP_OPERATION_ERROR:
3568 	case SCTP_SHUTDOWN_COMPLETE:
3569 	case SCTP_ECN_ECHO:
3570 	case SCTP_ECN_CWR:
3571 	default:
3572 		break;
3573 	}
3574 	return (0);
3575 }
3576 
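/*
 * Reset the listed incoming streams (or all of them if number_entries is 0):
 * last_sequence_delivered is set to 0xffff so that the next SSN expected
 * from the peer is 0, and the ULP is notified of the incoming reset.
 */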
3577 void
3578 sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3579 {
3580 	uint32_t i;
3581 	uint16_t temp;
3582 
3583 	/*
3584 	 * We set things to 0xffff since this is the last delivered sequence,
3585 	 * and the first sequence coming in after the reset will be 0.
3586 	 */
3587 
3588 	if (number_entries) {
3589 		for (i = 0; i < number_entries; i++) {
3590 			temp = ntohs(list[i]);
3591 			if (temp >= stcb->asoc.streamincnt) {
3592 				continue;
3593 			}
3594 			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3595 		}
3596 	} else {
3597 		list = NULL;
3598 		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3599 			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3600 		}
3601 	}
3602 	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3603 }
3604 
3605 static void
3606 sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3607 {
3608 	uint32_t i;
3609 	uint16_t temp;
3610 
3611 	if (number_entries > 0) {
3612 		for (i = 0; i < number_entries; i++) {
3613 			temp = ntohs(list[i]);
3614 			if (temp >= stcb->asoc.streamoutcnt) {
3615 				/* no such stream */
3616 				continue;
3617 			}
3618 			stcb->asoc.strmout[temp].next_sequence_send = 0;
3619 		}
3620 	} else {
3621 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3622 			stcb->asoc.strmout[i].next_sequence_send = 0;
3623 		}
3624 	}
3625 	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3626 }
3627 
3628 
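/*
 * Look up, in the queued stream-reset chunk (asoc->str_reset), the
 * outstanding request whose sequence number is 'seq'.  A single chunk can
 * carry at most two requests.  Returns the matching request or NULL; if
 * bchk is non-NULL, the chunk pointer is passed back through it.
 */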
3629 struct sctp_stream_reset_out_request *
3630 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3631 {
3632 	struct sctp_association *asoc;
3633 	struct sctp_chunkhdr *ch;
3634 	struct sctp_stream_reset_out_request *r;
3635 	struct sctp_tmit_chunk *chk;
3636 	int len, clen;
3637 
3638 	asoc = &stcb->asoc;
3639 	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3640 		asoc->stream_reset_outstanding = 0;
3641 		return (NULL);
3642 	}
3643 	if (stcb->asoc.str_reset == NULL) {
3644 		asoc->stream_reset_outstanding = 0;
3645 		return (NULL);
3646 	}
3647 	chk = stcb->asoc.str_reset;
3648 	if (chk->data == NULL) {
3649 		return (NULL);
3650 	}
3651 	if (bchk) {
3652 		/* the caller wants a copy of the chk pointer */
3653 		*bchk = chk;
3654 	}
3655 	clen = chk->send_size;
3656 	ch = mtod(chk->data, struct sctp_chunkhdr *);
3657 	r = (struct sctp_stream_reset_out_request *)(ch + 1);
3658 	if (ntohl(r->request_seq) == seq) {
3659 		/* found it */
3660 		return (r);
3661 	}
3662 	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3663 	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3664 		/* move to the next one, there can only be a max of two */
3665 		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
3666 		if (ntohl(r->request_seq) == seq) {
3667 			return (r);
3668 		}
3669 	}
3670 	/* that seq is not here */
3671 	return (NULL);
3672 }
3673 
3674 static void
3675 sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3676 {
3677 	struct sctp_association *asoc;
3678 	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3679 
3680 	if (stcb->asoc.str_reset == NULL) {
3681 		return;
3682 	}
3683 	asoc = &stcb->asoc;
3684 
3685 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT+SCTP_LOC_26);
3686 	TAILQ_REMOVE(&asoc->control_send_queue,
3687 	    chk,
3688 	    sctp_next);
3689 	if (chk->data) {
3690 		sctp_m_freem(chk->data);
3691 		chk->data = NULL;
3692 	}
3693 	asoc->ctrl_queue_cnt--;
3694 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3695         /*sa_ignore NO_NULL_CHK*/
3696 	stcb->asoc.str_reset = NULL;
3697 }
3698 
3699 
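/*
 * Process the peer's response to one of our outstanding stream-reset
 * requests.  The sequence number is matched against str_reset_seq_out and
 * the original request type (reset out/in, add out/in streams, TSN reset)
 * determines which state is updated and which notification (performed,
 * denied or failed) is delivered to the ULP.  Returns 1 only if handling a
 * TSN-reset response caused the association to be aborted.
 */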
3700 static int
3701 sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
3702 				  uint32_t seq, uint32_t action,
3703 				  struct sctp_stream_reset_response *respin)
3704 {
3705 	uint16_t type;
3706 	int lparm_len;
3707 	struct sctp_association *asoc = &stcb->asoc;
3708 	struct sctp_tmit_chunk *chk;
3709 	struct sctp_stream_reset_out_request *srparam;
3710 	uint32_t number_entries;
3711 
3712 	if (asoc->stream_reset_outstanding == 0) {
3713 		/* duplicate */
3714 		return (0);
3715 	}
3716 	if (seq == stcb->asoc.str_reset_seq_out) {
3717 		srparam = sctp_find_stream_reset(stcb, seq, &chk);
3718 		if (srparam) {
3719 			stcb->asoc.str_reset_seq_out++;
3720 			type = ntohs(srparam->ph.param_type);
3721 			lparm_len = ntohs(srparam->ph.param_length);
3722 			if (type == SCTP_STR_RESET_OUT_REQUEST) {
3723 				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
3724 				asoc->stream_reset_out_is_outstanding = 0;
3725 				if (asoc->stream_reset_outstanding)
3726 					asoc->stream_reset_outstanding--;
3727 				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3728 					/* do it */
3729 					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
3730 				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3731 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3732 				} else {
3733 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3734 				}
3735 			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
3736 				/* Answered my request */
3737 				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
3738 				if (asoc->stream_reset_outstanding)
3739 					asoc->stream_reset_outstanding--;
3740 				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3741 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
3742 							number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3743 				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3744 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
3745 							number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3746 				}
3747 			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
3748 				/* Ok we now may have more streams */
3749 				int num_stream;
3750 
3751 				num_stream = stcb->asoc.strm_pending_add_size;
3752 				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
3753 					/* TSNH */
3754 					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
3755 				}
3756 				stcb->asoc.strm_pending_add_size = 0;
3757 				if (asoc->stream_reset_outstanding)
3758 					asoc->stream_reset_outstanding--;
3759 				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3760 					/* Put the new streams into effect */
3761 					stcb->asoc.streamoutcnt += num_stream;
3762 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
3763 				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3764 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3765 								     SCTP_STREAM_CHANGE_DENIED);
3766 				} else {
3767 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3768 								     SCTP_STREAM_CHANGE_FAILED);
3769 				}
3770 			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
3771 				if (asoc->stream_reset_outstanding)
3772 					asoc->stream_reset_outstanding--;
3773 				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3774 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3775 								     SCTP_STREAM_CHANGE_DENIED);
3776 				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3777 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3778 								     SCTP_STREAM_CHANGE_FAILED);
3779 				}
3780 			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
3781 				/**
3782 				 * a) Adopt the new in tsn.
3783 				 * b) reset the map
3784 				 * c) Adopt the new out-tsn
3785 				 */
3786 				struct sctp_stream_reset_response_tsn *resp;
3787 				struct sctp_forward_tsn_chunk fwdtsn;
3788 				int abort_flag = 0;
3789 				if (respin == NULL) {
3790 					/* huh ? */
3791 					return (0);
3792 				}
3793 				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3794 					resp = (struct sctp_stream_reset_response_tsn *)respin;
3795 					asoc->stream_reset_outstanding--;
3796 					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3797 					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3798 					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
3799 					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3800 					if (abort_flag) {
3801 						return (1);
3802 					}
3803 					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
3804 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3805 						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3806 					}
3807 
3808 					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3809 					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
3810 					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3811 
3812 					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
3813 					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
3814 
3815 					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
3816 					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
3817 
3818 					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3819 					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3820 					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
3821 				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3822 					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
3823 								     SCTP_ASSOC_RESET_DENIED);
3824 				} else {
3825 					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
3826 								     SCTP_ASSOC_RESET_FAILED);
3827 				}
3828 			}
3829 			/* get rid of the request and get the request flags */
3830 			if (asoc->stream_reset_outstanding == 0) {
3831 				sctp_clean_up_stream_reset(stcb);
3832 			}
3833 		}
3834 	}
3835 	return (0);
3836 }
3837 
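/*
 * Handle an incoming IN-stream reset request: the peer asks us to reset
 * (some of) our outgoing streams, so, if allowed, we add an outgoing
 * stream-reset request of our own to the response chunk.  All request
 * handlers share the same replay logic: a request matching str_reset_seq_in
 * is processed and its result stored in last_reset_action[0]; a request one
 * or two behind just gets the previously stored result echoed back (our
 * earlier response was lost); anything else is answered with ERR_BAD_SEQNO.
 */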
3838 static void
3839 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
3840     struct sctp_tmit_chunk *chk,
3841     struct sctp_stream_reset_in_request *req, int trunc)
3842 {
3843 	uint32_t seq;
3844 	int len, i;
3845 	int number_entries;
3846 	uint16_t temp;
3847 
3848 	/*
3849 	 * The peer wants me to send a stream reset for my outgoing streams,
3850 	 * provided its request sequence matches our expected seq_in.
3851 	 */
3852 	struct sctp_association *asoc = &stcb->asoc;
3853 
3854 	seq = ntohl(req->request_seq);
3855 	if (asoc->str_reset_seq_in == seq) {
3856 		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3857 		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
3858 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3859 		} else if (trunc) {
3860 			/* Can't do it, since they exceeded our buffer size  */
3861 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3862 		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
3863 			len = ntohs(req->ph.param_length);
3864 			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
3865 			for (i = 0; i < number_entries; i++) {
3866 				temp = ntohs(req->list_of_streams[i]);
3867 				req->list_of_streams[i] = temp;
3868 			}
3869 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3870 			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
3871 			    asoc->str_reset_seq_out,
3872 			    seq, (asoc->sending_seq - 1));
3873 			asoc->stream_reset_out_is_outstanding = 1;
3874 			asoc->str_reset = chk;
3875 			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
3876 			stcb->asoc.stream_reset_outstanding++;
3877 		} else {
3878 			/* Can't do it, since we have sent one out */
3879 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
3880 		}
3881 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3882 		asoc->str_reset_seq_in++;
3883 	} else if (asoc->str_reset_seq_in - 1 == seq) {
3884 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3885 	} else if (asoc->str_reset_seq_in - 2 == seq) {
3886 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3887 	} else {
3888 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
3889 	}
3890 }
3891 
3892 static int
3893 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
3894     struct sctp_tmit_chunk *chk,
3895     struct sctp_stream_reset_tsn_request *req)
3896 {
3897 	/* reset all in and out and update the tsn */
3898 	/*
3899 	 * A) reset my str-seq's on in and out. B) Select a receive next,
3900 	 * and set cum-ack to it; also process this selected number as a
3901 	 * fwd-tsn. C) set in the response my next sending seq.
3902 	 */
3903 	struct sctp_forward_tsn_chunk fwdtsn;
3904 	struct sctp_association *asoc = &stcb->asoc;
3905 	int abort_flag = 0;
3906 	uint32_t seq;
3907 
3908 	seq = ntohl(req->request_seq);
3909 	if (asoc->str_reset_seq_in == seq) {
3910 		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
3911 		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
3912 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3913 		} else {
3914 			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3915 			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3916 			fwdtsn.ch.chunk_flags = 0;
3917 			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
3918 			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3919 			if (abort_flag) {
3920 				return (1);
3921 			}
3922 			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
3923 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3924 				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3925 			}
3926 			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
3927 			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
3928 			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
3929 			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
3930 			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
3931 			atomic_add_int(&asoc->sending_seq, 1);
3932 			/* save off historical data for retrans */
3933 			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
3934 			asoc->last_sending_seq[0] = asoc->sending_seq;
3935 			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
3936 			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
3937 			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3938 			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3939 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3940 			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
3941 		}
3942 		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
3943 		                                 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
3944 		asoc->str_reset_seq_in++;
3945 	} else if (asoc->str_reset_seq_in - 1 == seq) {
3946 		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
3947 		                                 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
3948 	} else if (asoc->str_reset_seq_in - 2 == seq) {
3949 		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
3950 		                                 asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
3951 	} else {
3952 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
3953 	}
3954 	return (0);
3955 }
3956 
3957 static void
3958 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
3959     struct sctp_tmit_chunk *chk,
3960     struct sctp_stream_reset_out_request *req, int trunc)
3961 {
3962 	uint32_t seq, tsn;
3963 	int number_entries, len;
3964 	struct sctp_association *asoc = &stcb->asoc;
3965 
3966 	seq = ntohl(req->request_seq);
3967 
3968 	/* now if it's not a duplicate we process it */
3969 	if (asoc->str_reset_seq_in == seq) {
3970 		len = ntohs(req->ph.param_length);
3971 		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
3972 		/*
3973 		 * The sender is resetting; handle the list issue. We must:
3974 		 * a) verify that we can do the reset -- if so, no problem;
3975 		 * b) if we can't do the reset now, copy the request;
3976 		 * c) queue it, and set up the data-in processor to trigger
3977 		 * it when needed and dequeue all the queued data.
3978 		 */
3979 		tsn = ntohl(req->send_reset_at_tsn);
3980 
3981 		/* move the reset action back one */
3982 		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3983 		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
3984 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3985 		} else if (trunc) {
3986 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
3987 		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
3988 			/* we can do it now */
3989 			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3990 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
3991 		} else {
3992 			/*
3993 			 * we must queue it up and thus wait for the TSN's
3994 			 * to arrive that are at or before tsn
3995 			 */
3996 			struct sctp_stream_reset_list *liste;
3997 			int siz;
3998 
3999 			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
4000 			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
4001 				    siz, SCTP_M_STRESET);
4002 			if (liste == NULL) {
4003 				/* gak out of memory */
4004 				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4005 				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4006 				return;
4007 			}
4008 			liste->tsn = tsn;
4009 			liste->number_entries = number_entries;
4010 			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
4011 			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
4012 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4013 		}
4014 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4015 		asoc->str_reset_seq_in++;
4016 	} else if ((asoc->str_reset_seq_in - 1) == seq) {
4017 		/*
4018 		 * one seq back, just echo back last action since my
4019 		 * response was lost.
4020 		 */
4021 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4022 	} else if ((asoc->str_reset_seq_in - 2) == seq) {
4023 		/*
4024 		 * two seq back, just echo back last action since my
4025 		 * response was lost.
4026 		 */
4027 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4028 	} else {
4029 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4030 	}
4031 }
4032 
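/*
 * Despite its name, this handles the peer's ADD_OUT_STREAMS request: the
 * peer wants to send on more streams, so our incoming stream array (strmin)
 * is grown accordingly, up to max_inbound_streams.
 */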
4033 static void
4034 sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
4035 			       struct sctp_stream_reset_add_strm  *str_add)
4036 {
4037 	/*
4038 	 * Peer is requesting to add more streams.
4039 	 * If it's within our max-streams we will
4040 	 * allow it.
4041 	 */
4042 	uint32_t num_stream, i;
4043 	uint32_t seq;
4044 	struct sctp_association *asoc = &stcb->asoc;
4045 	struct sctp_queued_to_read *ctl, *nctl;
4046 
4047 	/* Get the number. */
4048 	seq = ntohl(str_add->request_seq);
4049 	num_stream = ntohs(str_add->number_of_streams);
4050 	/* Now what would be the new total? */
4051 	if (asoc->str_reset_seq_in == seq) {
4052 		num_stream += stcb->asoc.streamincnt;
4053 		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
4054 		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
4055 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4056 		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
4057 		           (num_stream > 0xffff)) {
4058 			/* We must reject it; they ask for too many */
4059   denied:
4060 			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4061 		} else {
4062 			/* Ok, we can do that :-) */
4063 			struct sctp_stream_in *oldstrm;
4064 
4065 			/* save off the old */
4066 			oldstrm = stcb->asoc.strmin;
4067 			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
4068 			            (num_stream * sizeof(struct sctp_stream_in)),
4069 			            SCTP_M_STRMI);
4070 			if (stcb->asoc.strmin == NULL) {
4071 				stcb->asoc.strmin = oldstrm;
4072 				goto denied;
4073 			}
4074 			/* copy off the old data */
4075 			for (i = 0; i < stcb->asoc.streamincnt; i++) {
4076 				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
4077 				stcb->asoc.strmin[i].stream_no = i;
4078 				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
4079 				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
4080 				/* now anything on those queues? */
4081 				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next, nctl) {
4082 					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
4083 					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
4084 				}
4085 			}
4086 			/* Init the new streams */
4087 			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
4088 				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
4089 				stcb->asoc.strmin[i].stream_no = i;
4090 				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
4091 				stcb->asoc.strmin[i].delivery_started = 0;
4092 			}
4093 			SCTP_FREE(oldstrm, SCTP_M_STRMI);
4094 			/* update the size */
4095 			stcb->asoc.streamincnt = num_stream;
4096 			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4097 			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
4098 		}
4099 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4100 		asoc->str_reset_seq_in++;
4101 	} else if ((asoc->str_reset_seq_in - 1) == seq) {
4102 		/*
4103 		 * one seq back, just echo back last action since my
4104 		 * response was lost.
4105 		 */
4106 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4107 	} else if ((asoc->str_reset_seq_in - 2) == seq) {
4108 		/*
4109 		 * two seq back, just echo back last action since my
4110 		 * response was lost.
4111 		 */
4112 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4113 	} else {
4114 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4115 
4116 	}
4117 }
4118 
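/*
 * Despite its name, this handles the peer's ADD_IN_STREAMS request: the
 * peer wants to receive on more streams, so, unless a reset is already in
 * progress, we issue our own request to add that many outgoing streams.
 */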
4119 static void
4120 sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
4121 				   struct sctp_stream_reset_add_strm  *str_add)
4122 {
4123 	/*
4124 	 * Peer is requesting to add more streams.
4125 	 * If it's within our max-streams we will
4126 	 * allow it.
4127 	 */
4128 	uint16_t num_stream;
4129 	uint32_t seq;
4130 	struct sctp_association *asoc = &stcb->asoc;
4131 
4132 	/* Get the number. */
4133 	seq = ntohl(str_add->request_seq);
4134 	num_stream = ntohs(str_add->number_of_streams);
4135 	/* Now what would be the new total? */
4136 	if (asoc->str_reset_seq_in == seq) {
4137 		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
4138 		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
4139 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4140 		} else if (stcb->asoc.stream_reset_outstanding) {
4141 			/* We must reject it; we have something pending */
4142 			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
4143 		} else {
4144 			/* Ok, we can do that :-) */
4145 			int mychk;
4146 			mychk = stcb->asoc.streamoutcnt;
4147 			mychk += num_stream;
4148 			if (mychk < 0x10000) {
4149 				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4150 				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 0, 1, num_stream, 0, 1)) {
4151 					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4152 				}
4153 			} else {
4154 				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4155 			}
4156 		}
4157 		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
4158 		asoc->str_reset_seq_in++;
4159 	} else if ((asoc->str_reset_seq_in - 1) == seq) {
4160 		/*
4161 		 * one seq back, just echo back last action since my
4162 		 * response was lost.
4163 		 */
4164 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4165 	} else if ((asoc->str_reset_seq_in - 2) == seq) {
4166 		/*
4167 		 * two seq back, just echo back last action since my
4168 		 * response was lost.
4169 		 */
4170 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4171 	} else {
4172 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4173 	}
4174 }
4175 
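/*
 * Parse a stream-reset (RE-CONFIG) chunk: walk its parameters, hand each
 * request or response to the handlers above, and build a single response
 * chunk that is queued on the control send queue.  Parameters larger than
 * the local buffer are flagged as truncated so the handlers can deny them.
 */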
4176 #if !defined(__Panda__)
4177 #ifdef __GNUC__
4178 __attribute__ ((noinline))
4179 #endif
4180 #endif
4181 static int
4182 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
4183 			 struct sctp_chunkhdr *ch_req)
4184 {
4185 	int chk_length, param_len, ptype;
4186 	struct sctp_paramhdr pstore;
4187 	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
4188 	uint32_t seq = 0;
4189 	int num_req = 0;
4190 	int trunc = 0;
4191 	struct sctp_tmit_chunk *chk;
4192 	struct sctp_chunkhdr *ch;
4193 	struct sctp_paramhdr *ph;
4194 	int ret_code = 0;
4195 	int num_param = 0;
4196 
4197 	/* now it may be a reset or a reset-response */
4198 	chk_length = ntohs(ch_req->chunk_length);
4199 
4200 	/* setup for adding the response */
4201 	sctp_alloc_a_chunk(stcb, chk);
4202 	if (chk == NULL) {
4203 		return (ret_code);
4204 	}
4205 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
4206 	chk->rec.chunk_id.can_take_data = 0;
4207 	chk->asoc = &stcb->asoc;
4208 	chk->no_fr_allowed = 0;
4209 	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
4210 	chk->book_size_scale = 0;
4211 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
4212 	if (chk->data == NULL) {
4213 	strres_nochunk:
4214 		if (chk->data) {
4215 			sctp_m_freem(chk->data);
4216 			chk->data = NULL;
4217 		}
4218 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
4219 		return (ret_code);
4220 	}
4221 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
4222 
4223 	/* setup chunk parameters */
4224 	chk->sent = SCTP_DATAGRAM_UNSENT;
4225 	chk->snd_count = 0;
4226 	chk->whoTo = NULL;
4227 
4228 	ch = mtod(chk->data, struct sctp_chunkhdr *);
4229 	ch->chunk_type = SCTP_STREAM_RESET;
4230 	ch->chunk_flags = 0;
4231 	ch->chunk_length = htons(chk->send_size);
4232 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
4233 	offset += sizeof(struct sctp_chunkhdr);
4234 	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
4235 		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
4236 		if (ph == NULL)
4237 			break;
4238 		param_len = ntohs(ph->param_length);
4239 		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
4240 			/* bad param */
4241 			break;
4242 		}
4243 		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
4244 							   (uint8_t *)&cstore);
4245 		ptype = ntohs(ph->param_type);
4246 		num_param++;
4247 		if (param_len > (int)sizeof(cstore)) {
4248 			trunc = 1;
4249 		} else {
4250 			trunc = 0;
4251 		}
4252 		if (num_param > SCTP_MAX_RESET_PARAMS) {
4253 			/* hit the max number of parameters already, sorry */
4254 			break;
4255 		}
4256 		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
4257 			struct sctp_stream_reset_out_request *req_out;
4258 			req_out = (struct sctp_stream_reset_out_request *)ph;
4259 			num_req++;
4260 			if (stcb->asoc.stream_reset_outstanding) {
4261 				seq = ntohl(req_out->response_seq);
4262 				if (seq == stcb->asoc.str_reset_seq_out) {
4263 					/* implicit ack */
4264 					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
4265 				}
4266 			}
4267 			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
4268 		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
4269 			struct sctp_stream_reset_add_strm  *str_add;
4270 			str_add = (struct sctp_stream_reset_add_strm  *)ph;
4271 			num_req++;
4272 			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
4273 		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
4274 			struct sctp_stream_reset_add_strm  *str_add;
4275 			str_add = (struct sctp_stream_reset_add_strm  *)ph;
4276 			num_req++;
4277 			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
4278 		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
4279 			struct sctp_stream_reset_in_request *req_in;
4280 			num_req++;
4281 			req_in = (struct sctp_stream_reset_in_request *)ph;
4282 			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
4283 		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
4284 			struct sctp_stream_reset_tsn_request *req_tsn;
4285 			num_req++;
4286 			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
4287 			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
4288 				ret_code = 1;
4289 				goto strres_nochunk;
4290 			}
4291 			/* no more */
4292 			break;
4293 		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
4294 			struct sctp_stream_reset_response *resp;
4295 			uint32_t result;
4296 			resp = (struct sctp_stream_reset_response *)ph;
4297 			seq = ntohl(resp->response_seq);
4298 			result = ntohl(resp->result);
4299 			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
4300 				ret_code = 1;
4301 				goto strres_nochunk;
4302 			}
4303 		} else {
4304 			break;
4305 		}
4306 		offset += SCTP_SIZE32(param_len);
4307 		chk_length -= SCTP_SIZE32(param_len);
4308 	}
4309 	if (num_req == 0) {
4310 		/* we have no response; free the stuff */
4311 		goto strres_nochunk;
4312 	}
4313 	/* ok we have a chunk to link in */
4314 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
4315 			  chk,
4316 			  sctp_next);
4317 	stcb->asoc.ctrl_queue_cnt++;
4318 	return (ret_code);
4319 }
4320 
4321 /*
4322  * Handle a router's or endpoint's report of a packet loss. There are two
4323  * ways to handle this: either we get the whole packet and must dissect it
4324  * ourselves (possibly with truncation and/or corruption), or it is a
4325  * summary from a middle box that did the dissecting for us.
4326  */
4327 static void
4328 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4329     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4330 {
4331 	uint32_t bottle_bw, on_queue;
4332 	uint16_t trunc_len;
4333 	unsigned int chlen;
4334 	unsigned int at;
4335 	struct sctp_chunk_desc desc;
4336 	struct sctp_chunkhdr *ch;
4337 
4338 	chlen = ntohs(cp->ch.chunk_length);
4339 	chlen -= sizeof(struct sctp_pktdrop_chunk);
4340 	/* XXX possible chlen underflow */
4341 	if (chlen == 0) {
4342 		ch = NULL;
4343 		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4344 			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4345 	} else {
4346 		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4347 		chlen -= sizeof(struct sctphdr);
4348 		/* XXX possible chlen underflow */
4349 		memset(&desc, 0, sizeof(desc));
4350 	}
4351 	trunc_len = (uint16_t) ntohs(cp->trunc_len);
4352 	if (trunc_len > limit) {
4353 		trunc_len = limit;
4354 	}
4355 
4356 	/* now the chunks themselves */
4357 	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4358 		desc.chunk_type = ch->chunk_type;
4359 		/* get amount we need to move */
4360 		at = ntohs(ch->chunk_length);
4361 		if (at < sizeof(struct sctp_chunkhdr)) {
4362 			/* corrupt chunk, maybe at the end? */
4363 			SCTP_STAT_INCR(sctps_pdrpcrupt);
4364 			break;
4365 		}
4366 		if (trunc_len == 0) {
4367 			/* we are supposed to have all of it */
4368 			if (at > chlen) {
4369 				/* corrupt, skip it */
4370 				SCTP_STAT_INCR(sctps_pdrpcrupt);
4371 				break;
4372 			}
4373 		} else {
4374 			/* is there enough of it left ? */
4375 			if (desc.chunk_type == SCTP_DATA) {
4376 				if (chlen < (sizeof(struct sctp_data_chunk) +
4377 				    sizeof(desc.data_bytes))) {
4378 					break;
4379 				}
4380 			} else {
4381 				if (chlen < sizeof(struct sctp_chunkhdr)) {
4382 					break;
4383 				}
4384 			}
4385 		}
4386 		if (desc.chunk_type == SCTP_DATA) {
4387 			/* can we get out the tsn? */
4388 			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4389 				SCTP_STAT_INCR(sctps_pdrpmbda);
4390 
4391 			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4392 				/* yep */
4393 				struct sctp_data_chunk *dcp;
4394 				uint8_t *ddp;
4395 				unsigned int iii;
4396 
4397 				dcp = (struct sctp_data_chunk *)ch;
4398 				ddp = (uint8_t *) (dcp + 1);
4399 				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4400 					desc.data_bytes[iii] = ddp[iii];
4401 				}
4402 				desc.tsn_ifany = dcp->dp.tsn;
4403 			} else {
4404 				/* nope we are done. */
4405 				SCTP_STAT_INCR(sctps_pdrpnedat);
4406 				break;
4407 			}
4408 		} else {
4409 			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4410 				SCTP_STAT_INCR(sctps_pdrpmbct);
4411 		}
4412 
4413 		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4414 			SCTP_STAT_INCR(sctps_pdrppdbrk);
4415 			break;
4416 		}
4417 		if (SCTP_SIZE32(at) > chlen) {
4418 			break;
4419 		}
4420 		chlen -= SCTP_SIZE32(at);
4421 		if (chlen < sizeof(struct sctp_chunkhdr)) {
4422 			/* done, none left */
4423 			break;
4424 		}
4425 		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4426 	}
4427 	/* Now update any rwnd --- possibly */
4428 	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4429 		/* From a peer, we get a rwnd report */
4430 		uint32_t a_rwnd;
4431 
4432 		SCTP_STAT_INCR(sctps_pdrpfehos);
4433 
4434 		bottle_bw = ntohl(cp->bottle_bw);
4435 		on_queue = ntohl(cp->current_onq);
4436 		if (bottle_bw && on_queue) {
4437 			/* a rwnd report is in here */
4438 			if (bottle_bw > on_queue)
4439 				a_rwnd = bottle_bw - on_queue;
4440 			else
4441 				a_rwnd = 0;
4442 
4443 			if (a_rwnd == 0)
4444 				stcb->asoc.peers_rwnd = 0;
4445 			else {
4446 				if (a_rwnd > stcb->asoc.total_flight) {
4447 					stcb->asoc.peers_rwnd =
4448 					    a_rwnd - stcb->asoc.total_flight;
4449 				} else {
4450 					stcb->asoc.peers_rwnd = 0;
4451 				}
4452 				if (stcb->asoc.peers_rwnd <
4453 				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4454 					/* SWS sender side engages */
4455 					stcb->asoc.peers_rwnd = 0;
4456 				}
4457 			}
4458 		}
4459 	} else {
4460 		SCTP_STAT_INCR(sctps_pdrpfmbox);
4461 	}
4462 
4463 	/* now middle boxes in sat networks get a cwnd bump */
4464 	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4465 	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4466 	    (stcb->asoc.sat_network)) {
4467 		/*
4468 		 * This is debatable, but for sat networks it makes sense.
4469 		 * Note that if a T3 timer has gone off, we will prohibit any
4470 		 * changes to cwnd until we exit t3 loss recovery.
4471 		 */
4472 		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4473 			net, cp, &bottle_bw, &on_queue);
4474 	}
4475 }
4476 
4477 /*
4478  * Handles all control chunks in a packet.
4479  * Inputs: m - mbuf chain, assumed to still contain the IP/SCTP header;
4480  *         stcb - the tcb found for this packet; offset - offset into the
4481  *         mbuf chain to the first chunk header; length - length of the complete packet.
4482  * Outputs: length - modified to the remaining length after control processing;
4483  *          netp - modified to the new sctp_nets after COOKIE-ECHO processing.
4484  * Returns NULL to discard the packet (i.e. no asoc, bad packet, ...); otherwise the tcb for this packet.
4485  */
4486 #if !defined(__Panda__)
4487 #ifdef __GNUC__
4488 __attribute__ ((noinline))
4489 #endif
4490 #endif
4491 static struct sctp_tcb *
4492 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4493     struct sockaddr *src, struct sockaddr *dst,
4494     struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4495     struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4496 #if defined(__FreeBSD__)
4497     uint8_t use_mflowid, uint32_t mflowid,
4498 #endif
4499     uint32_t vrf_id, uint16_t port)
4500 {
4501 	struct sctp_association *asoc;
4502 	struct mbuf *op_err;
4503 	char msg[SCTP_DIAG_INFO_LEN];
4504 	uint32_t vtag_in;
4505 	int num_chunks = 0;	/* number of control chunks processed */
4506 	uint32_t chk_length;
4507 	int ret;
4508 	int abort_no_unlock = 0;
4509 	int ecne_seen = 0;
4510 	/*
4511 	 * How big should this be, and should it be alloc'd? Let's try the
4512 	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4513 	 * until we get into jumbo grams and such.
4514 	 */
4515 	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4516 	struct sctp_tcb *locked_tcb = stcb;
4517 	int got_auth = 0;
4518 	uint32_t auth_offset = 0, auth_len = 0;
4519 	int auth_skipped = 0;
4520 	int asconf_cnt = 0;
4521 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4522 	struct socket *so;
4523 #endif
4524 
4525 	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4526 		iphlen, *offset, length, (void *)stcb);
4527 
4528 	/* validate chunk header length... */
4529 	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4530 		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4531 			ntohs(ch->chunk_length));
4532 		if (locked_tcb) {
4533 			SCTP_TCB_UNLOCK(locked_tcb);
4534 		}
4535 		return (NULL);
4536 	}
4537 	/*
4538 	 * validate the verification tag
4539 	 */
4540 	vtag_in = ntohl(sh->v_tag);
4541 
4542 	if (locked_tcb) {
4543 		SCTP_TCB_LOCK_ASSERT(locked_tcb);
4544 	}
4545 	if (ch->chunk_type == SCTP_INITIATION) {
4546 		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4547 			ntohs(ch->chunk_length), vtag_in);
4548 		if (vtag_in != 0) {
4549 			/* protocol error- silently discard... */
4550 			SCTP_STAT_INCR(sctps_badvtag);
4551 			if (locked_tcb) {
4552 				SCTP_TCB_UNLOCK(locked_tcb);
4553 			}
4554 			return (NULL);
4555 		}
4556 	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4557 		/*
4558 		 * If there is no stcb, skip the AUTH chunk and process
4559 		 * later, after a stcb is found (to validate that the lookup
4560 		 * was valid).
4561 		 */
4562 		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4563 		    (stcb == NULL) &&
4564 		    (inp->auth_supported == 1)) {
4565 			/* save this chunk for later processing */
4566 			auth_skipped = 1;
4567 			auth_offset = *offset;
4568 			auth_len = ntohs(ch->chunk_length);
4569 
4570 			/* (temporarily) move past this chunk */
4571 			*offset += SCTP_SIZE32(auth_len);
4572 			if (*offset >= length) {
4573 				/* no more data left in the mbuf chain */
4574 				*offset = length;
4575 				if (locked_tcb) {
4576 					SCTP_TCB_UNLOCK(locked_tcb);
4577 				}
4578 				return (NULL);
4579 			}
4580 			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4581 								   sizeof(struct sctp_chunkhdr), chunk_buf);
4582 		}
4583 		if (ch == NULL) {
4584 			/* Help */
4585 			*offset = length;
4586 			if (locked_tcb) {
4587 				SCTP_TCB_UNLOCK(locked_tcb);
4588 			}
4589 			return (NULL);
4590 		}
4591 		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4592 			goto process_control_chunks;
4593 		}
4594 		/*
4595 		 * First check if it's an ASCONF with an unknown source address;
4596 		 * we need to look inside it to find the association.
4597 		 */
4598 		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4599 			struct sctp_chunkhdr *asconf_ch = ch;
4600 			uint32_t asconf_offset = 0, asconf_len = 0;
4601 
4602 			/* inp's refcount may be reduced */
4603 			SCTP_INP_INCR_REF(inp);
4604 
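			/*
			 * Walk any ASCONF chunks in the packet, using the lookup
			 * address inside each one to try to find the association.
			 */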
4605 			asconf_offset = *offset;
4606 			do {
4607 				asconf_len = ntohs(asconf_ch->chunk_length);
4608 				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4609 					break;
4610 				stcb = sctp_findassociation_ep_asconf(m,
4611 				                                      *offset,
4612 				                                      dst,
4613 				                                      sh, &inp, netp, vrf_id);
4614 				if (stcb != NULL)
4615 					break;
4616 				asconf_offset += SCTP_SIZE32(asconf_len);
4617 				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4618 										  sizeof(struct sctp_chunkhdr), chunk_buf);
4619 			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4620 			if (stcb == NULL) {
4621 				/*
4622 				 * reduce inp's refcount if not reduced in
4623 				 * sctp_findassociation_ep_asconf().
4624 				 */
4625 				SCTP_INP_DECR_REF(inp);
4626 			} else {
4627 				locked_tcb = stcb;
4628 			}
4629 
4630 			/* now go back and verify any auth chunk to be sure */
4631 			if (auth_skipped && (stcb != NULL)) {
4632 				struct sctp_auth_chunk *auth;
4633 
4634 				auth = (struct sctp_auth_chunk *)
4635 					sctp_m_getptr(m, auth_offset,
4636 						      auth_len, chunk_buf);
4637 				got_auth = 1;
4638 				auth_skipped = 0;
4639 				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4640 								       auth_offset)) {
4641 					/* auth HMAC failed so dump it */
4642 					*offset = length;
4643 					if (locked_tcb) {
4644 						SCTP_TCB_UNLOCK(locked_tcb);
4645 					}
4646 					return (NULL);
4647 				} else {
4648 					/* remaining chunks are HMAC checked */
4649 					stcb->asoc.authenticated = 1;
4650 				}
4651 			}
4652 		}
4653 		if (stcb == NULL) {
4654 			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
4655 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4656 			                             msg);
4657 			/* no association, so it's out of the blue... */
4658 			sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
4659 #if defined(__FreeBSD__)
4660 			                 use_mflowid, mflowid,
4661 #endif
4662 					 vrf_id, port);
4663 			*offset = length;
4664 			if (locked_tcb) {
4665 				SCTP_TCB_UNLOCK(locked_tcb);
4666 			}
4667 			return (NULL);
4668 		}
4669 		asoc = &stcb->asoc;
4670 		/* ABORT and SHUTDOWN can use either v_tag... */
4671 		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4672 		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4673 		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4674 			/* Always take the T-bit into account. */
4675 			if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
4676 			     (vtag_in == asoc->my_vtag)) ||
4677 			    (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
4678 			     (vtag_in == asoc->peer_vtag))) {
4679 				/* this is valid */
4680 			} else {
4681 				/* drop this packet... */
4682 				SCTP_STAT_INCR(sctps_badvtag);
4683 				if (locked_tcb) {
4684 					SCTP_TCB_UNLOCK(locked_tcb);
4685 				}
4686 				return (NULL);
4687 			}
4688 		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4689 			if (vtag_in != asoc->my_vtag) {
4690 				/*
4691 				 * this could be a stale SHUTDOWN-ACK or the
4692 				 * peer never got the SHUTDOWN-COMPLETE and
4693 				 * is still hung; we have started a new asoc
4694 				 * but it won't complete until the shutdown
4695 				 * is completed
4696 				 */
4697 				if (locked_tcb) {
4698 					SCTP_TCB_UNLOCK(locked_tcb);
4699 				}
4700 				snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
4701 				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4702 				                             msg);
4703 				sctp_handle_ootb(m, iphlen, *offset, src, dst,
4704 				                 sh, inp, op_err,
4705 #if defined(__FreeBSD__)
4706 				                 use_mflowid, mflowid,
4707 #endif
4708 				                 vrf_id, port);
4709 				return (NULL);
4710 			}
4711 		} else {
4712 			/* for all other chunks, vtag must match */
4713 			if (vtag_in != asoc->my_vtag) {
4714 				/* invalid vtag... */
4715 				SCTPDBG(SCTP_DEBUG_INPUT3,
4716 					"invalid vtag: %xh, expect %xh\n",
4717 					vtag_in, asoc->my_vtag);
4718 				SCTP_STAT_INCR(sctps_badvtag);
4719 				if (locked_tcb) {
4720 					SCTP_TCB_UNLOCK(locked_tcb);
4721 				}
4722 				*offset = length;
4723 				return (NULL);
4724 			}
4725 		}
4726 	}			/* end if !SCTP_COOKIE_ECHO */
4727 	/*
4728 	 * process all control chunks...
4729 	 */
4730 	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4731 	     (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4732 	     (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4733 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4734 		/* implied cookie-ack... we must have lost the ack */
4735 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4736 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4737 				       stcb->asoc.overall_error_count,
4738 				       0,
4739 				       SCTP_FROM_SCTP_INPUT,
4740 				       __LINE__);
4741 		}
4742 		stcb->asoc.overall_error_count = 0;
4743 		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4744 				       *netp);
4745 	}
4746 
4747  process_control_chunks:
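	/*
	 * Main dispatch loop: pull each chunk (header or full chunk) into
	 * chunk_buf, handle it by type, then advance by the 32-bit padded
	 * chunk length until the packet is exhausted.
	 */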
4748 	while (IS_SCTP_CONTROL(ch)) {
4749 		/* validate chunk length */
4750 		chk_length = ntohs(ch->chunk_length);
4751 		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4752 			ch->chunk_type, chk_length);
4753 		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4754 		if (chk_length < sizeof(*ch) ||
4755 		    (*offset + (int)chk_length) > length) {
4756 			*offset = length;
4757 			if (locked_tcb) {
4758 				SCTP_TCB_UNLOCK(locked_tcb);
4759 			}
4760 			return (NULL);
4761 		}
4762 		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4763 		/*
4764 		 * INIT-ACK only gets the init-ack "header" portion, because we
4765 		 * don't have to process the peer's COOKIE. All
4766 		 * others get a complete chunk.
4767 		 */
4768 		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4769 		    (ch->chunk_type == SCTP_INITIATION)) {
4770 			/* get an init-ack chunk */
4771 			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4772 								   sizeof(struct sctp_init_ack_chunk), chunk_buf);
4773 			if (ch == NULL) {
4774 				*offset = length;
4775 				if (locked_tcb) {
4776 					SCTP_TCB_UNLOCK(locked_tcb);
4777 				}
4778 				return (NULL);
4779 			}
4780 		} else {
4781 			/* For cookies and all other chunks. */
4782 			if (chk_length > sizeof(chunk_buf)) {
4783 				/*
4784 				 * use just the size of the chunk buffer
4785 				 * so the front part of our chunks fit in
4786 				 * contiguous space up to the chunk buffer
4787 				 * size (508 bytes).
4788 				 * For chunks that need more than that,
4789 				 * they must use the sctp_m_getptr() function
4790 				 * or other means (e.g. know how to parse mbuf
4791 				 * chains). Cookies do this already.
4792 				 */
4793 				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4794 									   (sizeof(chunk_buf) - 4),
4795 									   chunk_buf);
4796 				if (ch == NULL) {
4797 					*offset = length;
4798 					if (locked_tcb) {
4799 						SCTP_TCB_UNLOCK(locked_tcb);
4800 					}
4801 					return (NULL);
4802 				}
4803 			} else {
4804 				/* We can fit it all */
4805 				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4806 								   chk_length, chunk_buf);
4807 				if (ch == NULL) {
4808 					SCTP_PRINTF("sctp_process_control: Can't get all the data...\n");
4809 					*offset = length;
4810 					if (locked_tcb) {
4811 						SCTP_TCB_UNLOCK(locked_tcb);
4812 					}
4813 					return (NULL);
4814 				}
4815 			}
4816 		}
4817 		num_chunks++;
4818 		/* Save off the last place we got a control from */
4819 		if (stcb != NULL) {
4820 			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4821 				/*
4822 				 * allow last_control to be NULL if
4823 				 * ASCONF... ASCONF processing will find the
4824 				 * right net later
4825 				 */
4826 				if ((netp != NULL) && (*netp != NULL))
4827 					stcb->asoc.last_control_chunk_from = *netp;
4828 			}
4829 		}
4830 #ifdef SCTP_AUDITING_ENABLED
4831 		sctp_audit_log(0xB0, ch->chunk_type);
4832 #endif
4833 
4834 		/* check to see if this chunk required auth, but isn't */
4835 		if ((stcb != NULL) &&
4836 		    (stcb->asoc.auth_supported == 1) &&
4837 		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4838 		    !stcb->asoc.authenticated) {
4839 			/* "silently" ignore */
4840 			SCTP_STAT_INCR(sctps_recvauthmissing);
4841 			goto next_chunk;
4842 		}
4843 		switch (ch->chunk_type) {
4844 		case SCTP_INITIATION:
4845 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4846 			/* The INIT chunk must be the only chunk. */
4847 			if ((num_chunks > 1) ||
4848 			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4849 				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4850 				                             "INIT not the only chunk");
4851 				sctp_abort_association(inp, stcb, m, iphlen,
4852 				                       src, dst, sh, op_err,
4853 #if defined(__FreeBSD__)
4854 				                       use_mflowid, mflowid,
4855 #endif
4856 				                       vrf_id, port);
4857 				*offset = length;
4858 				return (NULL);
4859 			}
4860 			/* Honor our resource limit. */
4861 			if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
4862 				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
4863 				sctp_abort_association(inp, stcb, m, iphlen,
4864 						       src, dst, sh, op_err,
4865 #if defined(__FreeBSD__)
4866 				                       use_mflowid, mflowid,
4867 #endif
4868 				                       vrf_id, port);
4869 				*offset = length;
4870 				return (NULL);
4871 			}
4872 			sctp_handle_init(m, iphlen, *offset, src, dst, sh,
4873 			                 (struct sctp_init_chunk *)ch, inp,
4874 			                 stcb, &abort_no_unlock,
4875 #if defined(__FreeBSD__)
4876 			                 use_mflowid, mflowid,
4877 #endif
4878 			                 vrf_id, port);
4879 			*offset = length;
4880 			if ((!abort_no_unlock) && (locked_tcb)) {
4881 				SCTP_TCB_UNLOCK(locked_tcb);
4882 			}
4883 			return (NULL);
4884 			break;
4885 		case SCTP_PAD_CHUNK:
4886 			break;
4887 		case SCTP_INITIATION_ACK:
4888 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4889 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4890 				/* We are not interested anymore */
4891 				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4892 					;
4893 				} else {
4894 					if (locked_tcb != stcb) {
4895 						/* Very unlikely */
4896 						SCTP_TCB_UNLOCK(locked_tcb);
4897 					}
4898 					*offset = length;
4899 					if (stcb) {
4900 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4901 						so = SCTP_INP_SO(inp);
4902 						atomic_add_int(&stcb->asoc.refcnt, 1);
4903 						SCTP_TCB_UNLOCK(stcb);
4904 						SCTP_SOCKET_LOCK(so, 1);
4905 						SCTP_TCB_LOCK(stcb);
4906 						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4907 #endif
4908 						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT+SCTP_LOC_27);
4909 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4910 						SCTP_SOCKET_UNLOCK(so, 1);
4911 #endif
4912 					}
4913 					return (NULL);
4914 				}
4915 			}
4916 			/* The INIT-ACK chunk must be the only chunk. */
4917 			if ((num_chunks > 1) ||
4918 			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4919 				*offset = length;
4920 				if (locked_tcb) {
4921 					SCTP_TCB_UNLOCK(locked_tcb);
4922 				}
4923 				return (NULL);
4924 			}
4925 			if ((netp) && (*netp)) {
4926 				ret = sctp_handle_init_ack(m, iphlen, *offset,
4927 				                           src, dst, sh,
4928 				                           (struct sctp_init_ack_chunk *)ch,
4929 				                           stcb, *netp,
4930 				                           &abort_no_unlock,
4931 #if defined(__FreeBSD__)
4932 				                           use_mflowid, mflowid,
4933 #endif
4934 				                           vrf_id);
4935 			} else {
4936 				ret = -1;
4937 			}
4938 			*offset = length;
4939 			if (abort_no_unlock) {
4940 				return (NULL);
4941 			}
4942 			/*
4943 			 * Special case, I must call the output routine to
4944 			 * get the cookie echoed
4945 			 */
4946 			if ((stcb != NULL) && (ret == 0)) {
4947 				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4948 			}
4949 			if (locked_tcb) {
4950 				SCTP_TCB_UNLOCK(locked_tcb);
4951 			}
4952 			return (NULL);
4953 			break;
4954 		case SCTP_SELECTIVE_ACK:
4955 			{
4956 				struct sctp_sack_chunk *sack;
4957 				int abort_now = 0;
4958 				uint32_t a_rwnd, cum_ack;
4959 				uint16_t num_seg, num_dup;
4960 				uint8_t flags;
4961 				int offset_seg, offset_dup;
4962 
4963 				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4964 				SCTP_STAT_INCR(sctps_recvsacks);
4965 				if (stcb == NULL) {
4966 					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
4967 					break;
4968 				}
4969 				if (chk_length < sizeof(struct sctp_sack_chunk)) {
4970 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
4971 					break;
4972 				}
4973 				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4974 					/*-
4975 					 * If we have sent a shutdown-ack, we will pay no
4976 					 * attention to a sack sent in to us since
4977 					 * we don't care anymore.
4978 					 */
4979 					break;
4980 				}
4981 				sack = (struct sctp_sack_chunk *)ch;
4982 				flags = ch->chunk_flags;
4983 				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4984 				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4985 				num_dup = ntohs(sack->sack.num_dup_tsns);
4986 				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
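				/*
				 * The chunk length must exactly cover the SACK header
				 * plus the advertised gap-ack blocks and duplicate TSNs.
				 */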
4987 				if (sizeof(struct sctp_sack_chunk) +
4988 				    num_seg * sizeof(struct sctp_gap_ack_block) +
4989 				    num_dup * sizeof(uint32_t) != chk_length) {
4990 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
4991 					break;
4992 				}
4993 				offset_seg = *offset + sizeof(struct sctp_sack_chunk);
4994 				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4995 				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4996 				        cum_ack, num_seg, a_rwnd);
4997 				stcb->asoc.seen_a_sack_this_pkt = 1;
4998 				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4999 				    (num_seg == 0) &&
5000 				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
5001 				    (stcb->asoc.saw_sack_with_frags == 0) &&
5002 				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
5003 				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
5004 					) {
5005 					/* We have a SIMPLE sack with no gap-ack segments and
5006 					 * data on the sent queue to be acked. Use the faster-
5007 					 * path sack processing. We also allow window-update
5008 					 * sacks with no missing segments to go this way too.
5009 					 */
5010 					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen);
5011 				} else {
5012 					if (netp && *netp)
5013 						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
5014 								 num_seg, 0, num_dup, &abort_now, flags,
5015 								 cum_ack, a_rwnd, ecne_seen);
5016 				}
5017 				if (abort_now) {
5018 					/* ABORT signal from sack processing */
5019 					*offset = length;
5020 					return (NULL);
5021 				}
5022 				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
5023 				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
5024 				    (stcb->asoc.stream_queue_cnt == 0)) {
5025 					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb,  0, NULL, SCTP_SO_NOT_LOCKED);
5026 				}
5027 			}
5028 			break;
5029 		/* EY - nr_sack:  If the received chunk is an nr_sack chunk */
5030 		case SCTP_NR_SELECTIVE_ACK:
5031 			{
5032 				struct sctp_nr_sack_chunk *nr_sack;
5033 				int abort_now = 0;
5034 				uint32_t a_rwnd, cum_ack;
5035 				uint16_t num_seg, num_nr_seg, num_dup;
5036 				uint8_t flags;
5037 				int offset_seg, offset_dup;
5038 
5039 				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
5040 				SCTP_STAT_INCR(sctps_recvsacks);
5041 				if (stcb == NULL) {
5042 					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
5043 					break;
5044 				}
5045 				if (stcb->asoc.nrsack_supported == 0) {
5046 					goto unknown_chunk;
5047 				}
5048 				if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
5049 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
5050 					break;
5051 				}
5052 				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
5053 					/*-
5054 					 * If we have sent a shutdown-ack, we will pay no
5055 					 * attention to a sack sent in to us since
5056 					 * we don't care anymore.
5057 					 */
5058 					break;
5059 				}
5060 				nr_sack = (struct sctp_nr_sack_chunk *)ch;
5061 				flags = ch->chunk_flags;
5062 				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
5063 				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
5064 				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
5065 				num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
5066 				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
5067 				if (sizeof(struct sctp_nr_sack_chunk) +
5068 				    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
5069 				    num_dup * sizeof(uint32_t) != chk_length) {
5070 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
5071 					break;
5072 				}
5073 				offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
5074 				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
5075 				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
5076 				        cum_ack, num_seg, a_rwnd);
5077 				stcb->asoc.seen_a_sack_this_pkt = 1;
5078 				if ((stcb->asoc.pr_sctp_cnt == 0) &&
5079 				    (num_seg == 0) && (num_nr_seg == 0) &&
5080 				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
5081 				    (stcb->asoc.saw_sack_with_frags == 0) &&
5082 				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
5083 				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
5084 					/*
5085 					 * We have a SIMPLE sack having no
5086 					 * prior segments and data on sent
5087 					 * queue to be acked. Use the
5088 					 * faster path sack processing. We
5089 					 * also allow window update sacks
5090 					 * with no missing segments to go
5091 					 * this way too.
5092 					 */
5093 					sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
5094 					                         &abort_now, ecne_seen);
5095 				} else {
5096 					if (netp && *netp)
5097 						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
5098 						                 num_seg, num_nr_seg, num_dup, &abort_now, flags,
5099 						                 cum_ack, a_rwnd, ecne_seen);
5100 				}
5101 				if (abort_now) {
5102 					/* ABORT signal from sack processing */
5103 					*offset = length;
5104 					return (NULL);
5105 				}
5106 				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
5107 				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
5108 				    (stcb->asoc.stream_queue_cnt == 0)) {
5109 					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb,  0, NULL, SCTP_SO_NOT_LOCKED);
5110 				}
5111 			}
5112 			break;
5113 
5114 		case SCTP_HEARTBEAT_REQUEST:
5115 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
5116 			if ((stcb) && netp && *netp) {
5117 				SCTP_STAT_INCR(sctps_recvheartbeat);
5118 				sctp_send_heartbeat_ack(stcb, m, *offset,
5119 							chk_length, *netp);
5120 
5121 				/* He's alive so give him credit */
5122 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5123 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5124 						       stcb->asoc.overall_error_count,
5125 						       0,
5126 						       SCTP_FROM_SCTP_INPUT,
5127 						       __LINE__);
5128 				}
5129 				stcb->asoc.overall_error_count = 0;
5130 			}
5131 			break;
5132 		case SCTP_HEARTBEAT_ACK:
5133 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
5134 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
5135 				/* It's not ours */
5136 				*offset = length;
5137 				if (locked_tcb) {
5138 					SCTP_TCB_UNLOCK(locked_tcb);
5139 				}
5140 				return (NULL);
5141 			}
5142 			/* He's alive so give him credit */
5143 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5144 				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5145 					       stcb->asoc.overall_error_count,
5146 					       0,
5147 					       SCTP_FROM_SCTP_INPUT,
5148 					       __LINE__);
5149 			}
5150 			stcb->asoc.overall_error_count = 0;
5151 			SCTP_STAT_INCR(sctps_recvheartbeatack);
5152 			if (netp && *netp)
5153 				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
5154 							  stcb, *netp);
5155 			break;
5156 		case SCTP_ABORT_ASSOCIATION:
5157 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
5158 				(void *)stcb);
5159 			if ((stcb) && netp && *netp)
5160 				sctp_handle_abort((struct sctp_abort_chunk *)ch,
5161 						  stcb, *netp);
5162 			*offset = length;
5163 			return (NULL);
5164 			break;
5165 		case SCTP_SHUTDOWN:
5166 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
5167 				(void *)stcb);
5168 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
5169 				*offset = length;
5170 				if (locked_tcb) {
5171 					SCTP_TCB_UNLOCK(locked_tcb);
5172 				}
5173 				return (NULL);
5174 			}
5175 			if (netp && *netp) {
5176 				int abort_flag = 0;
5177 
5178 				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
5179 						     stcb, *netp, &abort_flag);
5180 				if (abort_flag) {
5181 					*offset = length;
5182 					return (NULL);
5183 				}
5184 			}
5185 			break;
5186 		case SCTP_SHUTDOWN_ACK:
5187 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", (void *)stcb);
5188 			if ((stcb) && (netp) && (*netp))
5189 				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
5190 			*offset = length;
5191 			return (NULL);
5192 			break;
5193 
5194 		case SCTP_OPERATION_ERROR:
5195 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
5196 			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
5197 				*offset = length;
5198 				return (NULL);
5199 			}
5200 			break;
5201 		case SCTP_COOKIE_ECHO:
5202 			SCTPDBG(SCTP_DEBUG_INPUT3,
5203 				"SCTP_COOKIE-ECHO, stcb %p\n", (void *)stcb);
5204 			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5205 				;
5206 			} else {
5207 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5208 					/* We are not interested anymore */
5209 				abend:
5210 					if (stcb) {
5211 						SCTP_TCB_UNLOCK(stcb);
5212 					}
5213 					*offset = length;
5214 					return (NULL);
5215 				}
5216 			}
5217 			/*
5218 			 * First, are we accepting? We check this again here
5219 			 * since it is possible that a previous endpoint WAS
5220 			 * listening, responded to an INIT-ACK, and then
5221 			 * closed. We opened and bound... and are now no
5222 			 * longer listening.
5223 			 */
5224 
5225 			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
5226 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
5227 				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
5228 					op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5229 					sctp_abort_association(inp, stcb, m, iphlen,
5230 					                       src, dst, sh, op_err,
5231 #if defined(__FreeBSD__)
5232 					                       use_mflowid, mflowid,
5233 #endif
5234 					                       vrf_id, port);
5235 				}
5236 				*offset = length;
5237 				return (NULL);
5238 			} else {
5239 				struct mbuf *ret_buf;
5240 				struct sctp_inpcb *linp;
5241 				if (stcb) {
5242 					linp = NULL;
5243 				} else {
5244 					linp = inp;
5245 				}
5246 
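				/*
				 * When there is no stcb yet, hold the endpoint's
				 * association-create lock so a concurrent socket close
				 * cannot race with the association this COOKIE-ECHO
				 * may create.
				 */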
5247 				if (linp) {
5248 					SCTP_ASOC_CREATE_LOCK(linp);
5249 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5250 					    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5251 						SCTP_ASOC_CREATE_UNLOCK(linp);
5252 						goto abend;
5253 					}
5254 				}
5255 
5256 				if (netp) {
5257 					ret_buf =
5258 						sctp_handle_cookie_echo(m, iphlen,
5259 						                        *offset,
5260 						                        src, dst,
5261 						                        sh,
5262 						                        (struct sctp_cookie_echo_chunk *)ch,
5263 						                        &inp, &stcb, netp,
5264 						                        auth_skipped,
5265 						                        auth_offset,
5266 						                        auth_len,
5267 						                        &locked_tcb,
5268 #if defined(__FreeBSD__)
5269 						                        use_mflowid,
5270 						                        mflowid,
5271 #endif
5272 						                        vrf_id,
5273 						                        port);
5274 				} else {
5275 					ret_buf = NULL;
5276 				}
5277 				if (linp) {
5278 					SCTP_ASOC_CREATE_UNLOCK(linp);
5279 				}
5280 				if (ret_buf == NULL) {
5281 					if (locked_tcb) {
5282 						SCTP_TCB_UNLOCK(locked_tcb);
5283 					}
5284 					SCTPDBG(SCTP_DEBUG_INPUT3,
5285 						"GAK, null buffer\n");
5286 					*offset = length;
5287 					return (NULL);
5288 				}
5289 				/* if AUTH skipped, see if it verified... */
5290 				if (auth_skipped) {
5291 					got_auth = 1;
5292 					auth_skipped = 0;
5293 				}
5294 				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
5295 					/*
5296 					 * Restart the timer if we have
5297 					 * pending data
5298 					 */
5299 					struct sctp_tmit_chunk *chk;
5300 
5301 					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
5302 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
5303 				}
5304 			}
5305 			break;
5306 		case SCTP_COOKIE_ACK:
5307 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", (void *)stcb);
5308 			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5309 				if (locked_tcb) {
5310 					SCTP_TCB_UNLOCK(locked_tcb);
5311 				}
5312 				return (NULL);
5313 			}
5314 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5315 				/* We are not interested anymore */
5316 				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5317 					;
5318 				} else if (stcb) {
5319 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5320 					so = SCTP_INP_SO(inp);
5321 					atomic_add_int(&stcb->asoc.refcnt, 1);
5322 					SCTP_TCB_UNLOCK(stcb);
5323 					SCTP_SOCKET_LOCK(so, 1);
5324 					SCTP_TCB_LOCK(stcb);
5325 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5326 #endif
5327 					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT+SCTP_LOC_27);
5328 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5329 					SCTP_SOCKET_UNLOCK(so, 1);
5330 #endif
5331 					*offset = length;
5332 					return (NULL);
5333 				}
5334 			}
5335 			/* He's alive so give him credit */
5336 			if ((stcb) && netp && *netp) {
5337 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5338 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5339 						       stcb->asoc.overall_error_count,
5340 						       0,
5341 						       SCTP_FROM_SCTP_INPUT,
5342 						       __LINE__);
5343 				}
5344 				stcb->asoc.overall_error_count = 0;
5345 				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch,stcb, *netp);
5346 			}
5347 			break;
5348 		case SCTP_ECN_ECHO:
5349 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
5350 			/* He's alive so give him credit */
5351 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5352 				/* It's not ours */
5353 				if (locked_tcb) {
5354 					SCTP_TCB_UNLOCK(locked_tcb);
5355 				}
5356 				*offset = length;
5357 				return (NULL);
5358 			}
5359 			if (stcb) {
5360 				if (stcb->asoc.ecn_supported == 0) {
5361 					goto unknown_chunk;
5362 				}
5363 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5364 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5365 						       stcb->asoc.overall_error_count,
5366 						       0,
5367 						       SCTP_FROM_SCTP_INPUT,
5368 						       __LINE__);
5369 				}
5370 				stcb->asoc.overall_error_count = 0;
5371 				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
5372 						     stcb);
5373 				ecne_seen = 1;
5374 			}
5375 			break;
5376 		case SCTP_ECN_CWR:
5377 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
5378 			/* He's alive so give him credit */
5379 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5380 				/* It's not ours */
5381 				if (locked_tcb) {
5382 					SCTP_TCB_UNLOCK(locked_tcb);
5383 				}
5384 				*offset = length;
5385 				return (NULL);
5386 			}
5387 			if (stcb) {
5388 				if (stcb->asoc.ecn_supported == 0) {
5389 					goto unknown_chunk;
5390 				}
5391 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5392 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5393 						       stcb->asoc.overall_error_count,
5394 						       0,
5395 						       SCTP_FROM_SCTP_INPUT,
5396 						       __LINE__);
5397 				}
5398 				stcb->asoc.overall_error_count = 0;
5399 				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5400 			}
5401 			break;
5402 		case SCTP_SHUTDOWN_COMPLETE:
5403 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", (void *)stcb);
5404 			/* must be first and only chunk */
5405 			if ((num_chunks > 1) ||
5406 			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5407 				*offset = length;
5408 				if (locked_tcb) {
5409 					SCTP_TCB_UNLOCK(locked_tcb);
5410 				}
5411 				return (NULL);
5412 			}
5413 			if ((stcb) && netp && *netp) {
5414 				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5415 							      stcb, *netp);
5416 			}
5417 			*offset = length;
5418 			return (NULL);
5419 			break;
5420 		case SCTP_ASCONF:
5421 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5422 			/* He's alive so give him credit */
5423 			if (stcb) {
5424 				if (stcb->asoc.asconf_supported == 0) {
5425 					goto unknown_chunk;
5426 				}
5427 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5428 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5429 						       stcb->asoc.overall_error_count,
5430 						       0,
5431 						       SCTP_FROM_SCTP_INPUT,
5432 						       __LINE__);
5433 				}
5434 				stcb->asoc.overall_error_count = 0;
5435 				sctp_handle_asconf(m, *offset, src,
5436 						   (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5437 				asconf_cnt++;
5438 			}
5439 			break;
5440 		case SCTP_ASCONF_ACK:
5441 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
5442 			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5443 				/* It's not ours */
5444 				if (locked_tcb) {
5445 					SCTP_TCB_UNLOCK(locked_tcb);
5446 				}
5447 				*offset = length;
5448 				return (NULL);
5449 			}
5450 			if ((stcb) && netp && *netp) {
5451 				if (stcb->asoc.asconf_supported == 0) {
5452 					goto unknown_chunk;
5453 				}
5454 				/* He's alive so give him credit */
5455 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5456 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5457 						       stcb->asoc.overall_error_count,
5458 						       0,
5459 						       SCTP_FROM_SCTP_INPUT,
5460 						       __LINE__);
5461 				}
5462 				stcb->asoc.overall_error_count = 0;
5463 				sctp_handle_asconf_ack(m, *offset,
5464 						       (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5465 				if (abort_no_unlock)
5466 					return (NULL);
5467 			}
5468 			break;
5469 		case SCTP_FORWARD_CUM_TSN:
5470 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
5471 			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5472 				/* It's not ours */
5473 				if (locked_tcb) {
5474 					SCTP_TCB_UNLOCK(locked_tcb);
5475 				}
5476 				*offset = length;
5477 				return (NULL);
5478 			}
5479 
5480 			/* He's alive so give him credit */
5481 			if (stcb) {
5482 				int abort_flag = 0;
5483 
5484 				if (stcb->asoc.prsctp_supported == 0) {
5485 					goto unknown_chunk;
5486 				}
5487 				stcb->asoc.overall_error_count = 0;
5488 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5489 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5490 						       stcb->asoc.overall_error_count,
5491 						       0,
5492 						       SCTP_FROM_SCTP_INPUT,
5493 						       __LINE__);
5494 				}
5495 				*fwd_tsn_seen = 1;
5496 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5497 					/* We are not interested anymore */
5498 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5499 					so = SCTP_INP_SO(inp);
5500 					atomic_add_int(&stcb->asoc.refcnt, 1);
5501 					SCTP_TCB_UNLOCK(stcb);
5502 					SCTP_SOCKET_LOCK(so, 1);
5503 					SCTP_TCB_LOCK(stcb);
5504 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5505 #endif
5506 					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT+SCTP_LOC_29);
5507 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5508 					SCTP_SOCKET_UNLOCK(so, 1);
5509 #endif
5510 					*offset = length;
5511 					return (NULL);
5512 				}
5513 				sctp_handle_forward_tsn(stcb,
5514 							(struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5515 				if (abort_flag) {
5516 					*offset = length;
5517 					return (NULL);
5518 				} else {
5519 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5520 						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5521 							       stcb->asoc.overall_error_count,
5522 							       0,
5523 							       SCTP_FROM_SCTP_INPUT,
5524 							       __LINE__);
5525 					}
5526 					stcb->asoc.overall_error_count = 0;
5527 				}
5528 
5529 			}
5530 			break;
5531 		case SCTP_STREAM_RESET:
5532 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5533 			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5534 				/* It's not ours */
5535 				if (locked_tcb) {
5536 					SCTP_TCB_UNLOCK(locked_tcb);
5537 				}
5538 				*offset = length;
5539 				return (NULL);
5540 			}
5541 			if (stcb->asoc.reconfig_supported == 0) {
5542 				goto unknown_chunk;
5543 			}
5544 			if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
5545 				/* stop processing */
5546 				*offset = length;
5547 				return (NULL);
5548 			}
5549 			break;
5550 		case SCTP_PACKET_DROPPED:
5551 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5552 			/* re-get it all please */
5553 			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5554 				/* It's not ours */
5555 				if (locked_tcb) {
5556 					SCTP_TCB_UNLOCK(locked_tcb);
5557 				}
5558 				*offset = length;
5559 				return (NULL);
5560 			}
5561 
5562 
5563 			if (ch && (stcb) && netp && (*netp)) {
5564 				if (stcb->asoc.pktdrop_supported == 0) {
5565 					goto unknown_chunk;
5566 				}
5567 				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5568 							   stcb, *netp,
5569 							   min(chk_length, (sizeof(chunk_buf) - 4)));
5570 
5571 			}
5572 
5573 			break;
5574 		case SCTP_AUTHENTICATION:
5575 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5576 			if (stcb == NULL) {
5577 				/* save the first AUTH for later processing */
5578 				if (auth_skipped == 0) {
5579 					auth_offset = *offset;
5580 					auth_len = chk_length;
5581 					auth_skipped = 1;
5582 				}
5583 				/* skip this chunk (temporarily) */
5584 				goto next_chunk;
5585 			}
5586 			if (stcb->asoc.auth_supported == 0) {
5587 				goto unknown_chunk;
5588 			}
5589 			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5590 			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5591 					   SCTP_AUTH_DIGEST_LEN_MAX))) {
5592 				/* It's not ours */
5593 				if (locked_tcb) {
5594 					SCTP_TCB_UNLOCK(locked_tcb);
5595 				}
5596 				*offset = length;
5597 				return (NULL);
5598 			}
5599 			if (got_auth == 1) {
5600 				/* skip this chunk... it's already auth'd */
5601 				goto next_chunk;
5602 			}
5603 			got_auth = 1;
5604 			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5605 							     m, *offset)) {
5606 				/* auth HMAC failed so dump the packet */
5607 				*offset = length;
5608 				return (stcb);
5609 			} else {
5610 				/* remaining chunks are HMAC checked */
5611 				stcb->asoc.authenticated = 1;
5612 			}
5613 			break;
5614 
5615 		default:
5616 		unknown_chunk:
5617 			/* it's an unknown chunk! */
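			/*
			 * The two high-order bits of the chunk type (RFC 4960,
			 * Section 3.2) tell us what to do: 0x40 set - report the
			 * chunk in an "unrecognized chunk" error; 0x80 set - skip
			 * it and keep processing; 0x80 clear - stop processing the
			 * rest of the packet.
			 */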
5618 			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5619 				struct mbuf *mm;
5620 				struct sctp_paramhdr *phd;
5621 
5622 				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
5623 							   0, M_NOWAIT, 1, MT_DATA);
5624 				if (mm) {
5625 					phd = mtod(mm, struct sctp_paramhdr *);
5626 					/*
5627 					 * We cheat and use param type since
5628 					 * we did not bother to define an
5629 					 * error cause struct. They are the
5630 					 * same basic format with different
5631 					 * names.
5632 					 */
5633 					phd->param_type =  htons(SCTP_CAUSE_UNRECOG_CHUNK);
5634 					phd->param_length = htons(chk_length + sizeof(*phd));
5635 					SCTP_BUF_LEN(mm) = sizeof(*phd);
5636 					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
5637 					if (SCTP_BUF_NEXT(mm)) {
5638 						if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(mm), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
5639 							sctp_m_freem(mm);
5640 						} else {
5641 #ifdef SCTP_MBUF_LOGGING
5642 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5643 								struct mbuf *mat;
5644 
5645 								for (mat = SCTP_BUF_NEXT(mm); mat; mat = SCTP_BUF_NEXT(mat)) {
5646 									if (SCTP_BUF_IS_EXTENDED(mat)) {
5647 										sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5648 									}
5649 								}
5650 							}
5651 #endif
5652 							sctp_queue_op_err(stcb, mm);
5653 						}
5654 					} else {
5655 						sctp_m_freem(mm);
5656 					}
5657 				}
5658 			}
5659 			if ((ch->chunk_type & 0x80) == 0) {
5660 				/* discard this packet */
5661 				*offset = length;
5662 				return (stcb);
5663 			}	/* else skip this bad chunk and continue... */
5664 			break;
5665 		}		/* switch (ch->chunk_type) */
5666 
5667 
5668 	next_chunk:
5669 		/* get the next chunk */
5670 		*offset += SCTP_SIZE32(chk_length);
5671 		if (*offset >= length) {
5672 			/* no more data left in the mbuf chain */
5673 			break;
5674 		}
5675 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5676 							   sizeof(struct sctp_chunkhdr), chunk_buf);
5677 		if (ch == NULL) {
5678 			if (locked_tcb) {
5679 				SCTP_TCB_UNLOCK(locked_tcb);
5680 			}
5681 			*offset = length;
5682 			return (NULL);
5683 		}
5684 	}			/* while */
5685 
5686 	if (asconf_cnt > 0 && stcb != NULL) {
5687 		sctp_send_asconf_ack(stcb);
5688 	}
5689 	return (stcb);
5690 }
5691 
5692 
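/*
 * INVARIANTS-only debugging aid: panic if any association or endpoint lock
 * is still held when input processing returns.
 */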
5693 #ifdef INVARIANTS
5694 #ifdef __GNUC__
5695 __attribute__((noinline))
5696 #endif
5697 void
5698 sctp_validate_no_locks(struct sctp_inpcb *inp)
5699 {
5700 #ifndef __APPLE__
5701 	struct sctp_tcb *lstcb;
5702 
5703 	LIST_FOREACH(lstcb, &inp->sctp_asoc_list, sctp_tcblist) {
5704 		if (mtx_owned(&lstcb->tcb_mtx)) {
5705 			panic("Own lock on stcb at return from input");
5706 		}
5707 	}
5708 	if (mtx_owned(&inp->inp_create_mtx)) {
5709 		panic("Own create lock on inp");
5710 	}
5711 	if (mtx_owned(&inp->inp_mtx)) {
5712 		panic("Own inp lock on inp");
5713 	}
5714 #endif
5715 }
5716 #endif
5717 
5718 /*
5719  * common input chunk processing (v4 and v6)
5720  */
5721 void
5722 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
5723                              struct sockaddr *src, struct sockaddr *dst,
5724                              struct sctphdr *sh, struct sctp_chunkhdr *ch,
5725 #if !defined(SCTP_WITH_NO_CSUM)
5726                              uint8_t compute_crc,
5727 #endif
5728                              uint8_t ecn_bits,
5729 #if defined(__FreeBSD__)
5730                              uint8_t use_mflowid, uint32_t mflowid,
5731 #endif
5732                              uint32_t vrf_id, uint16_t port)
5733 {
5734 	uint32_t high_tsn;
5735 	int fwd_tsn_seen = 0, data_processed = 0;
5736 	struct mbuf *m = *mm, *op_err;
5737 	char msg[SCTP_DIAG_INFO_LEN];
5738 	int un_sent;
5739 	int cnt_ctrl_ready = 0;
5740 	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
5741 	struct sctp_tcb *stcb = NULL;
5742 	struct sctp_nets *net = NULL;
5743 
5744 	SCTP_STAT_INCR(sctps_recvdatagrams);
5745 #ifdef SCTP_AUDITING_ENABLED
5746 	sctp_audit_log(0xE0, 1);
5747 	sctp_auditing(0, inp, stcb, net);
5748 #endif
5749 #if !defined(SCTP_WITH_NO_CSUM)
5750 	if (compute_crc != 0) {
5751 		uint32_t check, calc_check;
5752 
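		/*
		 * Verify the CRC32c: zero the checksum field, recompute over the
		 * packet, and restore the original value before comparing.
		 */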
5753 		check = sh->checksum;
5754 		sh->checksum = 0;
5755 		calc_check = sctp_calculate_cksum(m, iphlen);
5756 		sh->checksum = check;
5757 		if (calc_check != check) {
5758 			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
5759 			        calc_check, check, (void *)m, length, iphlen);
5760 			stcb = sctp_findassociation_addr(m, offset, src, dst,
5761 			                                 sh, ch, &inp, &net, vrf_id);
5762 #if defined(INET) || defined(INET6)
5763 			if ((net != NULL) && (port != 0)) {
5764 				if (net->port == 0) {
5765 					sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
5766 				}
5767 				net->port = port;
5768 			}
5769 #endif
5770 #if defined(__FreeBSD__)
5771 			if ((net != NULL) && (use_mflowid != 0)) {
5772 				net->flowid = mflowid;
5773 #ifdef INVARIANTS
5774 				net->flowidset = 1;
5775 #endif
5776 			}
5777 #endif
5778 			if ((inp != NULL) && (stcb != NULL)) {
5779 				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
5780 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5781 			} else if ((inp != NULL) && (stcb == NULL)) {
5782 				inp_decr = inp;
5783 			}
5784 			SCTP_STAT_INCR(sctps_badsum);
5785 			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5786 			goto out;
5787 		}
5788 	}
5789 #endif
5790 	/* Destination port of 0 is illegal, based on RFC4960. */
5791 	if (sh->dest_port == 0) {
5792 		SCTP_STAT_INCR(sctps_hdrops);
5793 		goto out;
5794 	}
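	/*
	 * Look up the association (and with it the endpoint and destination
	 * net) for this packet.
	 */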
5795 	stcb = sctp_findassociation_addr(m, offset, src, dst,
5796 	                                 sh, ch, &inp, &net, vrf_id);
5797 #if defined(INET) || defined(INET6)
5798 	if ((net != NULL) && (port != 0)) {
5799 		if (net->port == 0) {
5800 			sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
5801 		}
5802 		net->port = port;
5803 	}
5804 #endif
5805 #if defined(__FreeBSD__)
5806 	if ((net != NULL) && (use_mflowid != 0)) {
5807 		net->flowid = mflowid;
5808 #ifdef INVARIANTS
5809 		net->flowidset = 1;
5810 #endif
5811 	}
5812 #endif
5813 	if (inp == NULL) {
5814 		SCTP_STAT_INCR(sctps_noport);
5815 #if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
5816 		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
5817 			goto out;
5818 		}
5819 #endif
5820 		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5821 			sctp_send_shutdown_complete2(src, dst, sh,
5822 #if defined(__FreeBSD__)
5823 			                             use_mflowid, mflowid,
5824 #endif
5825 			                             vrf_id, port);
5826 			goto out;
5827 		}
5828 		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5829 			goto out;
5830 		}
5831 		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
5832 			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
5833 			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
5834 			     (ch->chunk_type != SCTP_INIT))) {
5835 				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5836 				                             "Out of the blue");
5837 				sctp_send_abort(m, iphlen, src, dst,
5838 				                sh, 0, op_err,
5839 #if defined(__FreeBSD__)
5840 				                use_mflowid, mflowid,
5841 #endif
5842 				                vrf_id, port);
5843 			}
5844 		}
5845 		goto out;
5846 	} else if (stcb == NULL) {
5847 		inp_decr = inp;
5848 	}
5849 #ifdef IPSEC
5850 	/*-
5851 	 * I very much doubt any of the IPSEC stuff will work but I have no
5852 	 * idea, so I will leave it in place.
5853 	 */
5854 	if (inp != NULL) {
5855 		switch (dst->sa_family) {
5856 #ifdef INET
5857 		case AF_INET:
5858 			if (ipsec4_in_reject(m, &inp->ip_inp.inp)) {
5859 #if defined(__FreeBSD__) && (__FreeBSD_version > 1000036)
5860 				IPSECSTAT_INC(ips_in_polvio);
5861 #else
5862 				MODULE_GLOBAL(ipsec4stat).in_polvio++;
5863 #endif
5864 				SCTP_STAT_INCR(sctps_hdrops);
5865 				goto out;
5866 			}
5867 			break;
5868 #endif
5869 #ifdef INET6
5870 		case AF_INET6:
5871 			if (ipsec6_in_reject(m, &inp->ip_inp.inp)) {
5872 #if defined(__FreeBSD__) && (__FreeBSD_version > 1000036)
5873 				IPSEC6STAT_INC(ips_in_polvio);
5874 #else
5875 				MODULE_GLOBAL(ipsec6stat).in_polvio++;
5876 #endif
5877 				SCTP_STAT_INCR(sctps_hdrops);
5878 				goto out;
5879 			}
5880 			break;
5881 #endif
5882 		default:
5883 			break;
5884 		}
5885 	}
5886 #endif
5887 	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5888 		(void *)m, iphlen, offset, length, (void *)stcb);
5889 	if (stcb) {
5890 		/* always clear this before beginning a packet */
5891 		stcb->asoc.authenticated = 0;
5892 		stcb->asoc.seen_a_sack_this_pkt = 0;
5893 		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5894 			(void *)stcb, stcb->asoc.state);
5895 
5896 		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5897 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5898 			/*-
5899 			 * If we hit here, we had a ref count
5900 			 * up when the assoc was aborted and the
5901 			 * timer is clearing out the assoc; we should
5902 			 * NOT respond to any packet... it's OOTB.
5903 			 */
5904 			SCTP_TCB_UNLOCK(stcb);
5905 			stcb = NULL;
5906 			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
5907 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5908 			                             msg);
5909 			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5910 #if defined(__FreeBSD__)
5911 			                 use_mflowid, mflowid,
5912 #endif
5913 			                 vrf_id, port);
5914 			goto out;
5915 		}
5916 
5917 	}
5918 	if (IS_SCTP_CONTROL(ch)) {
5919 		/* process the control portion of the SCTP packet */
5920 		/* sa_ignore NO_NULL_CHK */
5921 		stcb = sctp_process_control(m, iphlen, &offset, length,
5922 		                            src, dst, sh, ch,
5923 		                            inp, stcb, &net, &fwd_tsn_seen,
5924 #if defined(__FreeBSD__)
5925 		                            use_mflowid, mflowid,
5926 #endif
5927 		                            vrf_id, port);
5928 		if (stcb) {
5929 			/* This covers us if the cookie-echo was there
5930 			 * and it changes our INP.
5931 			 */
5932 			inp = stcb->sctp_ep;
5933 #if defined(INET) || defined(INET6)
5934 			if ((net) && (port)) {
5935 				if (net->port == 0) {
5936 					sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
5937 				}
5938 				net->port = port;
5939 			}
5940 #endif
5941 		}
5942 	} else {
5943 		/*
5944 		 * no control chunks, so pre-process DATA chunks (these
5945 		 * checks are taken care of by control processing)
5946 		 */
5947 
5948 		/*
5949 		 * If this is a DATA-only packet, and auth is required, then punt...
5950 		 * can't have authenticated without any AUTH (control)
5951 		 * chunks
5952 		 */
5953 		if ((stcb != NULL) &&
5954 		    (stcb->asoc.auth_supported == 1) &&
5955 		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5956 			/* "silently" ignore */
5957 			SCTP_STAT_INCR(sctps_recvauthmissing);
5958 			goto out;
5959 		}
5960 		if (stcb == NULL) {
5961 			/* out of the blue DATA chunk */
5962 			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
5963 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5964 			                             msg);
5965 			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5966 #if defined(__FreeBSD__)
5967 			                 use_mflowid, mflowid,
5968 #endif
5969 					 vrf_id, port);
5970 			goto out;
5971 		}
5972 		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5973 			/* v_tag mismatch! */
5974 			SCTP_STAT_INCR(sctps_badvtag);
5975 			goto out;
5976 		}
5977 	}
5978 
5979 	if (stcb == NULL) {
5980 		/*
5981 		 * no valid TCB for this packet, or we found it's a bad
5982 		 * packet while processing control, or we're done with this
5983 		 * packet (done or skip rest of data), so we drop it...
5984 		 */
5985 		goto out;
5986 	}
5987 
5988 	/*
5989 	 * DATA chunk processing
5990 	 */
5991 	/* plow through the data chunks while length > offset */
5992 
5993 	/*
5994 	 * Rest should be DATA only.  Check authentication state if AUTH for
5995 	 * DATA is required.
5996 	 */
5997 	if ((length > offset) &&
5998 	    (stcb != NULL) &&
5999 	    (stcb->asoc.auth_supported == 1) &&
6000 	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
6001 	    !stcb->asoc.authenticated) {
6002 		/* "silently" ignore */
6003 		SCTP_STAT_INCR(sctps_recvauthmissing);
6004 		SCTPDBG(SCTP_DEBUG_AUTH1,
6005 			"Data chunk requires AUTH, skipped\n");
6006 		goto trigger_send;
6007 	}
6008 	if (length > offset) {
6009 		int retval;
6010 
6011 		/*
6012 		 * First check to make sure our state is correct. We would
6013 		 * not get here unless we really did have a tag, so we don't
6014 		 * abort if this happens, just dump the chunk silently.
6015 		 * abort if this happens; we just dump the chunk silently.
6016 		switch (SCTP_GET_STATE(&stcb->asoc)) {
6017 		case SCTP_STATE_COOKIE_ECHOED:
6018 			/*
6019 			 * We consider that data with a valid tag in this state
6020 			 * shows us the COOKIE-ACK was lost. Imply it was
6021 			 * there.
6022 			 */
6023 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6024 				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6025 					       stcb->asoc.overall_error_count,
6026 					       0,
6027 					       SCTP_FROM_SCTP_INPUT,
6028 					       __LINE__);
6029 			}
6030 			stcb->asoc.overall_error_count = 0;
6031 			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
6032 			break;
6033 		case SCTP_STATE_COOKIE_WAIT:
6034 			/*
6035 			 * We consider OOTB any data sent during asoc setup.
6036 			 */
6037 			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
6038 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6039 			                             msg);
6040 			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
6041 #if defined(__FreeBSD__)
6042 			                 use_mflowid, mflowid,
6043 #endif
6044 					 vrf_id, port);
6045 			goto out;
6046 			/*sa_ignore NOTREACHED*/
6047 			break;
6048 		case SCTP_STATE_EMPTY:	/* should not happen */
6049 		case SCTP_STATE_INUSE:	/* should not happen */
6050 		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
6051 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
6052 		default:
6053 			goto out;
6054 			/*sa_ignore NOTREACHED*/
6055 			break;
6056 		case SCTP_STATE_OPEN:
6057 		case SCTP_STATE_SHUTDOWN_SENT:
6058 			break;
6059 		}
6060 		/* plow through the data chunks while length > offset */
6061 		retval = sctp_process_data(mm, iphlen, &offset, length,
6062 		                           src, dst, sh,
6063 		                           inp, stcb, net, &high_tsn,
6064 #if defined(__FreeBSD__)
6065 		                           use_mflowid, mflowid,
6066 #endif
6067 		                           vrf_id, port);
6068 		if (retval == 2) {
6069 			/*
6070 			 * The association aborted, NO UNLOCK needed since
6071 			 * the association is destroyed.
6072 			 */
6073 			stcb = NULL;
6074 			goto out;
6075 		}
6076 		data_processed = 1;
6077 		/*
6078 		 * Anything important needs to have been m_copy'ed in
6079 		 * process_data
6080 		 */
6081 	}
6082 
6083 	/* take care of ecn */
6084 	if ((data_processed == 1) &&
6085 	    (stcb->asoc.ecn_supported == 1) &&
6086 	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
6087 		/* Yep, we need to add an ECNE */
6088 		sctp_send_ecn_echo(stcb, net, high_tsn);
6089 	}
6090 
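	/*
	 * A FORWARD-TSN without any processed DATA still moves the peer's
	 * cumulative TSN forward, so note whether a gap existed and schedule
	 * a SACK.
	 */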
6091 	if ((data_processed == 0) && (fwd_tsn_seen)) {
6092 		int was_a_gap;
6093 		uint32_t highest_tsn;
6094 
6095 		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
6096 			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
6097 		} else {
6098 			highest_tsn = stcb->asoc.highest_tsn_inside_map;
6099 		}
6100 		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
6101 		stcb->asoc.send_sack = 1;
6102 		sctp_sack_check(stcb, was_a_gap);
6103 	} else if (fwd_tsn_seen) {
6104 		stcb->asoc.send_sack = 1;
6105 	}
6106 	/* trigger send of any chunks in queue... */
6107 trigger_send:
6108 #ifdef SCTP_AUDITING_ENABLED
6109 	sctp_audit_log(0xE0, 2);
6110 	sctp_auditing(1, inp, stcb, net);
6111 #endif
6112 	SCTPDBG(SCTP_DEBUG_INPUT1,
6113 		"Check for chunk output prw:%d tqe:%d tf=%d\n",
6114 		stcb->asoc.peers_rwnd,
6115 		TAILQ_EMPTY(&stcb->asoc.control_send_queue),
6116 		stcb->asoc.total_flight);
6117 	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
6118 	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
6119 		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
6120 	}
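	/*
	 * Kick the output path if there is control traffic queued (not
	 * counting ECN-Echo chunks already on the queue), or if there is
	 * unsent data and either the peer advertises a window or a zero
	 * window can be probed because nothing is in flight.
	 */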
6121 	if (cnt_ctrl_ready ||
6122 	    ((un_sent) &&
6123 	     (stcb->asoc.peers_rwnd > 0 ||
6124 	      (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
6125 		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
6126 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
6127 		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
6128 	}
6129 #ifdef SCTP_AUDITING_ENABLED
6130 	sctp_audit_log(0xE0, 3);
6131 	sctp_auditing(2, inp, stcb, net);
6132 #endif
6133  out:
6134 	if (stcb != NULL) {
6135 		SCTP_TCB_UNLOCK(stcb);
6136 	}
6137 	if (inp_decr != NULL) {
6138 		/* reduce ref-count */
6139 		SCTP_INP_WLOCK(inp_decr);
6140 		SCTP_INP_DECR_REF(inp_decr);
6141 		SCTP_INP_WUNLOCK(inp_decr);
6142 	}
6143 #ifdef INVARIANTS
6144 	if (inp != NULL) {
6145 		sctp_validate_no_locks(inp);
6146 	}
6147 #endif
6148 	return;
6149 }
6150 
6151 #if 0
6152 static void
6153 sctp_print_mbuf_chain(struct mbuf *m)
6154 {
6155 	for (; m; m = SCTP_BUF_NEXT(m)) {
6156 		SCTP_PRINTF("%p: m_len = %ld\n", (void *)m, SCTP_BUF_LEN(m));
6157 		if (SCTP_BUF_IS_EXTENDED(m))
6158 			SCTP_PRINTF("%p: extend_size = %d\n", (void *)m, SCTP_BUF_EXTEND_SIZE(m));
6159 	}
6160 }
6161 #endif
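#if 0
/*
 * Illustrative sketch only (not part of the stack): a companion debug
 * helper in the spirit of sctp_print_mbuf_chain() above, dumping the
 * fields of the SCTP common header that the input path parses.  The
 * helper name is hypothetical; the field names and SCTP_PRINTF() are the
 * ones already used in this file.
 */
static void
sctp_print_common_header(struct sctphdr *sh)
{
	SCTP_PRINTF("src port %u, dst port %u, v_tag 0x%08x\n",
	            (unsigned int)ntohs(sh->src_port),
	            (unsigned int)ntohs(sh->dest_port),
	            (unsigned int)ntohl(sh->v_tag));
}
#endif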
6162 
6163 #ifdef INET
6164 #if !defined(__Userspace__)
6165 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
6166 void
6167 sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
6168 #elif defined(__Panda__)
6169 void
6170 sctp_input(pakhandle_type i_pak)
6171 #else
6172 void
6173 #if __STDC__
6174 sctp_input(struct mbuf *i_pak,...)
6175 #else
6176 sctp_input(i_pak, va_alist)
6177 	struct mbuf *i_pak;
6178 #endif
6179 #endif
6180 {
6181 	struct mbuf *m;
6182 	int iphlen;
6183 	uint32_t vrf_id = 0;
6184 	uint8_t ecn_bits;
6185 	struct sockaddr_in src, dst;
6186 	struct ip *ip;
6187 	struct sctphdr *sh;
6188 	struct sctp_chunkhdr *ch;
6189 	int length, offset;
6190 #if !defined(SCTP_WITH_NO_CSUM)
6191 	uint8_t compute_crc;
6192 #endif
6193 #if defined(__FreeBSD__)
6194 	uint32_t mflowid;
6195 	uint8_t use_mflowid;
6196 #endif
6197 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__))
6198 	uint16_t port = 0;
6199 #endif
6200 
6201 #if defined(__Panda__)
	/* This is evil, but it's the only way to make Panda work right. */
6203 	iphlen = sizeof(struct ip);
6204 #else
6205 	iphlen = off;
6206 #endif
6207 	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
6208 		SCTP_RELEASE_PKT(i_pak);
6209 		return;
6210 	}
6211 	m = SCTP_HEADER_TO_CHAIN(i_pak);
6212 #ifdef __Panda__
6213 	SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
6214 	(void)SCTP_RELEASE_HEADER(i_pak);
6215 #endif
6216 #ifdef SCTP_MBUF_LOGGING
	/* Log any input mbufs. */
6218 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6219 		struct mbuf *mat;
6220 
6221 		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6222 			if (SCTP_BUF_IS_EXTENDED(mat)) {
6223 				sctp_log_mb(mat, SCTP_MBUF_INPUT);
6224 			}
6225 		}
6226 	}
6227 #endif
6228 #ifdef SCTP_PACKET_LOGGING
6229 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
6230 		sctp_packet_log(m);
6231 	}
6232 #endif
6233 #if defined(__FreeBSD__)
6234 #if __FreeBSD_version > 1000049
6235 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6236 	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6237 	        m->m_pkthdr.len,
6238 	        if_name(m->m_pkthdr.rcvif),
6239 	        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6240 #elif __FreeBSD_version >= 800000
6241 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6242 	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
6243 	        m->m_pkthdr.len,
6244 	        if_name(m->m_pkthdr.rcvif),
6245 	        m->m_pkthdr.csum_flags);
6246 #else
6247 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6248 	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
6249 	        m->m_pkthdr.len,
6250 	        m->m_pkthdr.rcvif->if_xname,
6251 	        m->m_pkthdr.csum_flags);
6252 #endif
6253 #endif
6254 #if defined(__APPLE__)
6255 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6256 	        "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n",
6257 	        m->m_pkthdr.len,
6258 	        m->m_pkthdr.rcvif->if_name,
6259 	        m->m_pkthdr.rcvif->if_unit,
6260 	        m->m_pkthdr.csum_flags);
6261 #endif
6262 #if defined(__Windows__)
6263 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6264 	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
6265 	        m->m_pkthdr.len,
6266 	        m->m_pkthdr.rcvif->if_xname,
6267 	        m->m_pkthdr.csum_flags);
6268 #endif
6269 #if defined(__FreeBSD__)
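	/*
	 * Save the receive flow id provided by the driver (if any) so it
	 * can be passed along and reused when sending replies.
	 */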
6270 	if (m->m_flags & M_FLOWID) {
6271 		mflowid = m->m_pkthdr.flowid;
6272 		use_mflowid = 1;
6273 	} else {
6274 		mflowid = 0;
6275 		use_mflowid = 0;
6276 	}
6277 #endif
6278 	SCTP_STAT_INCR(sctps_recvpackets);
6279 	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
6280 	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
6281 	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
6282 	if (SCTP_BUF_LEN(m) < offset) {
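		/* m_pullup() frees the chain and returns NULL on failure. */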
6283 		if ((m = m_pullup(m, offset)) == NULL) {
6284 			SCTP_STAT_INCR(sctps_hdrops);
6285 			return;
6286 		}
6287 	}
6288 	ip = mtod(m, struct ip *);
6289 	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
6290 	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
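	/*
	 * Rewind offset so it points at the first chunk header, just past
	 * the SCTP common header.
	 */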
6291 	offset -= sizeof(struct sctp_chunkhdr);
6292 	memset(&src, 0, sizeof(struct sockaddr_in));
6293 	src.sin_family = AF_INET;
6294 #ifdef HAVE_SIN_LEN
6295 	src.sin_len = sizeof(struct sockaddr_in);
6296 #endif
6297 	src.sin_port = sh->src_port;
6298 	src.sin_addr = ip->ip_src;
6299 	memset(&dst, 0, sizeof(struct sockaddr_in));
6300 	dst.sin_family = AF_INET;
6301 #ifdef HAVE_SIN_LEN
6302 	dst.sin_len = sizeof(struct sockaddr_in);
6303 #endif
6304 	dst.sin_port = sh->dest_port;
6305 	dst.sin_addr = ip->ip_dst;
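	/*
	 * Determine the total packet length.  Depending on the platform,
	 * ip_len may still be in network byte order and may or may not
	 * include the IP header, hence the conditional handling below.
	 */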
6306 #if defined(__Windows__)
6307 	NTOHS(ip->ip_len);
6308 #endif
6309 #if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
6310 	ip->ip_len = ntohs(ip->ip_len);
6311 #endif
6312 #if defined(__FreeBSD__)
6313 #if __FreeBSD_version >= 1000000
6314 	length = ntohs(ip->ip_len);
6315 #else
6316 	length = ip->ip_len + iphlen;
6317 #endif
6318 #elif defined(__APPLE__)
6319 	length = ip->ip_len + iphlen;
6320 #elif defined(__Userspace__)
6321 #if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
6322 	length = ip->ip_len;
6323 #else
6324 	length = ip->ip_len + iphlen;
6325 #endif
6326 #else
6327 	length = ip->ip_len;
6328 #endif
6329 	/* Validate mbuf chain length with IP payload length. */
6330 	if (SCTP_HEADER_LEN(m) != length) {
6331 		SCTPDBG(SCTP_DEBUG_INPUT1,
6332 		        "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
6333 		SCTP_STAT_INCR(sctps_hdrops);
6334 		goto out;
6335 	}
6336 	/* SCTP does not allow broadcasts or multicasts */
6337 	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
6338 		goto out;
6339 	}
6340 	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
6341 		goto out;
6342 	}
6343 	ecn_bits = ip->ip_tos;
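	/*
	 * Decide whether the CRC32c has to be computed in software: skip it
	 * when the NIC already validated it (CSUM_SCTP_VALID) or, on stacks
	 * without offload support, when the sysctl allows loopback traffic
	 * to go unchecked.
	 */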
6344 #if defined(SCTP_WITH_NO_CSUM)
6345 	SCTP_STAT_INCR(sctps_recvnocrc);
6346 #else
6347 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
6348 	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
6349 		SCTP_STAT_INCR(sctps_recvhwcrc);
6350 		compute_crc = 0;
6351 	} else {
6352 #else
6353 	if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
6354 	    ((src.sin_addr.s_addr == dst.sin_addr.s_addr) ||
6355 	     (SCTP_IS_IT_LOOPBACK(m)))) {
6356 		SCTP_STAT_INCR(sctps_recvnocrc);
6357 		compute_crc = 0;
6358 	} else {
6359 #endif
6360 		SCTP_STAT_INCR(sctps_recvswcrc);
6361 		compute_crc = 1;
6362 	}
6363 #endif
6364 	sctp_common_input_processing(&m, iphlen, offset, length,
6365 	                             (struct sockaddr *)&src,
6366 	                             (struct sockaddr *)&dst,
6367 	                             sh, ch,
6368 #if !defined(SCTP_WITH_NO_CSUM)
6369 	                             compute_crc,
6370 #endif
6371 	                             ecn_bits,
6372 #if defined(__FreeBSD__)
6373 	                             use_mflowid, mflowid,
6374 #endif
6375 	                             vrf_id, port);
6376  out:
6377 	if (m) {
6378 		sctp_m_freem(m);
6379 	}
6380 	return;
6381 }
6382 
6383 #if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
6384 extern int *sctp_cpuarry;
6385 #endif
6386 
6387 #if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
6388 int
6389 sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
6390 {
6391 	struct mbuf *m;
6392 	int off;
6393 
6394 	m = *mp;
6395 	off = *offp;
6396 #else
6397 void
6398 sctp_input(struct mbuf *m, int off)
6399 {
6400 #endif
6401 #if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
6402 	if (mp_ncpus > 1) {
6403 		struct ip *ip;
6404 		struct sctphdr *sh;
6405 		int offset;
6406 		int cpu_to_use;
6407 		uint32_t flowid, tag;
6408 
6409 		if (m->m_flags & M_FLOWID) {
6410 			flowid = m->m_pkthdr.flowid;
6411 		} else {
			/*
			 * The lower layers did not provide a flow id, so
			 * build one from the SCTP common header.
			 */
6415 			offset = off + sizeof(struct sctphdr);
6416 			if (SCTP_BUF_LEN(m) < offset) {
6417 				if ((m = m_pullup(m, offset)) == NULL) {
6418 					SCTP_STAT_INCR(sctps_hdrops);
6419 #if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
6420 					return (IPPROTO_DONE);
6421 #else
6422 					return;
6423 #endif
6424 				}
6425 			}
6426 			ip = mtod(m, struct ip *);
6427 			sh = (struct sctphdr *)((caddr_t)ip + off);
6428 			tag = htonl(sh->v_tag);
6429 			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
6430 			m->m_pkthdr.flowid = flowid;
6431 			m->m_flags |= M_FLOWID;
6432 		}
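		/*
		 * Map the flow id onto one of the available cores so that
		 * all packets of an association are processed by the same
		 * mcore input thread.
		 */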
6433 		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
6434 		sctp_queue_to_mcore(m, off, cpu_to_use);
6435 #if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
6436 		return (IPPROTO_DONE);
6437 #else
6438 		return;
6439 #endif
6440 	}
6441 #endif
6442 	sctp_input_with_port(m, off, 0);
6443 #if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
6444 	return (IPPROTO_DONE);
6445 #endif
6446 }
6447 #endif
6448 #endif