1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 363194 2020-07-14 20:32:50Z tuexen $");
38 #endif
39 
40 #include <netinet/sctp_os.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_input.h>
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_timer.h>
53 #include <netinet/sctp_crc32.h>
54 #if defined(__FreeBSD__) && !defined(__Userspace__)
55 #include <netinet/sctp_kdtrace.h>
56 #endif
57 #if defined(INET) || defined(INET6)
58 #if !defined(_WIN32)
59 #include <netinet/udp.h>
60 #endif
61 #endif
62 #if defined(__FreeBSD__) && !defined(__Userspace__)
63 #include <sys/smp.h>
64 #endif
65 
66 static void
67 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
68 {
69 	struct sctp_nets *net;
70 
71 	/* This now not only stops all cookie timers,
72 	 * it also stops any INIT timers as well. This
73 	 * will make sure that the timers are stopped in
74 	 * all collision cases.
75 	 */
76 	SCTP_TCB_LOCK_ASSERT(stcb);
77 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
78 		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
79 			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
80 					stcb->sctp_ep,
81 					stcb,
82 					net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
83 		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
84 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
85 					stcb->sctp_ep,
86 					stcb,
87 					net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
88 		}
89 	}
90 }
91 
92 /* INIT handler */
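/*
 * The INIT is validated in stages: the chunk length, then the mandatory
 * fields (initiate_tag, a_rwnd, inbound/outbound stream counts), then the
 * AUTH related parameters.  Any failure generates an operation error cause
 * and aborts the setup.  If no listening socket will take the association,
 * the INIT is silently dropped (or answered with an ABORT when blackholing
 * is disabled).  Otherwise an INIT-ACK is sent, or a SHUTDOWN-ACK when this
 * end is already in SHUTDOWN-ACK-SENT.
 */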
93 static void
94 sctp_handle_init(struct mbuf *m, int iphlen, int offset,
95                  struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
96                  struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
97                  struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock,
98 #if defined(__FreeBSD__) && !defined(__Userspace__)
99                  uint8_t mflowtype, uint32_t mflowid,
100 #endif
101                  uint32_t vrf_id, uint16_t port)
102 {
103 	struct sctp_init *init;
104 	struct mbuf *op_err;
105 
106 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
107 		(void *)stcb);
108 	if (stcb == NULL) {
109 		SCTP_INP_RLOCK(inp);
110 	}
111 	/* validate length */
112 	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
113 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
114 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
115 #if defined(__FreeBSD__) && !defined(__Userspace__)
116 		                       mflowtype, mflowid,
117 #endif
118 				       vrf_id, port);
119 		if (stcb)
120 			*abort_no_unlock = 1;
121 		goto outnow;
122 	}
123 	/* validate parameters */
124 	init = &cp->init;
125 	if (init->initiate_tag == 0) {
126 		/* protocol error... send abort */
127 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
128 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
129 #if defined(__FreeBSD__) && !defined(__Userspace__)
130 		                       mflowtype, mflowid,
131 #endif
132 				       vrf_id, port);
133 		if (stcb)
134 			*abort_no_unlock = 1;
135 		goto outnow;
136 	}
137 	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
138 		/* invalid parameter... send abort */
139 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
140 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
141 #if defined(__FreeBSD__) && !defined(__Userspace__)
142 		                       mflowtype, mflowid,
143 #endif
144 				       vrf_id, port);
145 		if (stcb)
146 			*abort_no_unlock = 1;
147 		goto outnow;
148 	}
149 	if (init->num_inbound_streams == 0) {
150 		/* protocol error... send abort */
151 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
152 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
153 #if defined(__FreeBSD__) && !defined(__Userspace__)
154 		                       mflowtype, mflowid,
155 #endif
156 				       vrf_id, port);
157 		if (stcb)
158 			*abort_no_unlock = 1;
159 		goto outnow;
160 	}
161 	if (init->num_outbound_streams == 0) {
162 		/* protocol error... send abort */
163 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
164 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
165 #if defined(__FreeBSD__) && !defined(__Userspace__)
166 		                       mflowtype, mflowid,
167 #endif
168 				       vrf_id, port);
169 		if (stcb)
170 			*abort_no_unlock = 1;
171 		goto outnow;
172 	}
173 	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
174 					   offset + ntohs(cp->ch.chunk_length))) {
175 		/* auth parameter(s) error... send abort */
176 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
177 		                             "Problem with AUTH parameters");
178 		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
179 #if defined(__FreeBSD__) && !defined(__Userspace__)
180 		                       mflowtype, mflowid,
181 #endif
182 		                       vrf_id, port);
183 		if (stcb)
184 			*abort_no_unlock = 1;
185 		goto outnow;
186 	}
187 	/* We are only accepting if we have a listening socket. */
188 	if ((stcb == NULL) &&
189 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
190 	     (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
191 	     (!SCTP_IS_LISTENING(inp)))) {
192 		/*
193 		 * FIX ME ?? What about the TCP model when we have a
194 		 * match/restart case? Actually no fix is needed.
195 		 * The lookup will always find the existing assoc, so stcb
196 		 * would not be NULL. It may be questionable to do this
197 		 * since we COULD just send back the INIT-ACK and hope that
198 		 * the app did accept()'s by the time the COOKIE was sent. But
199 		 * there is a price to pay for COOKIE generation and I don't
200 		 * want to pay it on the chance that the app will actually do
201 		 * some accepts(). The app just loses and should NOT be in
202 		 * this state :-)
203 		 */
204 		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
205 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
206 			                             "No listener");
207 			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
208 #if defined(__FreeBSD__) && !defined(__Userspace__)
209 			                mflowtype, mflowid, inp->fibnum,
210 #endif
211 			                vrf_id, port);
212 		}
213 		goto outnow;
214 	}
215 	if ((stcb != NULL) &&
216 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
217 		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
218 		sctp_send_shutdown_ack(stcb, NULL);
219 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
220 	} else {
221 		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
222 		sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
223 		                       src, dst, sh, cp,
224 #if defined(__FreeBSD__) && !defined(__Userspace__)
225 		                       mflowtype, mflowid,
226 #endif
227 		                       vrf_id, port);
228 	}
229  outnow:
230 	if (stcb == NULL) {
231 		SCTP_INP_RUNLOCK(inp);
232 	}
233 }
234 
235 /*
236  * process peer "INIT/INIT-ACK" chunk; returns value < 0 on error
237  */
238 
239 int
240 sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked)
241 {
242 	int unsent_data;
243 	unsigned int i;
244 	struct sctp_stream_queue_pending *sp;
245 	struct sctp_association *asoc;
246 
247 	/* This function returns whether any stream has true unsent data on it.
248 	 * Note that as it looks through it will clean up any places that
249 	 * have old data that has been sent but left at the top of the stream queue.
250 	 */
251 	asoc = &stcb->asoc;
252 	unsent_data = 0;
253 	SCTP_TCB_SEND_LOCK(stcb);
254 	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
255 		/* Check to see if some data queued */
256 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
257 			/*sa_ignore FREED_MEMORY*/
258 			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
259 			if (sp == NULL) {
260 				continue;
261 			}
262 			if ((sp->msg_is_complete) &&
263 			    (sp->length == 0)  &&
264 			    (sp->sender_all_done)) {
265 				/* We are doing deferred cleanup. Last
266 				 * time through, when we took all the data,
267 				 * sender_all_done was not set.
268 				 */
269 				if (sp->put_last_out == 0) {
270 					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
271 					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
272 					            sp->sender_all_done,
273 					            sp->length,
274 					            sp->msg_is_complete,
275 					            sp->put_last_out);
276 				}
277 				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
278 				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
279 				stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp, 1);
280 				if (sp->net) {
281 					sctp_free_remote_addr(sp->net);
282 					sp->net = NULL;
283 				}
284 				if (sp->data) {
285 					sctp_m_freem(sp->data);
286 					sp->data = NULL;
287 				}
288 				sctp_free_a_strmoq(stcb, sp, so_locked);
289 				if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
290 					unsent_data++;
291 				}
292 			} else {
293 				unsent_data++;
294 			}
295 			if (unsent_data > 0) {
296 				break;
297 			}
298 		}
299 	}
300 	SCTP_TCB_SEND_UNLOCK(stcb);
301 	return (unsent_data);
302 }
303 
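/*
 * Record the parameters of a peer INIT/INIT-ACK in the association:
 * peer verification tag, peer rwnd, initial TSN bookkeeping, and the
 * negotiated stream counts.  Outbound streams the peer will not accept
 * are abandoned (queued chunks are freed and the ULP is notified) and a
 * fresh inbound stream array is allocated.  Returns -1 only if that
 * allocation fails, 0 otherwise.
 */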
304 static int
305 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
306 {
307 	struct sctp_init *init;
308 	struct sctp_association *asoc;
309 	struct sctp_nets *lnet;
310 	unsigned int i;
311 
312 	init = &cp->init;
313 	asoc = &stcb->asoc;
314 	/* save off parameters */
315 	asoc->peer_vtag = ntohl(init->initiate_tag);
316 	asoc->peers_rwnd = ntohl(init->a_rwnd);
317 	/* init tsn's */
318 	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
319 
320 	if (!TAILQ_EMPTY(&asoc->nets)) {
321 		/* update any ssthresh's that may have a default */
322 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
323 			lnet->ssthresh = asoc->peers_rwnd;
324 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
325 				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
326 			}
327 
328 		}
329 	}
330 	SCTP_TCB_SEND_LOCK(stcb);
331 	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
332 		unsigned int newcnt;
333 		struct sctp_stream_out *outs;
334 		struct sctp_stream_queue_pending *sp, *nsp;
335 		struct sctp_tmit_chunk *chk, *nchk;
336 
337 		/* abandon the upper streams */
338 		newcnt = ntohs(init->num_inbound_streams);
339 		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
340 			if (chk->rec.data.sid >= newcnt) {
341 				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
342 				asoc->send_queue_cnt--;
343 				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
344 					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
345 #ifdef INVARIANTS
346 				} else {
347 					panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
348 #endif
349 				}
350 				if (chk->data != NULL) {
351 					sctp_free_bufspace(stcb, asoc, chk, 1);
352 					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
353 					                0, chk, SCTP_SO_NOT_LOCKED);
354 					if (chk->data) {
355 						sctp_m_freem(chk->data);
356 						chk->data = NULL;
357 					}
358 				}
359 				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
360 				/*sa_ignore FREED_MEMORY*/
361 			}
362 		}
363 		if (asoc->strmout) {
364 			for (i = newcnt; i < asoc->pre_open_streams; i++) {
365 				outs = &asoc->strmout[i];
366 				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
367 					atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
368 					TAILQ_REMOVE(&outs->outqueue, sp, next);
369 					stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
370 					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
371 					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
372 					if (sp->data) {
373 						sctp_m_freem(sp->data);
374 						sp->data = NULL;
375 					}
376 					if (sp->net) {
377 						sctp_free_remote_addr(sp->net);
378 						sp->net = NULL;
379 					}
380 					/* Free the chunk */
381 					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
382 					/*sa_ignore FREED_MEMORY*/
383 				}
384 				outs->state = SCTP_STREAM_CLOSED;
385 			}
386 		}
387 		/* cut back the count */
388 		asoc->pre_open_streams = newcnt;
389 	}
390 	SCTP_TCB_SEND_UNLOCK(stcb);
391 	asoc->streamoutcnt = asoc->pre_open_streams;
392 	if (asoc->strmout) {
393 		for (i = 0; i < asoc->streamoutcnt; i++) {
394 			asoc->strmout[i].state = SCTP_STREAM_OPEN;
395 		}
396 	}
397 	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
398 	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
399 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
400 		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
401 	}
402 	/* This is the next one we expect */
403 	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
404 
405 	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
406 	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
407 
408 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
409 	/* open the requested streams */
410 
411 	if (asoc->strmin != NULL) {
412 		/* Free the old ones */
413 		for (i = 0; i < asoc->streamincnt; i++) {
414 			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
415 			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
416 		}
417 		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
418 	}
419 	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
420 		asoc->streamincnt = ntohs(init->num_outbound_streams);
421 	} else {
422 		asoc->streamincnt = asoc->max_inbound_streams;
423 	}
424 	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
425 		    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
426 	if (asoc->strmin == NULL) {
427 		/* we didn't get memory for the streams! */
428 		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
429 		return (-1);
430 	}
431 	for (i = 0; i < asoc->streamincnt; i++) {
432 		asoc->strmin[i].sid = i;
433 		asoc->strmin[i].last_mid_delivered = 0xffffffff;
434 		TAILQ_INIT(&asoc->strmin[i].inqueue);
435 		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
436 		asoc->strmin[i].pd_api_started = 0;
437 		asoc->strmin[i].delivery_started = 0;
438 	}
439 	/*
440 	 * load_address_from_init will put the addresses into the
441 	 * association when the COOKIE is processed or the INIT-ACK is
442 	 * processed. Both types of COOKIEs, existing and new, call this
443 	 * routine. It will remove addresses that are no longer in the
444 	 * association (for the restarting case where addresses are
445 	 * removed). Up front, when the INIT arrives, we will discard it if it
446 	 * is a restart and new addresses have been added.
447 	 */
448 	/* sa_ignore MEMLEAK */
449 	return (0);
450 }
451 
452 /*
453  * INIT-ACK message processing/consumption; returns value < 0 on error
454  */
455 static int
456 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
457                       struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
458                       struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
459                       struct sctp_nets *net, int *abort_no_unlock,
460 #if defined(__FreeBSD__) && !defined(__Userspace__)
461 		      uint8_t mflowtype, uint32_t mflowid,
462 #endif
463                       uint32_t vrf_id)
464 {
465 	struct sctp_association *asoc;
466 	struct mbuf *op_err;
467 	int retval, abort_flag, cookie_found;
468 	int initack_limit;
469 	int nat_friendly = 0;
470 
471 	/* First verify that we have no illegal param's */
472 	abort_flag = 0;
473 	cookie_found = 0;
474 
475 	op_err = sctp_arethere_unrecognized_parameters(m,
476 						       (offset + sizeof(struct sctp_init_chunk)),
477 						       &abort_flag, (struct sctp_chunkhdr *)cp,
478 						       &nat_friendly, &cookie_found);
479 	if (abort_flag) {
480 		/* Send an abort and notify peer */
481 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
482 		*abort_no_unlock = 1;
483 		return (-1);
484 	}
485 	if (!cookie_found) {
486 		uint16_t len;
487 
488 		/* Only report the missing cookie parameter */
489 		if (op_err != NULL) {
490 			sctp_m_freem(op_err);
491 		}
492 		len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
493 		/* We abort with an error of missing mandatory param */
494 		op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
495 		if (op_err != NULL) {
496 			struct sctp_error_missing_param *cause;
497 
498 			SCTP_BUF_LEN(op_err) = len;
499 			cause = mtod(op_err, struct sctp_error_missing_param *);
500 			/* Subtract the reserved param */
501 			cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
502 			cause->cause.length = htons(len);
503 			cause->num_missing_params = htonl(1);
504 			cause->type[0] = htons(SCTP_STATE_COOKIE);
505 		}
506 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
507 				       src, dst, sh, op_err,
508 #if defined(__FreeBSD__) && !defined(__Userspace__)
509 				       mflowtype, mflowid,
510 #endif
511 				       vrf_id, net->port);
512 		*abort_no_unlock = 1;
513 		return (-3);
514 	}
515 	asoc = &stcb->asoc;
516 	asoc->peer_supports_nat = (uint8_t)nat_friendly;
517 	/* process the peer's parameters in the INIT-ACK */
518 	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
519 	if (retval < 0) {
520 		if (op_err != NULL) {
521 			sctp_m_freem(op_err);
522 		}
523 		return (retval);
524 	}
525 	initack_limit = offset + ntohs(cp->ch.chunk_length);
526 	/* load all addresses */
527 	if ((retval = sctp_load_addresses_from_init(stcb, m,
528 	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
529 	    src, dst, NULL, stcb->asoc.port))) {
530 		if (op_err != NULL) {
531 			sctp_m_freem(op_err);
532 		}
533 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
534 		                             "Problem with address parameters");
535 		SCTPDBG(SCTP_DEBUG_INPUT1,
536 			"Load addresses from INIT causes an abort %d\n",
537 			retval);
538 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
539 		                       src, dst, sh, op_err,
540 #if defined(__FreeBSD__) && !defined(__Userspace__)
541 		                       mflowtype, mflowid,
542 #endif
543 		                       vrf_id, net->port);
544 		*abort_no_unlock = 1;
545 		return (-1);
546 	}
547 	/* if the peer doesn't support asconf, flush the asconf queue */
548 	if (asoc->asconf_supported == 0) {
549 		struct sctp_asconf_addr *param, *nparam;
550 
551 		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
552 			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
553 			SCTP_FREE(param, SCTP_M_ASC_ADDR);
554 		}
555 	}
556 
557 	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
558 	    stcb->asoc.local_hmacs);
559 	if (op_err) {
560 		sctp_queue_op_err(stcb, op_err);
561 		/* queuing will steal away the mbuf chain to the out queue */
562 		op_err = NULL;
563 	}
564 	/* extract the cookie and queue it to "echo" it back... */
565 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
566 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
567 			       stcb->asoc.overall_error_count,
568 			       0,
569 			       SCTP_FROM_SCTP_INPUT,
570 			       __LINE__);
571 	}
572 	stcb->asoc.overall_error_count = 0;
573 	net->error_count = 0;
574 
575 	/*
576 	 * Cancel the INIT timer. We do this first, before queueing the
577 	 * cookie. We always cancel at the primary to assure that we are
578 	 * canceling the timer started by the INIT, which always goes to the
579 	 * primary.
580 	 */
581 	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
582 	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
583 
584 	/* calculate the RTO */
585 	sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
586 	                   SCTP_RTT_FROM_NON_DATA);
587 #if defined(__Userspace__)
588 	if (stcb->sctp_ep->recv_callback) {
589 		if (stcb->sctp_socket) {
590 			uint32_t inqueue_bytes, sb_free_now;
591 			struct sctp_inpcb *inp;
592 
593 			inp = stcb->sctp_ep;
594 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
595 			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
596 
597 			/* check if the amount free in the send socket buffer crossed the threshold */
598 			if (inp->send_callback &&
599 			    (((inp->send_sb_threshold > 0) &&
600 			      (sb_free_now >= inp->send_sb_threshold) &&
601 			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
602 			     (inp->send_sb_threshold == 0))) {
603 				atomic_add_int(&stcb->asoc.refcnt, 1);
604 				SCTP_TCB_UNLOCK(stcb);
605 				inp->send_callback(stcb->sctp_socket, sb_free_now);
606 				SCTP_TCB_LOCK(stcb);
607 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
608 			}
609 		}
610 	}
611 #endif
612 	retval = sctp_send_cookie_echo(m, offset, initack_limit, stcb, net);
613 	return (retval);
614 }
615 
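/*
 * Process a HEARTBEAT-ACK: rebuild the destination address from the echoed
 * heartbeat info, look up the matching net, and confirm the address when
 * the echoed random values match.  The timestamps in the heartbeat info
 * feed the RTO calculation, error counters are cleared, and a pending
 * set-primary request or mobility handoff is completed.
 */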
616 static void
617 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
618     struct sctp_tcb *stcb, struct sctp_nets *net)
619 {
620 	union sctp_sockstore store;
621 	struct sctp_nets *r_net, *f_net;
622 	struct timeval tv;
623 	int req_prim = 0;
624 	uint16_t old_error_counter;
625 
626 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
627 		/* Invalid length */
628 		return;
629 	}
630 
631 	memset(&store, 0, sizeof(store));
632 	switch (cp->heartbeat.hb_info.addr_family) {
633 #ifdef INET
634 	case AF_INET:
635 		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
636 			store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
637 #ifdef HAVE_SIN_LEN
638 			store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
639 #endif
640 			store.sin.sin_port = stcb->rport;
641 			memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
642 			       sizeof(store.sin.sin_addr));
643 		} else {
644 			return;
645 		}
646 		break;
647 #endif
648 #ifdef INET6
649 	case AF_INET6:
650 		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
651 			store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
652 #ifdef HAVE_SIN6_LEN
653 			store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
654 #endif
655 			store.sin6.sin6_port = stcb->rport;
656 			memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
657 		} else {
658 			return;
659 		}
660 		break;
661 #endif
662 #if defined(__Userspace__)
663 	case AF_CONN:
664 		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) {
665 			store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family;
666 #ifdef HAVE_SCONN_LEN
667 			store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len;
668 #endif
669 			store.sconn.sconn_port = stcb->rport;
670 			memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *));
671 		} else {
672 			return;
673 		}
674 		break;
675 #endif
676 	default:
677 		return;
678 	}
679 	r_net = sctp_findnet(stcb, &store.sa);
680 	if (r_net == NULL) {
681 		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
682 		return;
683 	}
684 	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
685 	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
686 	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
687 		/*
688 		 * If it's a HB and its random value is correct, we can
689 		 * confirm the destination.
690 		 */
691 		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
692 		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
693 			stcb->asoc.primary_destination = r_net;
694 			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
695 			f_net = TAILQ_FIRST(&stcb->asoc.nets);
696 			if (f_net != r_net) {
697 				/* The first one on the list is NOT the primary.
698 				 * sctp_cmpaddr() is much more efficient if
699 				 * the primary is the first on the list, so make it
700 				 * so.
701 				 */
702 				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
703 				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
704 			}
705 			req_prim = 1;
706 		}
707 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
708 		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
709 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
710 		                r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
711 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
712 	}
713 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
714 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
715 			       stcb->asoc.overall_error_count,
716 			       0,
717 			       SCTP_FROM_SCTP_INPUT,
718 			       __LINE__);
719 	}
720 	stcb->asoc.overall_error_count = 0;
721 	old_error_counter = r_net->error_count;
722 	r_net->error_count = 0;
723 	r_net->hb_responded = 1;
724 	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
725 	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
726 	/* Now lets do a RTO with this */
727 	sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv,
728 	                   SCTP_RTT_FROM_NON_DATA);
729 	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
730 		r_net->dest_state |= SCTP_ADDR_REACHABLE;
731 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
732 				0, (void *)r_net, SCTP_SO_NOT_LOCKED);
733 	}
734 	if (r_net->dest_state & SCTP_ADDR_PF) {
735 		r_net->dest_state &= ~SCTP_ADDR_PF;
736 		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
737 	}
738 	if (old_error_counter > 0) {
739 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
740 		                stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
741 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
742 	}
743 	if (r_net == stcb->asoc.primary_destination) {
744 		if (stcb->asoc.alternate) {
745 			/* release the alternate, primary is good */
746 			sctp_free_remote_addr(stcb->asoc.alternate);
747 			stcb->asoc.alternate = NULL;
748 		}
749 	}
750 	/* Mobility adaptation */
751 	if (req_prim) {
752 		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
753 		                                 SCTP_MOBILITY_BASE) ||
754 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
755 		                                SCTP_MOBILITY_FASTHANDOFF)) &&
756 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
757 		                                SCTP_MOBILITY_PRIM_DELETED)) {
758 
759 			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
760 			                stcb->sctp_ep, stcb, NULL,
761 			                SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
762 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
763 					SCTP_MOBILITY_FASTHANDOFF)) {
764 				sctp_assoc_immediate_retrans(stcb,
765 					stcb->asoc.primary_destination);
766 			}
767 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
768 					SCTP_MOBILITY_BASE)) {
769 				sctp_move_chunks_from_net(stcb,
770 					stcb->asoc.deleted_primary);
771 			}
772 			sctp_delete_prim_timer(stcb->sctp_ep, stcb);
773 		}
774 	}
775 }
776 
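/*
 * NAT colliding-state handling: while still in COOKIE-WAIT or
 * COOKIE-ECHOED, pick a new verification tag, rehash the association
 * under it and resend the INIT (tossing any stored cookies when we were
 * in COOKIE-ECHOED).  Returns non-zero when this restart was done and
 * the caller should skip abort processing, 0 otherwise.
 */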
777 static int
778 sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
779 {
780 	/*
781 	 * Returning 0 means we want you to proceed with the abort;
782 	 * non-zero means no abort processing.
783 	 */
784 	uint32_t new_vtag;
785 	struct sctpasochead *head;
786 
787 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
788 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
789 		new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
790 		atomic_add_int(&stcb->asoc.refcnt, 1);
791 		SCTP_TCB_UNLOCK(stcb);
792 		SCTP_INP_INFO_WLOCK();
793 		SCTP_TCB_LOCK(stcb);
794 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
795 	} else {
796 		return (0);
797 	}
798 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
799 		/* generate a new vtag and send init */
800 		LIST_REMOVE(stcb, sctp_asocs);
801 		stcb->asoc.my_vtag = new_vtag;
802 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
803 		/* put it in the bucket in the vtag hash of assoc's for the system */
804 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
805 		SCTP_INP_INFO_WUNLOCK();
806 		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
807 		return (1);
808 	} else {
809 		/* treat this like a case where the cookie expired, i.e.:
810 		* - dump the current cookie.
811 		* - generate a new vtag.
812 		* - resend the init.
813 		*/
814 		/* generate a new vtag and send init */
815 		LIST_REMOVE(stcb, sctp_asocs);
816 		SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
817 		sctp_stop_all_cookie_timers(stcb);
818 		sctp_toss_old_cookies(stcb, &stcb->asoc);
819 		stcb->asoc.my_vtag = new_vtag;
820 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
821 		/* put it in the bucket in the vtag hash of assoc's for the system */
822 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
823 		SCTP_INP_INFO_WUNLOCK();
824 		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
825 		return (1);
826 	}
827 	return (0);
828 }
829 
830 static int
831 sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
832 			      struct sctp_nets *net)
833 {
834 	/* Returning 0 means we want you to proceed with the abort;
835 	 * non-zero means no abort processing.
836 	 */
837 	if (stcb->asoc.auth_supported == 0) {
838 		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
839 		return (0);
840 	}
841 	sctp_asconf_send_nat_state_update(stcb, net);
842 	return (1);
843 }
844 
845 
846 /* Returns 1 if the stcb was aborted, 0 otherwise */
847 static int
848 sctp_handle_abort(struct sctp_abort_chunk *abort,
849     struct sctp_tcb *stcb, struct sctp_nets *net)
850 {
851 #if defined(__APPLE__) && !defined(__Userspace__)
852 	struct socket *so;
853 #endif
854 	uint16_t len;
855 	uint16_t error;
856 
857 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
858 	if (stcb == NULL)
859 		return (0);
860 
861 	len = ntohs(abort->ch.chunk_length);
862 	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause)) {
863 		/* Need to check the cause codes for our
864 		 * two magic NAT aborts, which don't necessarily
865 		 * kill the assoc.
866 		 */
867 		struct sctp_error_cause *cause;
868 
869 		cause = (struct sctp_error_cause *)(abort + 1);
870 		error = ntohs(cause->code);
871 		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
872 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ABORT flags:%x\n",
873 			                           abort->ch.chunk_flags);
874 			if (sctp_handle_nat_colliding_state(stcb)) {
875 				return (0);
876 			}
877 		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
878 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ABORT flags:%x\n",
879 			                           abort->ch.chunk_flags);
880 			if (sctp_handle_nat_missing_state(stcb, net)) {
881 				return (0);
882 			}
883 		}
884 	} else {
885 		error = 0;
886 	}
887 	/* stop any receive timers */
888 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
889 	                SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
890 	/* notify user of the abort and clean up... */
891 	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
892 	/* free the tcb */
893 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
894 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
895 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
896 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
897 	}
898 #ifdef SCTP_ASOCLOG_OF_TSNS
899 	sctp_print_out_track_log(stcb);
900 #endif
901 #if defined(__APPLE__) && !defined(__Userspace__)
902 	so = SCTP_INP_SO(stcb->sctp_ep);
903 	atomic_add_int(&stcb->asoc.refcnt, 1);
904 	SCTP_TCB_UNLOCK(stcb);
905 	SCTP_SOCKET_LOCK(so, 1);
906 	SCTP_TCB_LOCK(stcb);
907 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
908 #endif
909 	SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
910 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
911 			      SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
912 #if defined(__APPLE__) && !defined(__Userspace__)
913 	SCTP_SOCKET_UNLOCK(so, 1);
914 #endif
915 	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
916 	return (1);
917 }
918 
919 static void
920 sctp_start_net_timers(struct sctp_tcb *stcb)
921 {
922 	uint32_t cnt_hb_sent;
923 	struct sctp_nets *net;
924 
925 	cnt_hb_sent = 0;
926 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
927 		/* For each network start:
928 		 * 1) A pmtu timer.
929 		 * 2) A HB timer.
930 		 * 3) If the dest is unconfirmed, send
931 		 *    a HB as well, if fewer than max_hb_burst
932 		 *    have been sent.
933 		 */
934 		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
935 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
936 		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
937 		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
938 			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
939 			cnt_hb_sent++;
940 		}
941 	}
942 	if (cnt_hb_sent) {
943 		sctp_chunk_output(stcb->sctp_ep, stcb,
944 				  SCTP_OUTPUT_FROM_COOKIE_ACK,
945 				  SCTP_SO_NOT_LOCKED);
946 	}
947 }
948 
949 
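/*
 * Process a peer SHUTDOWN: account for the cumulative TSN it carries, end
 * any partial delivery in progress, move to SHUTDOWN-RECEIVED (notifying
 * the ULP), and once nothing remains queued for the peer, reply with a
 * SHUTDOWN-ACK and enter SHUTDOWN-ACK-SENT.
 */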
950 static void
951 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
952     struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
953 {
954 	struct sctp_association *asoc;
955 	int some_on_streamwheel;
956 	int old_state;
957 #if defined(__APPLE__) && !defined(__Userspace__)
958 	struct socket *so;
959 #endif
960 
961 	SCTPDBG(SCTP_DEBUG_INPUT2,
962 		"sctp_handle_shutdown: handling SHUTDOWN\n");
963 	if (stcb == NULL)
964 		return;
965 	asoc = &stcb->asoc;
966 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
967 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
968 		return;
969 	}
970 	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
971 		/* Shutdown NOT the expected size */
972 		return;
973 	}
974 	old_state = SCTP_GET_STATE(stcb);
975 	sctp_update_acked(stcb, cp, abort_flag);
976 	if (*abort_flag) {
977 		return;
978 	}
979 	if (asoc->control_pdapi) {
980 		/* With a normal shutdown
981 		 * we assume the end of the last record.
982 		 */
983 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
984 		if (asoc->control_pdapi->on_strm_q) {
985 			struct sctp_stream_in *strm;
986 
987 			strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
988 			if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
989 				/* Unordered */
990 				TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
991 				asoc->control_pdapi->on_strm_q = 0;
992 			} else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
993 				/* Ordered */
994 				TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
995 				asoc->control_pdapi->on_strm_q = 0;
996 #ifdef INVARIANTS
997 			} else {
998 				panic("Unknown state on ctrl:%p on_strm_q:%d",
999 				      asoc->control_pdapi,
1000 				      asoc->control_pdapi->on_strm_q);
1001 #endif
1002 			}
1003 		}
1004 		asoc->control_pdapi->end_added = 1;
1005 		asoc->control_pdapi->pdapi_aborted = 1;
1006 		asoc->control_pdapi = NULL;
1007 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1008 #if defined(__APPLE__) && !defined(__Userspace__)
1009 		so = SCTP_INP_SO(stcb->sctp_ep);
1010 		atomic_add_int(&stcb->asoc.refcnt, 1);
1011 		SCTP_TCB_UNLOCK(stcb);
1012 		SCTP_SOCKET_LOCK(so, 1);
1013 		SCTP_TCB_LOCK(stcb);
1014 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1015 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1016 			/* assoc was freed while we were unlocked */
1017 			SCTP_SOCKET_UNLOCK(so, 1);
1018 			return;
1019 		}
1020 #endif
1021 		if (stcb->sctp_socket) {
1022 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1023 		}
1024 #if defined(__APPLE__) && !defined(__Userspace__)
1025 		SCTP_SOCKET_UNLOCK(so, 1);
1026 #endif
1027 	}
1028 	/* goto SHUTDOWN_RECEIVED state to block new requests */
1029 	if (stcb->sctp_socket) {
1030 		if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1031 		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
1032 		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
1033 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_RECEIVED);
1034 			/* notify upper layer that peer has initiated a shutdown */
1035 			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
1036 
1037 			/* reset time */
1038 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
1039 		}
1040 	}
1041 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
1042 		/*
1043 		 * stop the shutdown timer, since we WILL move to
1044 		 * SHUTDOWN-ACK-SENT.
1045 		 */
1046 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
1047 		                net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
1048 	}
1049 	/* Now is there unsent data on a stream somewhere? */
1050 	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
1051 
1052 	if (!TAILQ_EMPTY(&asoc->send_queue) ||
1053 	    !TAILQ_EMPTY(&asoc->sent_queue) ||
1054 	    some_on_streamwheel) {
1055 		/* By returning we will push more data out */
1056 		return;
1057 	} else {
1058 		/* no outstanding data to send, so move on... */
1059 		/* send SHUTDOWN-ACK */
1060 		/* move to SHUTDOWN-ACK-SENT state */
1061 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
1062 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1063 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1064 		}
1065 		if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
1066 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
1067 			sctp_stop_timers_for_shutdown(stcb);
1068 			sctp_send_shutdown_ack(stcb, net);
1069 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
1070 			                 stcb->sctp_ep, stcb, net);
1071 		} else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1072 			sctp_send_shutdown_ack(stcb, net);
1073 		}
1074 	}
1075 }
1076 
1077 static void
1078 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
1079                          struct sctp_tcb *stcb,
1080                          struct sctp_nets *net)
1081 {
1082 	struct sctp_association *asoc;
1083 #if defined(__APPLE__) && !defined(__Userspace__)
1084 	struct socket *so;
1085 
1086 	so = SCTP_INP_SO(stcb->sctp_ep);
1087 #endif
1088 	SCTPDBG(SCTP_DEBUG_INPUT2,
1089 		"sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
1090 	if (stcb == NULL)
1091 		return;
1092 
1093 	asoc = &stcb->asoc;
1094 	/* process according to association state */
1095 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
1096 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1097 		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
1098 		sctp_send_shutdown_complete(stcb, net, 1);
1099 		SCTP_TCB_UNLOCK(stcb);
1100 		return;
1101 	}
1102 	if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
1103 	    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
1104 		/* unexpected SHUTDOWN-ACK... so ignore... */
1105 		SCTP_TCB_UNLOCK(stcb);
1106 		return;
1107 	}
1108 	if (asoc->control_pdapi) {
1109 		/* With a normal shutdown
1110 		 * we assume the end of the last record.
1111 		 */
1112 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1113 		asoc->control_pdapi->end_added = 1;
1114 		asoc->control_pdapi->pdapi_aborted = 1;
1115 		asoc->control_pdapi = NULL;
1116 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1117 #if defined(__APPLE__) && !defined(__Userspace__)
1118 		atomic_add_int(&stcb->asoc.refcnt, 1);
1119 		SCTP_TCB_UNLOCK(stcb);
1120 		SCTP_SOCKET_LOCK(so, 1);
1121 		SCTP_TCB_LOCK(stcb);
1122 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1123 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1124 			/* assoc was freed while we were unlocked */
1125 			SCTP_SOCKET_UNLOCK(so, 1);
1126 			return;
1127 		}
1128 #endif
1129 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1130 #if defined(__APPLE__) && !defined(__Userspace__)
1131 		SCTP_SOCKET_UNLOCK(so, 1);
1132 #endif
1133 	}
1134 #ifdef INVARIANTS
1135 	if (!TAILQ_EMPTY(&asoc->send_queue) ||
1136 	    !TAILQ_EMPTY(&asoc->sent_queue) ||
1137 	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
1138 		panic("Queues are not empty when handling SHUTDOWN-ACK");
1139 	}
1140 #endif
1141 	/* stop the timer */
1142 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
1143 	                SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
1144 	/* send SHUTDOWN-COMPLETE */
1145 	sctp_send_shutdown_complete(stcb, net, 0);
1146 	/* notify upper layer protocol */
1147 	if (stcb->sctp_socket) {
1148 		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1149 		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1150 			stcb->sctp_socket->so_snd.sb_cc = 0;
1151 		}
1152 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
1153 	}
1154 	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
1155 	/* free the TCB but first save off the ep */
1156 #if defined(__APPLE__) && !defined(__Userspace__)
1157 	atomic_add_int(&stcb->asoc.refcnt, 1);
1158 	SCTP_TCB_UNLOCK(stcb);
1159 	SCTP_SOCKET_LOCK(so, 1);
1160 	SCTP_TCB_LOCK(stcb);
1161 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
1162 #endif
1163 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1164 			      SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1165 #if defined(__APPLE__) && !defined(__Userspace__)
1166 	SCTP_SOCKET_UNLOCK(so, 1);
1167 #endif
1168 }
1169 
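/*
 * The peer reported one of our chunks as unrecognized: clean up ASCONF
 * state or mark PR-SCTP as unsupported so that we stop sending chunks
 * the peer cannot handle.
 */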
1170 static void
1171 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type)
1172 {
1173 	switch (chunk_type) {
1174 	case SCTP_ASCONF_ACK:
1175 	case SCTP_ASCONF:
1176 		sctp_asconf_cleanup(stcb);
1177 		break;
1178 	case SCTP_IFORWARD_CUM_TSN:
1179 	case SCTP_FORWARD_CUM_TSN:
1180 		stcb->asoc.prsctp_supported = 0;
1181 		break;
1182 	default:
1183 		SCTPDBG(SCTP_DEBUG_INPUT2,
1184 			"Peer does not support chunk type %d (0x%x).\n",
1185 			chunk_type, chunk_type);
1186 		break;
1187 	}
1188 }
1189 
1190 /*
1191  * Skip past the param header and then we will find the param that caused the
1192  * problem.  There are a number of params in an ASCONF OR the prsctp param;
1193  * these will turn off specific features.
1194  * XXX: Is this the right thing to do?
1195  */
1196 static void
1197 sctp_process_unrecog_param(struct sctp_tcb *stcb, uint16_t parameter_type)
1198 {
1199 	switch (parameter_type) {
1200 		/* pr-sctp draft */
1201 	case SCTP_PRSCTP_SUPPORTED:
1202 		stcb->asoc.prsctp_supported = 0;
1203 		break;
1204 	case SCTP_SUPPORTED_CHUNK_EXT:
1205 		break;
1206 		/* draft-ietf-tsvwg-addip-sctp */
1207 	case SCTP_HAS_NAT_SUPPORT:
1208 	        stcb->asoc.peer_supports_nat = 0;
1209 	        break;
1210 	case SCTP_ADD_IP_ADDRESS:
1211 	case SCTP_DEL_IP_ADDRESS:
1212 	case SCTP_SET_PRIM_ADDR:
1213 		stcb->asoc.asconf_supported = 0;
1214 		break;
1215 	case SCTP_SUCCESS_REPORT:
1216 	case SCTP_ERROR_CAUSE_IND:
1217 		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1218 		SCTPDBG(SCTP_DEBUG_INPUT2,
1219 			"Turning off ASCONF to this strange peer\n");
1220 		stcb->asoc.asconf_supported = 0;
1221 		break;
1222 	default:
1223 		SCTPDBG(SCTP_DEBUG_INPUT2,
1224 			"Peer does not support param type %d (0x%x)??\n",
1225 			parameter_type, parameter_type);
1226 		break;
1227 	}
1228 }
1229 
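/*
 * Walk the error causes carried in an ERROR chunk (bounded by the chunk
 * length and the given limit), act on the ones we understand (NAT state
 * errors, stale cookie, unrecognized chunk/parameter), and finally notify
 * the ULP with the first cause code seen.  Returns -1 only when a stale
 * cookie forces the association to be freed.
 */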
1230 static int
1231 sctp_handle_error(struct sctp_chunkhdr *ch,
1232                   struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
1233 {
1234 	struct sctp_error_cause *cause;
1235 	struct sctp_association *asoc;
1236 	uint32_t remaining_length, adjust;
1237 	uint16_t code, cause_code, cause_length;
1238 #if defined(__APPLE__) && !defined(__Userspace__)
1239 	struct socket *so;
1240 #endif
1241 
1242 	/* parse through all of the errors and process */
1243 	asoc = &stcb->asoc;
1244 	cause = (struct sctp_error_cause *)((caddr_t)ch +
1245 	    sizeof(struct sctp_chunkhdr));
1246 	remaining_length = ntohs(ch->chunk_length);
1247 	if (remaining_length > limit) {
1248 		remaining_length = limit;
1249 	}
1250 	if (remaining_length >= sizeof(struct sctp_chunkhdr)) {
1251 		remaining_length -= sizeof(struct sctp_chunkhdr);
1252 	} else {
1253 		remaining_length = 0;
1254 	}
1255 	code = 0;
1256 	while (remaining_length >= sizeof(struct sctp_error_cause)) {
1257 		/* Process an Error Cause */
1258 		cause_code = ntohs(cause->code);
1259 		cause_length = ntohs(cause->length);
1260 		if ((cause_length > remaining_length) || (cause_length == 0)) {
1261 			/* Invalid cause length, possibly due to truncation. */
1262 			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in cause - bytes left: %u cause length: %u\n",
1263 				remaining_length, cause_length);
1264 			return (0);
1265 		}
1266 		if (code == 0) {
1267 			/* report the first error cause */
1268 			code = cause_code;
1269 		}
1270 		switch (cause_code) {
1271 		case SCTP_CAUSE_INVALID_STREAM:
1272 		case SCTP_CAUSE_MISSING_PARAM:
1273 		case SCTP_CAUSE_INVALID_PARAM:
1274 		case SCTP_CAUSE_NO_USER_DATA:
1275 			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %u back? We have a bug :/ (or do they?)\n",
1276 				cause_code);
1277 			break;
1278 		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1279 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ERROR flags: %x\n",
1280 				ch->chunk_flags);
1281 			if (sctp_handle_nat_colliding_state(stcb)) {
1282 				return (0);
1283 			}
1284 			break;
1285 		case SCTP_CAUSE_NAT_MISSING_STATE:
1286 			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ERROR flags: %x\n",
1287 			                           ch->chunk_flags);
1288 			if (sctp_handle_nat_missing_state(stcb, net)) {
1289 				return (0);
1290 			}
1291 			break;
1292 		case SCTP_CAUSE_STALE_COOKIE:
1293 			/*
1294 			 * We only act if we have echoed a cookie and are
1295 			 * waiting.
1296 			 */
1297 			if ((cause_length >= sizeof(struct sctp_error_stale_cookie)) &&
1298 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1299 				struct sctp_error_stale_cookie *stale_cookie;
1300 
1301 				stale_cookie = (struct sctp_error_stale_cookie *)cause;
1302 				asoc->cookie_preserve_req = ntohl(stale_cookie->stale_time);
1303 				/* Double it to be more robust on RTX */
1304 				if (asoc->cookie_preserve_req <= UINT32_MAX / 2) {
1305 					asoc->cookie_preserve_req *= 2;
1306 				} else {
1307 					asoc->cookie_preserve_req = UINT32_MAX;
1308 				}
1309 				asoc->stale_cookie_count++;
1310 				if (asoc->stale_cookie_count >
1311 				    asoc->max_init_times) {
1312 					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
1313 					/* now free the asoc */
1314 #if defined(__APPLE__) && !defined(__Userspace__)
1315 					so = SCTP_INP_SO(stcb->sctp_ep);
1316 					atomic_add_int(&stcb->asoc.refcnt, 1);
1317 					SCTP_TCB_UNLOCK(stcb);
1318 					SCTP_SOCKET_LOCK(so, 1);
1319 					SCTP_TCB_LOCK(stcb);
1320 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1321 #endif
1322 					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1323 							      SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1324 #if defined(__APPLE__) && !defined(__Userspace__)
1325 					SCTP_SOCKET_UNLOCK(so, 1);
1326 #endif
1327 					return (-1);
1328 				}
1329 				/* blast back to INIT state */
1330 				sctp_toss_old_cookies(stcb, &stcb->asoc);
1331 				SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
1332 				sctp_stop_all_cookie_timers(stcb);
1333 				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1334 			}
1335 			break;
1336 		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1337 			/*
1338 			 * Nothing we can do here; we don't do hostname
1339 			 * addresses so if the peer does not like my IPv6
1340 			 * (or IPv4 for that matter) it does not matter. If
1341 			 * they don't support that type of address, they can
1342 			 * NOT possibly get that packet type... i.e. with no
1343 			 * IPv6 you can't receive an IPv6 packet, so we can
1344 			 * safely ignore this one. If we ever added support
1345 			 * for HOSTNAME Addresses, then we would need to do
1346 			 * something here.
1347 			 */
1348 			break;
1349 		case SCTP_CAUSE_UNRECOG_CHUNK:
1350 			if (cause_length >= sizeof(struct sctp_error_unrecognized_chunk)) {
1351 				struct sctp_error_unrecognized_chunk *unrec_chunk;
1352 
1353 				unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause;
1354 				sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type);
1355 			}
1356 			break;
1357 		case SCTP_CAUSE_UNRECOG_PARAM:
1358 			/* XXX: We only consider the first parameter */
1359 			if (cause_length >= sizeof(struct sctp_error_cause) + sizeof(struct sctp_paramhdr)) {
1360 				struct sctp_paramhdr *unrec_parameter;
1361 
1362 				unrec_parameter = (struct sctp_paramhdr *)(cause + 1);
1363 				sctp_process_unrecog_param(stcb, ntohs(unrec_parameter->param_type));
1364 			}
1365 			break;
1366 		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1367 			/*
1368 			 * We ignore this since the timer will drive out a
1369 			 * new cookie anyway and their timer will drive us
1370 			 * to send a SHUTDOWN_COMPLETE. We can't send one
1371 			 * here since we don't have their tag.
1372 			 */
1373 			break;
1374 		case SCTP_CAUSE_DELETING_LAST_ADDR:
1375 		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1376 		case SCTP_CAUSE_DELETING_SRC_ADDR:
1377 			/*
1378 			 * We should NOT get these here, but in an
1379 			 * ASCONF-ACK.
1380 			 */
1381 			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a error cause with code %u.\n",
1382 				cause_code);
1383 			break;
1384 		case SCTP_CAUSE_OUT_OF_RESC:
1385 			/*
1386 			 * And what, pray tell do we do with the fact that
1387 			 * the peer is out of resources? Not really sure we
1388 			 * could do anything but abort. I suspect this
1389 			 * should have come WITH an abort instead of in an
1390 			 * OP-ERROR.
1391 			 */
1392 			break;
1393 		default:
1394 			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown code 0x%x\n",
1395 				cause_code);
1396 			break;
1397 		}
1398 		adjust = SCTP_SIZE32(cause_length);
1399 		if (remaining_length >= adjust) {
1400 			remaining_length -= adjust;
1401 		} else {
1402 			remaining_length = 0;
1403 		}
1404 		cause = (struct sctp_error_cause *)((caddr_t)cause + adjust);
1405 	}
1406 	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, code, ch, SCTP_SO_NOT_LOCKED);
1407 	return (0);
1408 }
1409 
1410 static int
1411 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1412                      struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
1413                      struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1414                      struct sctp_nets *net, int *abort_no_unlock,
1415 #if defined(__FreeBSD__) && !defined(__Userspace__)
1416                      uint8_t mflowtype, uint32_t mflowid,
1417 #endif
1418                      uint32_t vrf_id)
1419 {
1420 	struct sctp_init_ack *init_ack;
1421 	struct mbuf *op_err;
1422 
1423 	SCTPDBG(SCTP_DEBUG_INPUT2,
1424 		"sctp_handle_init_ack: handling INIT-ACK\n");
1425 
1426 	if (stcb == NULL) {
1427 		SCTPDBG(SCTP_DEBUG_INPUT2,
1428 			"sctp_handle_init_ack: TCB is null\n");
1429 		return (-1);
1430 	}
1431 	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1432 		/* Invalid length */
1433 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1434 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1435 		                       src, dst, sh, op_err,
1436 #if defined(__FreeBSD__) && !defined(__Userspace__)
1437 		                       mflowtype, mflowid,
1438 #endif
1439 		                       vrf_id, net->port);
1440 		*abort_no_unlock = 1;
1441 		return (-1);
1442 	}
1443 	init_ack = &cp->init;
1444 	/* validate parameters */
1445 	if (init_ack->initiate_tag == 0) {
1446 		/* protocol error... send an abort */
1447 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1448 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1449 		                       src, dst, sh, op_err,
1450 #if defined(__FreeBSD__) && !defined(__Userspace__)
1451 		                       mflowtype, mflowid,
1452 #endif
1453 		                       vrf_id, net->port);
1454 		*abort_no_unlock = 1;
1455 		return (-1);
1456 	}
1457 	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1458 		/* protocol error... send an abort */
1459 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1460 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1461 		                       src, dst, sh, op_err,
1462 #if defined(__FreeBSD__) && !defined(__Userspace__)
1463 		                       mflowtype, mflowid,
1464 #endif
1465 		                       vrf_id, net->port);
1466 		*abort_no_unlock = 1;
1467 		return (-1);
1468 	}
1469 	if (init_ack->num_inbound_streams == 0) {
1470 		/* protocol error... send an abort */
1471 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1472 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1473 		                       src, dst, sh, op_err,
1474 #if defined(__FreeBSD__) && !defined(__Userspace__)
1475 		                       mflowtype, mflowid,
1476 #endif
1477 		                       vrf_id, net->port);
1478 		*abort_no_unlock = 1;
1479 		return (-1);
1480 	}
1481 	if (init_ack->num_outbound_streams == 0) {
1482 		/* protocol error... send an abort */
1483 		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1484 		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1485 		                       src, dst, sh, op_err,
1486 #if defined(__FreeBSD__) && !defined(__Userspace__)
1487 		                       mflowtype, mflowid,
1488 #endif
1489 		                       vrf_id, net->port);
1490 		*abort_no_unlock = 1;
1491 		return (-1);
1492 	}
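	/*
	 * All mandatory INIT-ACK fields have now been sanity-checked: a
	 * non-zero initiate tag, a receive window of at least SCTP_MIN_RWND,
	 * and non-zero stream counts.  An INIT-ACK is only acted upon in the
	 * COOKIE-WAIT state below; in every other state it is discarded or
	 * treated as an error.
	 */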
1493 	/* process according to association state... */
1494 	switch (SCTP_GET_STATE(stcb)) {
1495 	case SCTP_STATE_COOKIE_WAIT:
1496 		/* this is the expected state for this chunk */
1497 		/* process the INIT-ACK parameters */
1498 		if (stcb->asoc.primary_destination->dest_state &
1499 		    SCTP_ADDR_UNCONFIRMED) {
1500 			/*
1501 			 * The primary is where we sent the INIT, we can
1502 			 * always consider it confirmed when the INIT-ACK is
1503 			 * returned. Do this before we load addresses
1504 			 * though.
1505 			 */
1506 			stcb->asoc.primary_destination->dest_state &=
1507 			    ~SCTP_ADDR_UNCONFIRMED;
1508 			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1509 			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1510 		}
1511 		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
1512 		                          net, abort_no_unlock,
1513 #if defined(__FreeBSD__) && !defined(__Userspace__)
1514 		                          mflowtype, mflowid,
1515 #endif
1516 		                          vrf_id) < 0) {
1517 			/* error in parsing parameters */
1518 			return (-1);
1519 		}
1520 		/* update our state */
1521 		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1522 		SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED);
1523 
1524 		/* reset the RTO calc */
1525 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1526 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1527 				       stcb->asoc.overall_error_count,
1528 				       0,
1529 				       SCTP_FROM_SCTP_INPUT,
1530 				       __LINE__);
1531 		}
1532 		stcb->asoc.overall_error_count = 0;
1533 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1534 		/*
1535 		 * collapse the init timer back in case of an exponential
1536 		 * backoff
1537 		 */
1538 		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1539 		    stcb, net);
1540 		/*
1541 		 * the send at the end of the inbound data processing will
1542 		 * cause the cookie to be sent
1543 		 */
1544 		break;
1545 	case SCTP_STATE_SHUTDOWN_SENT:
1546 		/* incorrect state... discard */
1547 		break;
1548 	case SCTP_STATE_COOKIE_ECHOED:
1549 		/* incorrect state... discard */
1550 		break;
1551 	case SCTP_STATE_OPEN:
1552 		/* incorrect state... discard */
1553 		break;
1554 	case SCTP_STATE_EMPTY:
1555 	case SCTP_STATE_INUSE:
1556 	default:
1557 		/* incorrect state... discard */
1558 		return (-1);
1559 		break;
1560 	}
1561 	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1562 	return (0);
1563 }
1564 
1565 static struct sctp_tcb *
1566 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1567     struct sockaddr *src, struct sockaddr *dst,
1568     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1569     struct sctp_inpcb *inp, struct sctp_nets **netp,
1570     struct sockaddr *init_src, int *notification,
1571     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1572 #if defined(__FreeBSD__) && !defined(__Userspace__)
1573     uint8_t mflowtype, uint32_t mflowid,
1574 #endif
1575     uint32_t vrf_id, uint16_t port);
1576 
1577 
1578 /*
1579  * handle a state cookie for an existing association.
1580  *   m: input packet mbuf chain -- assumes a pullup on the IP/SCTP/COOKIE-ECHO
1581  *      chunk (this is a "split" mbuf and the cookie signature does not exist)
1582  *   offset: offset into mbuf to the cookie-echo chunk
1583  */
1584 static struct sctp_tcb *
1585 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1586     struct sockaddr *src, struct sockaddr *dst,
1587     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1588     struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1589     struct sockaddr *init_src, int *notification,
1590     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1591 #if defined(__FreeBSD__) && !defined(__Userspace__)
1592     uint8_t mflowtype, uint32_t mflowid,
1593 #endif
1594     uint32_t vrf_id, uint16_t port)
1595 {
1596 	struct sctp_association *asoc;
1597 	struct sctp_init_chunk *init_cp, init_buf;
1598 	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1599 	struct sctp_nets *net;
1600 	struct mbuf *op_err;
1601 	struct timeval old;
1602 	int init_offset, initack_offset, i;
1603 	int retval;
1604 	int spec_flag = 0;
1605 	uint32_t how_indx;
1606 #if defined(SCTP_DETAILED_STR_STATS)
1607 	int j;
1608 #endif
1609 
1610 	net = *netp;
1611 	/* I know that the TCB is non-NULL from the caller */
1612 	asoc = &stcb->asoc;
1613 	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1614 		if (asoc->cookie_how[how_indx] == 0)
1615 			break;
1616 	}
1617 	if (how_indx < sizeof(asoc->cookie_how)) {
1618 		asoc->cookie_how[how_indx] = 1;
1619 	}
1620 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1621 		/* SHUTDOWN came in after sending INIT-ACK */
1622 		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1623 		op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
1624 		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
1625 #if defined(__FreeBSD__) && !defined(__Userspace__)
1626 		                   mflowtype, mflowid, inp->fibnum,
1627 #endif
1628 		                   vrf_id, net->port);
1629 		if (how_indx < sizeof(asoc->cookie_how))
1630 			asoc->cookie_how[how_indx] = 2;
1631 		return (NULL);
1632 	}
1633 	/*
1634 	 * find and validate the INIT chunk in the cookie (peer's info) the
1635 	 * INIT should start after the cookie-echo header struct (chunk
1636 	 * header, state cookie header struct)
1637 	 */
1638 	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1639 
1640 	init_cp = (struct sctp_init_chunk *)
1641 		sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1642 			      (uint8_t *) & init_buf);
1643 	if (init_cp == NULL) {
1644 		/* could not pull an INIT chunk in cookie */
1645 		return (NULL);
1646 	}
1647 	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1648 		return (NULL);
1649 	}
1650 	/*
1651 	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1652 	 * INIT-ACK follows the INIT chunk
1653 	 */
1654 	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
1655 	initack_cp = (struct sctp_init_ack_chunk *)
1656 		sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1657 			      (uint8_t *) & initack_buf);
1658 	if (initack_cp == NULL) {
1659 		/* could not pull INIT-ACK chunk in cookie */
1660 		return (NULL);
1661 	}
1662 	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1663 		return (NULL);
1664 	}
1665 	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1666 	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1667 		/*
1668 		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1669 		 * to get into the OPEN state
1670 		 */
1671 		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1672 			/*-
1673 			 * Oops, this means that we somehow generated two
1674 			 * identical vtags. I.e. we did:
1675 			 *  Us               Peer
1676 			 *   <---INIT(tag=a)------
1677 			 *   ----INIT-ACK(tag=t)-->
1678 			 *   ----INIT(tag=t)------> *1
1679 			 *   <---INIT-ACK(tag=a)---
1680 			 *   <----CE(tag=t)-------- *2
1681 			 *
1682 			 * At point *1 we should have generated a different
1683 			 * tag t', which means we would throw away the CE and send
1684 			 * ours instead. Basically this is case C (throw away side).
1685 			 */
1686 			if (how_indx < sizeof(asoc->cookie_how))
1687 				asoc->cookie_how[how_indx] = 17;
1688 			return (NULL);
1689 
1690 		}
1691 		switch (SCTP_GET_STATE(stcb)) {
1692 			case SCTP_STATE_COOKIE_WAIT:
1693 			case SCTP_STATE_COOKIE_ECHOED:
1694 				/*
1695 				 * INIT was sent but got a COOKIE_ECHO with the
1696 				 * correct tags... just accept it... but we must
1697 				 * process the INIT so that we can make sure we
1698 				 * have the right sequence numbers.
1699 				 */
1700 				/* First we must process the INIT !! */
1701 				retval = sctp_process_init(init_cp, stcb);
1702 				if (retval < 0) {
1703 					if (how_indx < sizeof(asoc->cookie_how))
1704 						asoc->cookie_how[how_indx] = 3;
1705 					return (NULL);
1706 				}
1707 				/* we have already processed the INIT so no problem */
1708 				sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
1709 				                stcb, net,
1710 				                SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1711 				sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
1712 				                stcb, net,
1713 				                SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1714 				/* update current state */
1715 				if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1716 					SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1717 				else
1718 					SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1719 
1720 				SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1721 				if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1722 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1723 					                 stcb->sctp_ep, stcb, NULL);
1724 				}
1725 				SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1726 				sctp_stop_all_cookie_timers(stcb);
1727 				if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1728 				     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1729 				    (!SCTP_IS_LISTENING(inp))) {
1730 #if defined(__APPLE__) && !defined(__Userspace__)
1731 					struct socket *so;
1732 #endif
1733 					/*
1734 					 * Here is where collision would go if we
1735 					 * did a connect() and instead got an
1736 					 * INIT/INIT-ACK/COOKIE done before the
1737 					 * INIT-ACK came back.
1738 					 */
1739 					stcb->sctp_ep->sctp_flags |=
1740 						SCTP_PCB_FLAGS_CONNECTED;
1741 #if defined(__APPLE__) && !defined(__Userspace__)
1742 					so = SCTP_INP_SO(stcb->sctp_ep);
1743 					atomic_add_int(&stcb->asoc.refcnt, 1);
1744 					SCTP_TCB_UNLOCK(stcb);
1745 					SCTP_SOCKET_LOCK(so, 1);
1746 					SCTP_TCB_LOCK(stcb);
1747 					atomic_add_int(&stcb->asoc.refcnt, -1);
1748 					if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1749 						SCTP_SOCKET_UNLOCK(so, 1);
1750 						return (NULL);
1751 					}
1752 #endif
1753 					soisconnected(stcb->sctp_socket);
1754 #if defined(__APPLE__) && !defined(__Userspace__)
1755 					SCTP_SOCKET_UNLOCK(so, 1);
1756 #endif
1757 				}
1758 				/* notify upper layer */
1759 				*notification = SCTP_NOTIFY_ASSOC_UP;
1760 				/*
1761 				 * since we did not send a HB make sure we
1762 				 * don't double things
1763 				 */
1764 				old.tv_sec = cookie->time_entered.tv_sec;
1765 				old.tv_usec = cookie->time_entered.tv_usec;
1766 				net->hb_responded = 1;
1767 				sctp_calculate_rto(stcb, asoc, net, &old,
1768 				                   SCTP_RTT_FROM_NON_DATA);
1769 
1770 				if (stcb->asoc.sctp_autoclose_ticks &&
1771 				    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1772 					sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1773 							 inp, stcb, NULL);
1774 				}
1775 				break;
1776 			default:
1777 				/*
1778 				 * we're in the OPEN state (or beyond), so
1779 				 * peer must have simply lost the COOKIE-ACK
1780 				 */
1781 				break;
1782 		}	/* end switch */
1783 		sctp_stop_all_cookie_timers(stcb);
1784 		/*
1785 		 * We ignore the return code here.. not sure if we should
1786 		 * somehow abort.. but we do have an existing asoc. This
1787 		 * really should not fail.
1788 		 */
1789 		if (sctp_load_addresses_from_init(stcb, m,
1790 						  init_offset + sizeof(struct sctp_init_chunk),
1791 						  initack_offset, src, dst, init_src, stcb->asoc.port)) {
1792 			if (how_indx < sizeof(asoc->cookie_how))
1793 				asoc->cookie_how[how_indx] = 4;
1794 			return (NULL);
1795 		}
1796 		/* respond with a COOKIE-ACK */
1797 		sctp_toss_old_cookies(stcb, asoc);
1798 		sctp_send_cookie_ack(stcb);
1799 		if (how_indx < sizeof(asoc->cookie_how))
1800 			asoc->cookie_how[how_indx] = 5;
1801 		return (stcb);
1802 	}
1803 
1804 	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1805 	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1806 	    cookie->tie_tag_my_vtag == 0 &&
1807 	    cookie->tie_tag_peer_vtag == 0) {
1808 		/*
1809 		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1810 		 */
1811 		if (how_indx < sizeof(asoc->cookie_how))
1812 			asoc->cookie_how[how_indx] = 6;
1813 		return (NULL);
1814 	}
1815 	/* If the peer supports NAT, the tag conditions below hold, and the
1816 	 * association is established, send back an ABORT (colliding state).
1817 	 */
1818 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN)  &&
1819 	    (asoc->peer_supports_nat) &&
1820 	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1821 	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1822 	     (asoc->peer_vtag == 0)))) {
1823 		/* Special case - the peer supports NAT. We may have
1824 		 * handed out the same tag on two INITs since
1825 		 * one was not established, i.e. we get an INIT from host-1
1826 		 * behind the NAT and we respond tag-a, we get an INIT from
1827 		 * host-2 behind the NAT and we hand out tag-a again. Then we
1828 		 * bring up host-1's (or 2's) assoc, and then comes the cookie
1829 		 * from host-2 (or 1). Now we have colliding state. We must
1830 		 * send an abort here with a colliding state indication.
1831 		 */
1832 		op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
1833 		sctp_send_abort(m, iphlen,  src, dst, sh, 0, op_err,
1834 #if defined(__FreeBSD__) && !defined(__Userspace__)
1835 		                mflowtype, mflowid, inp->fibnum,
1836 #endif
1837 		                vrf_id, port);
1838 		return (NULL);
1839 	}
1840 	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1841 	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1842 	     (asoc->peer_vtag == 0))) {
1843 		/*
1844 		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1845 		 * should be ok, re-accept peer info
1846 		 */
1847 		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1848 			/* Extension of case C.
1849 			 * If we hit this, then the random number
1850 			 * generator returned the same vtag when we
1851 			 * first sent our INIT-ACK and when we later sent
1852 			 * our INIT. The side with the seq numbers that are
1853 			 * different will be the one that normally would
1854 			 * have hit case C. This in effect "extends" our vtags
1855 			 * in this collision case to be 64 bits. The same collision
1856 			 * could occur if you get both vtag and seq number the
1857 			 * same twice in a row, but that is much less likely. If it
1858 			 * did happen then we would proceed through and bring
1859 			 * up the assoc; we may end up with the wrong stream
1860 			 * setup, however, which would be bad, but there is
1861 			 * no way to tell until we send on a stream that does
1862 			 * not exist :-)
1863 			 */
1864 			if (how_indx < sizeof(asoc->cookie_how))
1865 				asoc->cookie_how[how_indx] = 7;
1866 
1867 			return (NULL);
1868 		}
1869 		if (how_indx < sizeof(asoc->cookie_how))
1870 			asoc->cookie_how[how_indx] = 8;
1871 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
1872 		                SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1873 		sctp_stop_all_cookie_timers(stcb);
1874 		/*
1875 		 * since we did not send a HB make sure we don't double
1876 		 * things
1877 		 */
1878 		net->hb_responded = 1;
1879 		if (stcb->asoc.sctp_autoclose_ticks &&
1880 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1881 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1882 					 NULL);
1883 		}
1884 		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1885 		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1886 
1887 		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1888 			/* Ok, the peer probably discarded our
1889 			 * data (if we echoed a cookie+data). So anything
1890 			 * on the sent_queue should be marked for
1891 			 * retransmit; we may not get something to
1892 			 * kick us so it COULD still take a timeout
1893 			 * to move these, but it can't hurt to mark them.
1894 			 */
1895 			struct sctp_tmit_chunk *chk;
1896 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1897 				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1898 					chk->sent = SCTP_DATAGRAM_RESEND;
1899 					sctp_flight_size_decrease(chk);
1900 					sctp_total_flight_decrease(stcb, chk);
1901 					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1902 					spec_flag++;
1903 				}
1904 			}
1905 
1906 		}
1907 		/* process the INIT info (peer's info) */
1908 		retval = sctp_process_init(init_cp, stcb);
1909 		if (retval < 0) {
1910 			if (how_indx < sizeof(asoc->cookie_how))
1911 				asoc->cookie_how[how_indx] = 9;
1912 			return (NULL);
1913 		}
1914 		if (sctp_load_addresses_from_init(stcb, m,
1915 						  init_offset + sizeof(struct sctp_init_chunk),
1916 						  initack_offset, src, dst, init_src, stcb->asoc.port)) {
1917 			if (how_indx < sizeof(asoc->cookie_how))
1918 				asoc->cookie_how[how_indx] = 10;
1919 			return (NULL);
1920 		}
1921 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
1922 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1923 			*notification = SCTP_NOTIFY_ASSOC_UP;
1924 
1925 			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1926 			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1927 			    (!SCTP_IS_LISTENING(inp))) {
1928 #if defined(__APPLE__) && !defined(__Userspace__)
1929 				struct socket *so;
1930 #endif
1931 				stcb->sctp_ep->sctp_flags |=
1932 					SCTP_PCB_FLAGS_CONNECTED;
1933 #if defined(__APPLE__) && !defined(__Userspace__)
1934 				so = SCTP_INP_SO(stcb->sctp_ep);
1935 				atomic_add_int(&stcb->asoc.refcnt, 1);
1936 				SCTP_TCB_UNLOCK(stcb);
1937 				SCTP_SOCKET_LOCK(so, 1);
1938 				SCTP_TCB_LOCK(stcb);
1939 				atomic_add_int(&stcb->asoc.refcnt, -1);
1940 				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1941 					SCTP_SOCKET_UNLOCK(so, 1);
1942 					return (NULL);
1943 				}
1944 #endif
1945 				soisconnected(stcb->sctp_socket);
1946 #if defined(__APPLE__) && !defined(__Userspace__)
1947 				SCTP_SOCKET_UNLOCK(so, 1);
1948 #endif
1949 			}
1950 			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1951 				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1952 			else
1953 				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1954 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1955 		} else if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
1956 			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1957 		} else {
1958 			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1959 		}
1960 		SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1961 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1962 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1963 			                 stcb->sctp_ep, stcb, NULL);
1964 		}
1965 		sctp_stop_all_cookie_timers(stcb);
1966 		sctp_toss_old_cookies(stcb, asoc);
1967 		sctp_send_cookie_ack(stcb);
1968 		if (spec_flag) {
1969 			/* Only if we have retransmissions marked do we do this.
1970 			 * This call gets only the COOKIE-ACK out;
1971 			 * when we return, the normal call to
1972 			 * sctp_chunk_output will get the retransmissions out
1973 			 * behind it.
1974 			 */
1975 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1976 		}
1977 		if (how_indx < sizeof(asoc->cookie_how))
1978 			asoc->cookie_how[how_indx] = 11;
1979 
1980 		return (stcb);
1981 	}
1982 	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1983 	     ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1984 	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1985 	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1986 	    cookie->tie_tag_peer_vtag != 0) {
1987 		struct sctpasochead *head;
1988 #if defined(__APPLE__) && !defined(__Userspace__)
1989 		struct socket *so;
1990 #endif
1991 
1992 		if (asoc->peer_supports_nat) {
1993 			/* This is a gross gross hack.
1994 			 * Just call the cookie_new code since we
1995 			 * are allowing a duplicate association.
1996 			 * I hope this works...
1997 			 */
1998 			return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
1999 			                                sh, cookie, cookie_len,
2000 			                                inp, netp, init_src, notification,
2001 			                                auth_skipped, auth_offset, auth_len,
2002 #if defined(__FreeBSD__) && !defined(__Userspace__)
2003 			                                mflowtype, mflowid,
2004 #endif
2005 			                                vrf_id, port));
2006 		}
2007 		/*
2008 		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
2009 		 */
2010 		/* temp code */
2011 		if (how_indx < sizeof(asoc->cookie_how))
2012 			asoc->cookie_how[how_indx] = 12;
2013 		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
2014 		                SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2015 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
2016 		                SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
2017 
2018 		/* notify upper layer */
2019 		*notification = SCTP_NOTIFY_ASSOC_RESTART;
2020 		atomic_add_int(&stcb->asoc.refcnt, 1);
2021 		if ((SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) &&
2022 		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
2023 		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
2024 			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2025 		}
2026 		if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
2027 			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
2028 		} else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
2029 			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
2030 		}
2031 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2032 			SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2033 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2034 			                 stcb->sctp_ep, stcb, NULL);
2035 
2036 		} else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
2037 			/* move to OPEN state, if not in SHUTDOWN_SENT */
2038 			SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2039 		}
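		/*
		 * The peer has restarted: re-seed the association from the
		 * INIT-ACK embedded in the cookie (stream counts, initial
		 * TSN, and the derived ASCONF/stream-reset sequence numbers)
		 * and clear the TSN mapping arrays for the new incarnation.
		 */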
2040 		asoc->pre_open_streams =
2041 			ntohs(initack_cp->init.num_outbound_streams);
2042 		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2043 		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2044 		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2045 
2046 		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2047 
2048 		asoc->str_reset_seq_in = asoc->init_seq_number;
2049 
2050 		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2051 		if (asoc->mapping_array) {
2052 			memset(asoc->mapping_array, 0,
2053 			       asoc->mapping_array_size);
2054 		}
2055 		if (asoc->nr_mapping_array) {
2056 			memset(asoc->nr_mapping_array, 0,
2057 			    asoc->mapping_array_size);
2058 		}
2059 		SCTP_TCB_UNLOCK(stcb);
2060 #if defined(__APPLE__) && !defined(__Userspace__)
2061 		so = SCTP_INP_SO(stcb->sctp_ep);
2062 		SCTP_SOCKET_LOCK(so, 1);
2063 #endif
2064 		SCTP_INP_INFO_WLOCK();
2065 		SCTP_INP_WLOCK(stcb->sctp_ep);
2066 		SCTP_TCB_LOCK(stcb);
2067 		atomic_add_int(&stcb->asoc.refcnt, -1);
2068 		/* send up all the data */
2069 		SCTP_TCB_SEND_LOCK(stcb);
2070 
2071 		sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED);
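		/*
		 * Reset the per-stream outbound bookkeeping (queued chunk
		 * counts, message ids, abandoned counters) so every stream
		 * starts clean for the restarted association.
		 */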
2072 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2073 			stcb->asoc.strmout[i].chunks_on_queues = 0;
2074 #if defined(SCTP_DETAILED_STR_STATS)
2075 			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
2076 				asoc->strmout[i].abandoned_sent[j] = 0;
2077 				asoc->strmout[i].abandoned_unsent[j] = 0;
2078 			}
2079 #else
2080 			asoc->strmout[i].abandoned_sent[0] = 0;
2081 			asoc->strmout[i].abandoned_unsent[0] = 0;
2082 #endif
2083 			stcb->asoc.strmout[i].sid = i;
2084 			stcb->asoc.strmout[i].next_mid_ordered = 0;
2085 			stcb->asoc.strmout[i].next_mid_unordered = 0;
2086 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
2087 		}
2088 		/* process the INIT-ACK info (my info) */
2089 		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2090 		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2091 
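		/*
		 * my_vtag just changed and the association hash is keyed on
		 * it, so the TCB must be removed and re-inserted into the
		 * vtag hash under the new tag (the INP-INFO write lock is
		 * held across this).
		 */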
2092 		/* pull from vtag hash */
2093 		LIST_REMOVE(stcb, sctp_asocs);
2094 		/* re-insert to new vtag position */
2095 		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
2096 								    SCTP_BASE_INFO(hashasocmark))];
2097 		/*
2098 		 * put it in the bucket in the vtag hash of assoc's for the
2099 		 * system
2100 		 */
2101 		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
2102 
2103 		SCTP_TCB_SEND_UNLOCK(stcb);
2104 		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2105 		SCTP_INP_INFO_WUNLOCK();
2106 #if defined(__APPLE__) && !defined(__Userspace__)
2107 		SCTP_SOCKET_UNLOCK(so, 1);
2108 #endif
2109 		asoc->total_flight = 0;
2110 		asoc->total_flight_count = 0;
2111 		/* process the INIT info (peer's info) */
2112 		retval = sctp_process_init(init_cp, stcb);
2113 		if (retval < 0) {
2114 			if (how_indx < sizeof(asoc->cookie_how))
2115 				asoc->cookie_how[how_indx] = 13;
2116 
2117 			return (NULL);
2118 		}
2119 		/*
2120 		 * since we did not send a HB make sure we don't double
2121 		 * things
2122 		 */
2123 		net->hb_responded = 1;
2124 
2125 		if (sctp_load_addresses_from_init(stcb, m,
2126 						  init_offset + sizeof(struct sctp_init_chunk),
2127 						  initack_offset, src, dst, init_src, stcb->asoc.port)) {
2128 			if (how_indx < sizeof(asoc->cookie_how))
2129 				asoc->cookie_how[how_indx] = 14;
2130 
2131 			return (NULL);
2132 		}
2133 		/* respond with a COOKIE-ACK */
2134 		sctp_stop_all_cookie_timers(stcb);
2135 		sctp_toss_old_cookies(stcb, asoc);
2136 		sctp_send_cookie_ack(stcb);
2137 		if (how_indx < sizeof(asoc->cookie_how))
2138 			asoc->cookie_how[how_indx] = 15;
2139 
2140 		return (stcb);
2141 	}
2142 	if (how_indx < sizeof(asoc->cookie_how))
2143 		asoc->cookie_how[how_indx] = 16;
2144 	/* all other cases... */
2145 	return (NULL);
2146 }
2147 
2148 
2149 /*
2150  * handle a state cookie for a new association.
2151  *   m: input packet mbuf chain -- assumes a pullup on the IP/SCTP/COOKIE-ECHO
2152  *      chunk (this is a "split" mbuf and the cookie signature does not exist)
2153  *   offset: offset into mbuf to the cookie-echo chunk
2154  *   length: length of the cookie chunk   to: where the init was from   returns a new TCB
2155  */
2156 static struct sctp_tcb *
2157 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
2158     struct sockaddr *src, struct sockaddr *dst,
2159     struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
2160     struct sctp_inpcb *inp, struct sctp_nets **netp,
2161     struct sockaddr *init_src, int *notification,
2162     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2163 #if defined(__FreeBSD__) && !defined(__Userspace__)
2164     uint8_t mflowtype, uint32_t mflowid,
2165 #endif
2166     uint32_t vrf_id, uint16_t port)
2167 {
2168 	struct sctp_tcb *stcb;
2169 	struct sctp_init_chunk *init_cp, init_buf;
2170 	struct sctp_init_ack_chunk *initack_cp, initack_buf;
2171 	union sctp_sockstore store;
2172 	struct sctp_association *asoc;
2173 	int init_offset, initack_offset, initack_limit;
2174 	int retval;
2175 	int error = 0;
2176 	uint8_t auth_chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
2177 #if defined(__APPLE__) && !defined(__Userspace__)
2178 	struct socket *so;
2179 
2180 	so = SCTP_INP_SO(inp);
2181 #endif
2182 
2183 	/*
2184 	 * find and validate the INIT chunk in the cookie (peer's info) the
2185 	 * INIT should start after the cookie-echo header struct (chunk
2186 	 * header, state cookie header struct)
2187 	 */
2188 	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
2189 	init_cp = (struct sctp_init_chunk *)
2190 	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
2191 	    (uint8_t *) & init_buf);
2192 	if (init_cp == NULL) {
2193 		/* could not pull an INIT chunk in cookie */
2194 		SCTPDBG(SCTP_DEBUG_INPUT1,
2195 			"process_cookie_new: could not pull INIT chunk hdr\n");
2196 		return (NULL);
2197 	}
2198 	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
2199 		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
2200 		return (NULL);
2201 	}
2202 	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
2203 	/*
2204 	 * find and validate the INIT-ACK chunk in the cookie (my info) the
2205 	 * INIT-ACK follows the INIT chunk
2206 	 */
2207 	initack_cp = (struct sctp_init_ack_chunk *)
2208 	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
2209 	    (uint8_t *) & initack_buf);
2210 	if (initack_cp == NULL) {
2211 		/* could not pull INIT-ACK chunk in cookie */
2212 		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
2213 		return (NULL);
2214 	}
2215 	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
2216 		return (NULL);
2217 	}
2218 	/*
2219 	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
2220 	 * "initack_limit" value.  This is because the chk_length field
2221 	 * includes the length of the cookie, but the cookie is omitted when
2222 	 * the INIT and INIT_ACK are tacked onto the cookie...
2223 	 */
2224 	initack_limit = offset + cookie_len;
2225 
2226 	/*
2227 	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
2228 	 * and populate
2229 	 */
2230 
2231 	/*
2232 	 * Here we do a trick: we pass NULL for the proc/thread argument. We
2233 	 * do this since in effect we only use the p argument when
2234 	 * the socket is unbound and we must do an implicit bind.
2235 	 * Since we are getting a cookie, we cannot be unbound.
2236 	 */
2237 	stcb = sctp_aloc_assoc(inp, init_src, &error,
2238 	                       ntohl(initack_cp->init.initiate_tag), vrf_id,
2239 	                       ntohs(initack_cp->init.num_outbound_streams),
2240 	                       port,
2241 #if defined(__FreeBSD__) && !defined(__Userspace__)
2242 	                       (struct thread *)NULL,
2243 #elif defined(_WIN32) && !defined(__Userspace__)
2244 	                       (PKTHREAD)NULL,
2245 #else
2246 	                       (struct proc *)NULL,
2247 #endif
2248 	                       SCTP_DONT_INITIALIZE_AUTH_PARAMS);
2249 	if (stcb == NULL) {
2250 		struct mbuf *op_err;
2251 
2252 		/* memory problem? */
2253 		SCTPDBG(SCTP_DEBUG_INPUT1,
2254 			"process_cookie_new: no room for another TCB!\n");
2255 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2256 		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2257 		                       src, dst, sh, op_err,
2258 #if defined(__FreeBSD__) && !defined(__Userspace__)
2259 		                       mflowtype, mflowid,
2260 #endif
2261 		                       vrf_id, port);
2262 		return (NULL);
2263 	}
2264 	/* get the correct sctp_nets */
2265 	if (netp)
2266 		*netp = sctp_findnet(stcb, init_src);
2267 
2268 	asoc = &stcb->asoc;
2269 	/* get scope variables out of cookie */
2270 	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
2271 	asoc->scope.site_scope = cookie->site_scope;
2272 	asoc->scope.local_scope = cookie->local_scope;
2273 	asoc->scope.loopback_scope = cookie->loopback_scope;
2274 
2275 #if defined(__Userspace__)
2276 	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2277 	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal) ||
2278 	    (asoc->scope.conn_addr_legal != cookie->conn_addr_legal)) {
2279 #else
2280 	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2281 	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
2282 #endif
2283 		struct mbuf *op_err;
2284 
2285 		/*
2286 		 * Houston we have a problem. The EP changed while the
2287 		 * cookie was in flight. Only recourse is to abort the
2288 		 * association.
2289 		 */
2290 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2291 		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2292 				       src, dst, sh, op_err,
2293 #if defined(__FreeBSD__) && !defined(__Userspace__)
2294 		                       mflowtype, mflowid,
2295 #endif
2296 		                       vrf_id, port);
2297 #if defined(__APPLE__) && !defined(__Userspace__)
2298 		atomic_add_int(&stcb->asoc.refcnt, 1);
2299 		SCTP_TCB_UNLOCK(stcb);
2300 		SCTP_SOCKET_LOCK(so, 1);
2301 		SCTP_TCB_LOCK(stcb);
2302 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2303 #endif
2304 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2305 				      SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
2306 #if defined(__APPLE__) && !defined(__Userspace__)
2307 		SCTP_SOCKET_UNLOCK(so, 1);
2308 #endif
2309 		return (NULL);
2310 	}
2311 	/* process the INIT-ACK info (my info) */
2312 	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2313 	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2314 	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
2315 	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2316 	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2317 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2318 	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2319 	asoc->str_reset_seq_in = asoc->init_seq_number;
2320 
2321 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2322 
2323 	/* process the INIT info (peer's info) */
2324 	if (netp)
2325 		retval = sctp_process_init(init_cp, stcb);
2326 	else
2327 		retval = 0;
2328 	if (retval < 0) {
2329 #if defined(__APPLE__) && !defined(__Userspace__)
2330 		atomic_add_int(&stcb->asoc.refcnt, 1);
2331 		SCTP_TCB_UNLOCK(stcb);
2332 		SCTP_SOCKET_LOCK(so, 1);
2333 		SCTP_TCB_LOCK(stcb);
2334 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2335 #endif
2336 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2337 		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
2338 #if defined(__APPLE__) && !defined(__Userspace__)
2339 		SCTP_SOCKET_UNLOCK(so, 1);
2340 #endif
2341 		return (NULL);
2342 	}
2343 	/* load all addresses */
2344 	if (sctp_load_addresses_from_init(stcb, m,
2345 	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
2346 	    src, dst, init_src, port)) {
2347 #if defined(__APPLE__) && !defined(__Userspace__)
2348 		atomic_add_int(&stcb->asoc.refcnt, 1);
2349 		SCTP_TCB_UNLOCK(stcb);
2350 		SCTP_SOCKET_LOCK(so, 1);
2351 		SCTP_TCB_LOCK(stcb);
2352 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2353 #endif
2354 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2355 		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2356 #if defined(__APPLE__) && !defined(__Userspace__)
2357 		SCTP_SOCKET_UNLOCK(so, 1);
2358 #endif
2359 		return (NULL);
2360 	}
2361 	/*
2362 	 * verify any preceding AUTH chunk that was skipped
2363 	 */
2364 	/* pull the local authentication parameters from the cookie/init-ack */
2365 	sctp_auth_get_cookie_params(stcb, m,
2366 	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2367 	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
2368 	if (auth_skipped) {
2369 		struct sctp_auth_chunk *auth;
2370 
2371 		if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
2372 			auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
2373 		} else {
2374 			auth = NULL;
2375 		}
2376 		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
2377 			/* auth HMAC failed, dump the assoc and packet */
2378 			SCTPDBG(SCTP_DEBUG_AUTH1,
2379 				"COOKIE-ECHO: AUTH failed\n");
2380 #if defined(__APPLE__) && !defined(__Userspace__)
2381 			atomic_add_int(&stcb->asoc.refcnt, 1);
2382 			SCTP_TCB_UNLOCK(stcb);
2383 			SCTP_SOCKET_LOCK(so, 1);
2384 			SCTP_TCB_LOCK(stcb);
2385 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2386 #endif
2387 			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2388 			                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
2389 #if defined(__APPLE__) && !defined(__Userspace__)
2390 			SCTP_SOCKET_UNLOCK(so, 1);
2391 #endif
2392 			return (NULL);
2393 		} else {
2394 			/* remaining chunks checked... good to go */
2395 			stcb->asoc.authenticated = 1;
2396 		}
2397 	}
2398 
2399 	/*
2400 	 * if we're doing ASCONFs, check to see if we have any new local
2401 	 * addresses that need to get added to the peer (eg. addresses
2402 	 * changed while cookie echo in flight).  This needs to be done
2403 	 * after we go to the OPEN state to do the correct asconf
2404 	 * processing. else, make sure we have the correct addresses in our
2405 	 * lists
2406 	 */
2407 
2408 	/* warning, we re-use sin, sin6, sa_store here! */
2409 	/* pull in local_address (our "from" address) */
2410 	switch (cookie->laddr_type) {
2411 #ifdef INET
2412 	case SCTP_IPV4_ADDRESS:
2413 		/* source addr is IPv4 */
2414 		memset(&store.sin, 0, sizeof(struct sockaddr_in));
2415 		store.sin.sin_family = AF_INET;
2416 #ifdef HAVE_SIN_LEN
2417 		store.sin.sin_len = sizeof(struct sockaddr_in);
2418 #endif
2419 		store.sin.sin_addr.s_addr = cookie->laddress[0];
2420 		break;
2421 #endif
2422 #ifdef INET6
2423 	case SCTP_IPV6_ADDRESS:
2424 		/* source addr is IPv6 */
2425 		memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
2426 		store.sin6.sin6_family = AF_INET6;
2427 #ifdef HAVE_SIN6_LEN
2428 		store.sin6.sin6_len = sizeof(struct sockaddr_in6);
2429 #endif
2430 		store.sin6.sin6_scope_id = cookie->scope_id;
2431 		memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
2432 		break;
2433 #endif
2434 #if defined(__Userspace__)
2435 	case SCTP_CONN_ADDRESS:
2436 		/* source addr is conn */
2437 		memset(&store.sconn, 0, sizeof(struct sockaddr_conn));
2438 		store.sconn.sconn_family = AF_CONN;
2439 #ifdef HAVE_SCONN_LEN
2440 		store.sconn.sconn_len = sizeof(struct sockaddr_conn);
2441 #endif
2442 		memcpy(&store.sconn.sconn_addr, cookie->laddress, sizeof(void *));
2443 		break;
2444 #endif
2445 	default:
2446 #if defined(__APPLE__) && !defined(__Userspace__)
2447 		atomic_add_int(&stcb->asoc.refcnt, 1);
2448 		SCTP_TCB_UNLOCK(stcb);
2449 		SCTP_SOCKET_LOCK(so, 1);
2450 		SCTP_TCB_LOCK(stcb);
2451 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2452 #endif
2453 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2454 		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
2455 #if defined(__APPLE__) && !defined(__Userspace__)
2456 		SCTP_SOCKET_UNLOCK(so, 1);
2457 #endif
2458 		return (NULL);
2459 	}
2460 
2461 	/* update current state */
2462 	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2463 	SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2464 	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2465 		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2466 		                 stcb->sctp_ep, stcb, NULL);
2467 	}
2468 	sctp_stop_all_cookie_timers(stcb);
2469 	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
2470 	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2471 
2472 	/* set up to notify upper layer */
2473 	*notification = SCTP_NOTIFY_ASSOC_UP;
2474 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2475 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2476 	    (!SCTP_IS_LISTENING(inp))) {
2477 		/*
2478 		 * This is an endpoint that called connect(); how it got a
2479 		 * cookie that is NEW is a bit of a mystery. It must be that
2480 		 * the INIT was sent, but before it got there, a complete
2481 		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
2482 		 * should have gone to the other code, not here. Oh well,
2483 		 * a bit of protection is worth having.
2484 		 */
2485 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2486 #if defined(__APPLE__) && !defined(__Userspace__)
2487 		atomic_add_int(&stcb->asoc.refcnt, 1);
2488 		SCTP_TCB_UNLOCK(stcb);
2489 		SCTP_SOCKET_LOCK(so, 1);
2490 		SCTP_TCB_LOCK(stcb);
2491 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2492 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2493 			SCTP_SOCKET_UNLOCK(so, 1);
2494 			return (NULL);
2495 		}
2496 #endif
2497 		soisconnected(stcb->sctp_socket);
2498 #if defined(__APPLE__) && !defined(__Userspace__)
2499 		SCTP_SOCKET_UNLOCK(so, 1);
2500 #endif
2501 	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2502 	           (SCTP_IS_LISTENING(inp))) {
2503 		/*
2504 		 * We don't want to do anything with this one, since it is
2505 		 * the listening socket. The timer will get started for
2506 		 * accepted connections in the caller.
2507 		 */
2508 		;
2509 	}
2510 	/* since we did not send a HB make sure we don't double things */
2511 	if ((netp) && (*netp))
2512 		(*netp)->hb_responded = 1;
2513 
2514 	if (stcb->asoc.sctp_autoclose_ticks &&
2515 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2516 		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2517 	}
2518 	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2519 	if ((netp != NULL) && (*netp != NULL)) {
2520 		struct timeval old;
2521 
2522 		/* calculate the RTT and set the encaps port */
2523 		old.tv_sec = cookie->time_entered.tv_sec;
2524 		old.tv_usec = cookie->time_entered.tv_usec;
2525 		sctp_calculate_rto(stcb, asoc, *netp, &old, SCTP_RTT_FROM_NON_DATA);
2526 	}
2527 	/* respond with a COOKIE-ACK */
2528 	sctp_send_cookie_ack(stcb);
2529 
2530 	/*
2531 	 * check the address lists for any ASCONFs that need to be sent
2532 	 * AFTER the cookie-ack is sent
2533 	 */
2534 	sctp_check_address_list(stcb, m,
2535 	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2536 	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2537 	    &store.sa, cookie->local_scope, cookie->site_scope,
2538 	    cookie->ipv4_scope, cookie->loopback_scope);
2539 
2540 
2541 	return (stcb);
2542 }
2543 
2544 /*
2545  * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
2546  * we NEED to make sure we are not already using the vtag. If so we
2547  * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2548 	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2549 							    SCTP_BASE_INFO(hashasocmark))];
2550 	LIST_FOREACH(stcb, head, sctp_asocs) {
2551 	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2552 		       -- SEND ABORT - TRY AGAIN --
2553 		}
2554 	}
2555 */
2556 
2557 /*
2558  * handles a COOKIE-ECHO message. stcb: modified to either a new or left as
2559  * existing (non-NULL) TCB
2560  */
2561 static struct mbuf *
2562 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2563     struct sockaddr *src, struct sockaddr *dst,
2564     struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2565     struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2566     int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2567     struct sctp_tcb **locked_tcb,
2568 #if defined(__FreeBSD__) && !defined(__Userspace__)
2569     uint8_t mflowtype, uint32_t mflowid,
2570 #endif
2571     uint32_t vrf_id, uint16_t port)
2572 {
2573 	struct sctp_state_cookie *cookie;
2574 	struct sctp_tcb *l_stcb = *stcb;
2575 	struct sctp_inpcb *l_inp;
2576 	struct sockaddr *to;
2577 	struct sctp_pcb *ep;
2578 	struct mbuf *m_sig;
2579 	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2580 	uint8_t *sig;
2581 	uint8_t cookie_ok = 0;
2582 	unsigned int sig_offset, cookie_offset;
2583 	unsigned int cookie_len;
2584 	struct timeval now;
2585 	struct timeval time_expires;
2586 	int notification = 0;
2587 	struct sctp_nets *netl;
2588 	int had_a_existing_tcb = 0;
2589 	int send_int_conf = 0;
2590 #ifdef INET
2591 	struct sockaddr_in sin;
2592 #endif
2593 #ifdef INET6
2594 	struct sockaddr_in6 sin6;
2595 #endif
2596 #if defined(__Userspace__)
2597 	struct sockaddr_conn sconn;
2598 #endif
2599 
2600 	SCTPDBG(SCTP_DEBUG_INPUT2,
2601 		"sctp_handle_cookie: handling COOKIE-ECHO\n");
2602 
2603 	if (inp_p == NULL) {
2604 		return (NULL);
2605 	}
2606 	cookie = &cp->cookie;
2607 	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2608 	cookie_len = ntohs(cp->ch.chunk_length);
2609 
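	/*
	 * A valid COOKIE-ECHO must at least contain the state cookie, the
	 * fixed headers of the embedded INIT and INIT-ACK copies, and the
	 * trailing HMAC signature; anything smaller is dropped.
	 */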
2610 	if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2611 	    sizeof(struct sctp_init_chunk) +
2612 	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2613 		/* cookie too small */
2614 		return (NULL);
2615 	}
2616 	if ((cookie->peerport != sh->src_port) ||
2617 	    (cookie->myport != sh->dest_port) ||
2618 	    (cookie->my_vtag != sh->v_tag)) {
2619 		/*
2620 		 * invalid ports or bad tag.  Note that we always leave the
2621 		 * v_tag in the header in network order and when we stored
2622 		 * it in the my_vtag slot we also left it in network order.
2623 		 * This maintains the match even though it may be in the
2624 		 * opposite byte order of the machine :->
2625 		 */
2626 		return (NULL);
2627 	}
2628 #if defined(__Userspace__)
2629 	/*
2630 	 * Recover the AF_CONN addresses within the cookie.
2631 	 * This needs to be done in the buffer provided for later processing
2632 	 * of the cookie and in the mbuf chain for HMAC validation.
2633 	 */
2634 	if ((cookie->addr_type == SCTP_CONN_ADDRESS) && (src->sa_family == AF_CONN)) {
2635 		struct sockaddr_conn *sconnp = (struct sockaddr_conn *)src;
2636 
2637 		memcpy(cookie->address, &sconnp->sconn_addr , sizeof(void *));
2638 		m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, address),
2639 		           (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr);
2640 	}
2641 	if ((cookie->laddr_type == SCTP_CONN_ADDRESS) && (dst->sa_family == AF_CONN)) {
2642 		struct sockaddr_conn *sconnp = (struct sockaddr_conn *)dst;
2643 
2644 		memcpy(cookie->laddress, &sconnp->sconn_addr , sizeof(void *));
2645 		m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, laddress),
2646 		           (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr);
2647 	}
2648 #endif
2649 	/*
2650 	 * split off the signature into its own mbuf (since it should not be
2651 	 * calculated in the sctp_hmac_m() call).
2652 	 */
2653 	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2654 	m_sig = m_split(m, sig_offset, M_NOWAIT);
2655 	if (m_sig == NULL) {
2656 		/* out of memory or ?? */
2657 		return (NULL);
2658 	}
2659 #ifdef SCTP_MBUF_LOGGING
2660 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2661 		sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT);
2662 	}
2663 #endif
2664 
2665 	/*
2666 	 * compute the signature/digest for the cookie
2667 	 */
2668 	ep = &(*inp_p)->sctp_ep;
2669 	l_inp = *inp_p;
2670 	if (l_stcb) {
2671 		SCTP_TCB_UNLOCK(l_stcb);
2672 	}
2673 	SCTP_INP_RLOCK(l_inp);
2674 	if (l_stcb) {
2675 		SCTP_TCB_LOCK(l_stcb);
2676 	}
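	/*
	 * The endpoint keeps two cookie secrets (current and last) so that
	 * cookies issued just before a secret rotation remain valid: pick
	 * whichever secret was in effect when this cookie was generated and
	 * recompute the HMAC over the cookie portion of the packet.
	 */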
2677 	/* which cookie is it? */
2678 	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2679 	    (ep->current_secret_number != ep->last_secret_number)) {
2680 		/* it's the old cookie */
2681 		(void)sctp_hmac_m(SCTP_HMAC,
2682 		    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2683 		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2684 	} else {
2685 		/* it's the current cookie */
2686 		(void)sctp_hmac_m(SCTP_HMAC,
2687 		    (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
2688 		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2689 	}
2690 	/* get the signature */
2691 	SCTP_INP_RUNLOCK(l_inp);
2692 	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2693 	if (sig == NULL) {
2694 		/* couldn't find signature */
2695 		sctp_m_freem(m_sig);
2696 		return (NULL);
2697 	}
2698 	/* compare the received digest with the computed digest */
2699 	if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2700 		/* try the old cookie? */
2701 		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2702 		    (ep->current_secret_number != ep->last_secret_number)) {
2703 			/* compute digest with old */
2704 			(void)sctp_hmac_m(SCTP_HMAC,
2705 			    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2706 			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2707 			/* compare */
2708 			if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2709 				cookie_ok = 1;
2710 		}
2711 	} else {
2712 		cookie_ok = 1;
2713 	}
2714 
2715 	/*
2716 	 * Now before we continue we must reconstruct our mbuf so that
2717 	 * normal processing of any other chunks will work.
2718 	 */
2719 	{
2720 		struct mbuf *m_at;
2721 
2722 		m_at = m;
2723 		while (SCTP_BUF_NEXT(m_at) != NULL) {
2724 			m_at = SCTP_BUF_NEXT(m_at);
2725 		}
2726 		SCTP_BUF_NEXT(m_at) = m_sig;
2727 	}
2728 
2729 	if (cookie_ok == 0) {
2730 		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2731 		SCTPDBG(SCTP_DEBUG_INPUT2,
2732 			"offset = %u, cookie_offset = %u, sig_offset = %u\n",
2733 			(uint32_t) offset, cookie_offset, sig_offset);
2734 		return (NULL);
2735 	}
2736 
2737 	/*
2738 	 * check the cookie timestamps to be sure it's not stale
2739 	 */
2740 	(void)SCTP_GETTIME_TIMEVAL(&now);
2741 	/* Expire time is in Ticks, so we convert to seconds */
2742 	time_expires.tv_sec = cookie->time_entered.tv_sec + sctp_ticks_to_secs(cookie->cookie_life);
2743 	time_expires.tv_usec = cookie->time_entered.tv_usec;
2744 #if !(defined(__FreeBSD__) && !defined(__Userspace__))
2745 	if (timercmp(&now, &time_expires, >))
2746 #else
2747 	if (timevalcmp(&now, &time_expires, >))
2748 #endif
2749 	{
2750 		/* cookie is stale! */
2751 		struct mbuf *op_err;
2752 		struct sctp_error_stale_cookie *cause;
2753 		struct timeval diff;
2754 		uint32_t staleness;
2755 
2756 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie),
2757 		                               0, M_NOWAIT, 1, MT_DATA);
2758 		if (op_err == NULL) {
2759 			/* FOOBAR */
2760 			return (NULL);
2761 		}
2762 		/* Set the len */
2763 		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie);
2764 		cause = mtod(op_err, struct sctp_error_stale_cookie *);
2765 		cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE);
2766 		cause->cause.length = htons((sizeof(struct sctp_paramhdr) +
2767 		    (sizeof(uint32_t))));
2768 #if !(defined(__FreeBSD__) && !defined(__Userspace__))
2769 		timersub(&now, &time_expires, &diff);
2770 #else
2771 		diff = now;
2772 		timevalsub(&diff, &time_expires);
2773 #endif
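		/*
		 * Report how far past its lifetime the cookie is, in
		 * microseconds, saturating at UINT32_MAX so the 32-bit
		 * Measure of Staleness field cannot overflow.
		 */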
2774 		if ((uint32_t)diff.tv_sec > UINT32_MAX / 1000000) {
2775 			staleness = UINT32_MAX;
2776 		} else {
2777 			staleness = diff.tv_sec * 1000000;
2778 		}
2779 		if (UINT32_MAX - staleness >= (uint32_t)diff.tv_usec) {
2780 			staleness += diff.tv_usec;
2781 		} else {
2782 			staleness = UINT32_MAX;
2783 		}
2784 		cause->stale_time = htonl(staleness);
2785 		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
2786 #if defined(__FreeBSD__) && !defined(__Userspace__)
2787 		                   mflowtype, mflowid, l_inp->fibnum,
2788 #endif
2789 		                   vrf_id, port);
2790 		return (NULL);
2791 	}
2792 	/*
2793 	 * Now we must see with the lookup address if we have an existing
2794 	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2795 	 * and a INIT collided with us and somewhere the peer sent the
2796 	 * cookie on another address besides the single address our assoc
2797 	 * had for him. In this case we will have one of the tie-tags set at
2798 	 * least AND the address field in the cookie can be used to look it
2799 	 * up.
2800 	 */
2801 	to = NULL;
2802 	switch (cookie->addr_type) {
2803 #ifdef INET6
2804 	case SCTP_IPV6_ADDRESS:
2805 		memset(&sin6, 0, sizeof(sin6));
2806 		sin6.sin6_family = AF_INET6;
2807 #ifdef HAVE_SIN6_LEN
2808 		sin6.sin6_len = sizeof(sin6);
2809 #endif
2810 		sin6.sin6_port = sh->src_port;
2811 		sin6.sin6_scope_id = cookie->scope_id;
2812 		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2813 		    sizeof(sin6.sin6_addr.s6_addr));
2814 		to = (struct sockaddr *)&sin6;
2815 		break;
2816 #endif
2817 #ifdef INET
2818 	case SCTP_IPV4_ADDRESS:
2819 		memset(&sin, 0, sizeof(sin));
2820 		sin.sin_family = AF_INET;
2821 #ifdef HAVE_SIN_LEN
2822 		sin.sin_len = sizeof(sin);
2823 #endif
2824 		sin.sin_port = sh->src_port;
2825 		sin.sin_addr.s_addr = cookie->address[0];
2826 		to = (struct sockaddr *)&sin;
2827 		break;
2828 #endif
2829 #if defined(__Userspace__)
2830 	case SCTP_CONN_ADDRESS:
2831 		memset(&sconn, 0, sizeof(struct sockaddr_conn));
2832 		sconn.sconn_family = AF_CONN;
2833 #ifdef HAVE_SCONN_LEN
2834 		sconn.sconn_len = sizeof(struct sockaddr_conn);
2835 #endif
2836 		sconn.sconn_port = sh->src_port;
2837 		memcpy(&sconn.sconn_addr, cookie->address, sizeof(void *));
2838 		to = (struct sockaddr *)&sconn;
2839 		break;
2840 #endif
2841 	default:
2842 		/* This should not happen */
2843 		return (NULL);
2844 	}
2845 	if (*stcb == NULL) {
2846 		/* Yep, lets check */
2847 		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
2848 		if (*stcb == NULL) {
2849 			/*
2850 			 * We should have only got back the same inp. If we
2851 			 * got back a different ep we have a problem. The
2852 			 * original findep got back l_inp and now
2853 			 */
2854 			if (l_inp != *inp_p) {
2855 				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2856 			}
2857 		} else {
2858 			if (*locked_tcb == NULL) {
2859 				/* In this case we found the assoc only
2860 				 * after we locked the create lock. This means
2861 				 * we are in a colliding case and we must make
2862 			 * sure that we unlock the tcb if it's one of the
2863 				 * cases where we throw away the incoming packets.
2864 				 */
2865 				*locked_tcb = *stcb;
2866 
2867 				/* We must also increment the inp ref count
2868 				 * since the ref_count flag was set when we
2869 				 * did not find the TCB; now we found it, which
2870 				 * reduces the refcount. We must raise it back
2871 				 * out to balance it all :-)
2872 				 */
2873 				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2874 				if ((*stcb)->sctp_ep != l_inp) {
2875 					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2876 						    (void *)(*stcb)->sctp_ep, (void *)l_inp);
2877 				}
2878 			}
2879 		}
2880 	}
2881 
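	/*
	 * The HMAC signature was split off and re-chained at the end of the
	 * packet above, so exclude it from the cookie length passed to the
	 * cookie processing routines below.
	 */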
2882 	cookie_len -= SCTP_SIGNATURE_SIZE;
2883 	if (*stcb == NULL) {
2884 		/* this is the "normal" case... get a new TCB */
2885 		*stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
2886 		                                cookie, cookie_len, *inp_p,
2887 		                                netp, to, &notification,
2888 		                                auth_skipped, auth_offset, auth_len,
2889 #if defined(__FreeBSD__) && !defined(__Userspace__)
2890 		                                mflowtype, mflowid,
2891 #endif
2892 		                                vrf_id, port);
2893 	} else {
2894 		/* this is abnormal... cookie-echo on existing TCB */
2895 		had_a_existing_tcb = 1;
2896 		*stcb = sctp_process_cookie_existing(m, iphlen, offset,
2897 		                                     src, dst, sh,
2898 						     cookie, cookie_len, *inp_p, *stcb, netp, to,
2899 						     &notification, auth_skipped, auth_offset, auth_len,
2900 #if defined(__FreeBSD__) && !defined(__Userspace__)
2901 		                                     mflowtype, mflowid,
2902 #endif
2903 		                                     vrf_id, port);
2904 	}
2905 
2906 	if (*stcb == NULL) {
2907 		/* still no TCB... must be bad cookie-echo */
2908 		return (NULL);
2909 	}
2910 #if defined(__FreeBSD__) && !defined(__Userspace__)
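	/* Record the flow id/type of the received packet on the destination net. */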
2911 	if (*netp != NULL) {
2912 		(*netp)->flowtype = mflowtype;
2913 		(*netp)->flowid = mflowid;
2914 	}
2915 #endif
2916 	/*
2917 	 * Ok, we built an association so confirm the address we sent the
2918 	 * INIT-ACK to.
2919 	 */
2920 	netl = sctp_findnet(*stcb, to);
2921 	/*
2922 	 * This code should in theory NOT run, but handle it if it does.
2923 	 */
2924 	if (netl == NULL) {
2925 		/* TSNH! Huh, why do I need to add this address here? */
2926 		if (sctp_add_remote_addr(*stcb, to, NULL, port,
2927 		                         SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
2928 			return (NULL);
2929 		}
2930 		netl = sctp_findnet(*stcb, to);
2931 	}
2932 	if (netl) {
2933 		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2934 			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2935 			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2936 			    netl);
2937 			send_int_conf = 1;
2938 		}
2939 	}
2940 	sctp_start_net_timers(*stcb);
2941 	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2942 		if (!had_a_existing_tcb ||
2943 		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2944 			/*
2945 			 * If we have a NEW cookie or the connect never
2946 			 * reached the connected state during a collision, we
2947 			 * must do the TCP accept thing.
2948 			 */
2949 			struct socket *so, *oso;
2950 			struct sctp_inpcb *inp;
2951 
2952 			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2953 				/*
2954 				 * For a restart we will keep the same
2955 				 * socket, no need to do anything. I THINK!!
2956 				 */
2957 				sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2958 				if (send_int_conf) {
2959 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2960 					                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2961 				}
2962 				return (m);
2963 			}
2964 			oso = (*inp_p)->sctp_socket;
2965 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2966 			SCTP_TCB_UNLOCK((*stcb));
2967 #if defined(__FreeBSD__) && !defined(__Userspace__)
2968 			CURVNET_SET(oso->so_vnet);
2969 #endif
2970 #if defined(__APPLE__) && !defined(__Userspace__)
2971 			SCTP_SOCKET_LOCK(oso, 1);
2972 #endif
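			/*
			 * One-to-one style socket: create a new socket off the
			 * listening one; it will be handed out by accept().
			 */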
2973 			so = sonewconn(oso, 0
2974 #if defined(__APPLE__) && !defined(__Userspace__)
2975 			    ,NULL
2976 #endif
2977 			    );
2978 #if defined(__APPLE__) && !defined(__Userspace__)
2979 			SCTP_SOCKET_UNLOCK(oso, 1);
2980 #endif
2981 #if defined(__FreeBSD__) && !defined(__Userspace__)
2982 			CURVNET_RESTORE();
2983 #endif
2984 			SCTP_TCB_LOCK((*stcb));
2985 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2986 
2987 			if (so == NULL) {
2988 				struct mbuf *op_err;
2989 #if defined(__APPLE__) && !defined(__Userspace__)
2990 				struct socket *pcb_so;
2991 #endif
2992 				/* Too many sockets */
2993 				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2994 				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2995 				sctp_abort_association(*inp_p, NULL, m, iphlen,
2996 						       src, dst, sh, op_err,
2997 #if defined(__FreeBSD__) && !defined(__Userspace__)
2998 				                       mflowtype, mflowid,
2999 #endif
3000 				                       vrf_id, port);
3001 #if defined(__APPLE__) && !defined(__Userspace__)
3002 				pcb_so = SCTP_INP_SO(*inp_p);
3003 				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
3004 				SCTP_TCB_UNLOCK((*stcb));
3005 				SCTP_SOCKET_LOCK(pcb_so, 1);
3006 				SCTP_TCB_LOCK((*stcb));
3007 				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
3008 #endif
3009 				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC,
3010 				                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
3011 #if defined(__APPLE__) && !defined(__Userspace__)
3012 				SCTP_SOCKET_UNLOCK(pcb_so, 1);
3013 #endif
3014 				return (NULL);
3015 			}
3016 			inp = (struct sctp_inpcb *)so->so_pcb;
3017 			SCTP_INP_INCR_REF(inp);
3018 			/*
3019 			 * We add the unbound flag here so that
3020 			 * if we get an soabort() before we get the
3021 			 * move_pcb done, we will properly cleanup.
3022 			 */
3023 			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
3024 			    SCTP_PCB_FLAGS_CONNECTED |
3025 			    SCTP_PCB_FLAGS_IN_TCPPOOL |
3026 			    SCTP_PCB_FLAGS_UNBOUND |
3027 			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
3028 			    SCTP_PCB_FLAGS_DONT_WAKE);
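			/*
			 * Inherit the endpoint settings (features, supported
			 * extensions and limits) from the listening endpoint
			 * so the new socket behaves like its parent.
			 */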
3029 			inp->sctp_features = (*inp_p)->sctp_features;
3030 			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
3031 			inp->sctp_socket = so;
3032 			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
3033 			inp->max_cwnd = (*inp_p)->max_cwnd;
3034 			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
3035 			inp->ecn_supported = (*inp_p)->ecn_supported;
3036 			inp->prsctp_supported = (*inp_p)->prsctp_supported;
3037 			inp->auth_supported = (*inp_p)->auth_supported;
3038 			inp->asconf_supported = (*inp_p)->asconf_supported;
3039 			inp->reconfig_supported = (*inp_p)->reconfig_supported;
3040 			inp->nrsack_supported = (*inp_p)->nrsack_supported;
3041 			inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
3042 			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
3043 			inp->sctp_context = (*inp_p)->sctp_context;
3044 			inp->local_strreset_support = (*inp_p)->local_strreset_support;
3045 			inp->fibnum = (*inp_p)->fibnum;
3046 			inp->inp_starting_point_for_iterator = NULL;
3047 #if defined(__Userspace__)
3048 			inp->ulp_info = (*inp_p)->ulp_info;
3049 			inp->recv_callback = (*inp_p)->recv_callback;
3050 			inp->send_callback = (*inp_p)->send_callback;
3051 			inp->send_sb_threshold = (*inp_p)->send_sb_threshold;
3052 #endif
3053 			/*
3054 			 * copy in the authentication parameters from the
3055 			 * original endpoint
3056 			 */
3057 			if (inp->sctp_ep.local_hmacs)
3058 				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
3059 			inp->sctp_ep.local_hmacs =
3060 			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
3061 			if (inp->sctp_ep.local_auth_chunks)
3062 				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
3063 			inp->sctp_ep.local_auth_chunks =
3064 			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
3065 
3066 			/*
3067 			 * Now we must move it from one hash table to
3068 			 * another and get the tcb in the right place.
3069 			 */
3070 
3071 			/* This is where the one-2-one socket is put into
3072 			 * the accept state waiting for the accept!
3073 			 */
3074 			if (*stcb) {
3075 				SCTP_ADD_SUBSTATE(*stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
3076 			}
3077 			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
3078 
3079 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
3080 			SCTP_TCB_UNLOCK((*stcb));
3081 
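			/*
			 * Move anything already queued for this association on
			 * the old socket over to the new socket's buffers.
			 */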
3082 #if defined(__FreeBSD__) && !defined(__Userspace__)
3083 			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
3084 			    0);
3085 #else
3086 			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
3087 #endif
3088 			SCTP_TCB_LOCK((*stcb));
3089 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
3090 
3091 
3092 			/* now we must check to see if we were aborted while
3093 			 * the move was going on and the lock/unlock happened.
3094 			 */
3095 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3096 				/* yep it was, we leave the
3097 				 * assoc attached to the socket since
3098 				 * the sctp_inpcb_free() call will send
3099 				 * an abort for us.
3100 				 */
3101 				SCTP_INP_DECR_REF(inp);
3102 				return (NULL);
3103 			}
3104 			SCTP_INP_DECR_REF(inp);
3105 			/* Switch over to the new guy */
3106 			*inp_p = inp;
3107 			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3108 			if (send_int_conf) {
3109 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
3110 				                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
3111 			}
3112 
3113 			/* Pull it from the incomplete queue and wake the guy */
3114 #if defined(__APPLE__) && !defined(__Userspace__)
3115 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
3116 			SCTP_TCB_UNLOCK((*stcb));
3117 			SCTP_SOCKET_LOCK(so, 1);
3118 #endif
3119 			soisconnected(so);
3120 #if defined(__APPLE__) && !defined(__Userspace__)
3121 			SCTP_TCB_LOCK((*stcb));
3122 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
3123 			SCTP_SOCKET_UNLOCK(so, 1);
3124 #endif
3125 			return (m);
3126 		}
3127 	}
3128 	if (notification) {
3129 		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3130 	}
3131 	if (send_int_conf) {
3132 		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
3133 		                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
3134 	}
3135 	return (m);
3136 }
3137 
3138 static void
3139 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
3140     struct sctp_tcb *stcb, struct sctp_nets *net)
3141 {
3142 	/* cp must not be used, others call this without a c-ack :-) */
3143 	struct sctp_association *asoc;
3144 	struct sctp_tmit_chunk *chk;
3145 
3146 	SCTPDBG(SCTP_DEBUG_INPUT2,
3147 		"sctp_handle_cookie_ack: handling COOKIE-ACK\n");
3148 	if ((stcb == NULL) || (net == NULL)) {
3149 		return;
3150 	}
3151 
3152 	asoc = &stcb->asoc;
3153 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3154 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3155 			       asoc->overall_error_count,
3156 			       0,
3157 			       SCTP_FROM_SCTP_INPUT,
3158 			       __LINE__);
3159 	}
3160 	asoc->overall_error_count = 0;
3161 	sctp_stop_all_cookie_timers(stcb);
3162 	/* process according to association state */
3163 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
3164 		/* state change only needed when I am in right state */
3165 		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
3166 		SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
3167 		sctp_start_net_timers(stcb);
3168 		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
3169 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
3170 			                 stcb->sctp_ep, stcb, NULL);
3171 
3172 		}
3173 		/* update RTO */
3174 		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
3175 		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
3176 		if (asoc->overall_error_count == 0) {
3177 			sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
3178 			                   SCTP_RTT_FROM_NON_DATA);
3179 		}
3180 		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
3181 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3182 		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3183 		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3184 #if defined(__APPLE__) && !defined(__Userspace__)
3185 			struct socket *so;
3186 
3187 #endif
3188 			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
3189 #if defined(__APPLE__) && !defined(__Userspace__)
3190 			so = SCTP_INP_SO(stcb->sctp_ep);
3191 			atomic_add_int(&stcb->asoc.refcnt, 1);
3192 			SCTP_TCB_UNLOCK(stcb);
3193 			SCTP_SOCKET_LOCK(so, 1);
3194 			SCTP_TCB_LOCK(stcb);
3195 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3196 #endif
3197 			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
3198 				soisconnected(stcb->sctp_socket);
3199 			}
3200 #if defined(__APPLE__) && !defined(__Userspace__)
3201 			SCTP_SOCKET_UNLOCK(so, 1);
3202 #endif
3203 		}
3204 		/*
3205 		 * since we did not send a HB make sure we don't double
3206 		 * things
3207 		 */
3208 		net->hb_responded = 1;
3209 
3210 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3211 			/* We don't need to do the asconf thing,
3212 			 * nor hb or autoclose if the socket is closed.
3213 			 */
3214 			goto closed_socket;
3215 		}
3216 
3217 		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
3218 		    stcb, net);
3219 
3220 
3221 		if (stcb->asoc.sctp_autoclose_ticks &&
3222 		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
3223 			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
3224 			    stcb->sctp_ep, stcb, NULL);
3225 		}
3226 		/*
3227 		 * send ASCONF if parameters are pending and ASCONFs are
3228 		 * allowed (eg. addresses changed when init/cookie echo were
3229 		 * in flight)
3230 		 */
3231 		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
3232 		    (stcb->asoc.asconf_supported == 1) &&
3233 		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
3234 #ifdef SCTP_TIMER_BASED_ASCONF
3235 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
3236 					 stcb->sctp_ep, stcb,
3237 					 stcb->asoc.primary_destination);
3238 #else
3239 			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
3240 					 SCTP_ADDR_NOT_LOCKED);
3241 #endif
3242 		}
3243 	}
3244 closed_socket:
3245 	/* Toss the cookie if I can */
3246 	sctp_toss_old_cookies(stcb, asoc);
3247 	/* Restart the timer if we have pending data */
3248 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3249 		if (chk->whoTo != NULL) {
3250 			break;
3251 		}
3252 	}
3253 	if (chk != NULL) {
3254 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
3255 	}
3256 }
3257 
3258 static void
3259 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
3260 		     struct sctp_tcb *stcb)
3261 {
3262 	struct sctp_nets *net;
3263 	struct sctp_tmit_chunk *lchk;
3264 	struct sctp_ecne_chunk bkup;
3265 	uint8_t override_bit;
3266 	uint32_t tsn, window_data_tsn;
3267 	int len;
3268 	unsigned int pkt_cnt;
3269 
3270 	len = ntohs(cp->ch.chunk_length);
3271 	if ((len != sizeof(struct sctp_ecne_chunk)) &&
3272 	    (len != sizeof(struct old_sctp_ecne_chunk))) {
3273 		return;
3274 	}
3275 	if (len == sizeof(struct old_sctp_ecne_chunk)) {
3276 		/* It's the old format */
3277 		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
3278 		bkup.num_pkts_since_cwr = htonl(1);
3279 		cp = &bkup;
3280 	}
3281 	SCTP_STAT_INCR(sctps_recvecne);
3282 	tsn = ntohl(cp->tsn);
3283 	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
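	/*
	 * window_data_tsn is the highest TSN currently queued for sending;
	 * cwnd is only reduced again once an ECNE covers a TSN beyond it.
	 */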
3284 	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
3285 	if (lchk == NULL) {
3286 		window_data_tsn = stcb->asoc.sending_seq - 1;
3287 	} else {
3288 		window_data_tsn = lchk->rec.data.tsn;
3289 	}
3290 
3291 	/* Find where it was sent to if possible. */
3292 	net = NULL;
3293 	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
3294 		if (lchk->rec.data.tsn == tsn) {
3295 			net = lchk->whoTo;
3296 			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
3297 			break;
3298 		}
3299 		if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
3300 			break;
3301 		}
3302 	}
3303 	if (net == NULL) {
3304 		/*
3305 		 * What to do. A previous send of a
3306 		 * CWR was possibly lost. See how old it is, we
3307 		 * may have it marked on the actual net.
3308 		 */
3309 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3310 			if (tsn == net->last_cwr_tsn) {
3311 				/* Found him, send it off */
3312 				break;
3313 			}
3314 		}
3315 		if (net == NULL) {
3316 			/*
3317 			 * If we reach here, we need to send a special
3318 			 * CWR that says hey, we did this a long time
3319 			 * ago and you lost the response.
3320 			 */
3321 			net = TAILQ_FIRST(&stcb->asoc.nets);
3322 			if (net == NULL) {
3323 				/* TSNH */
3324 				return;
3325 			}
3326 			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
3327 		} else {
3328 			override_bit = 0;
3329 		}
3330 	} else {
3331 		override_bit = 0;
3332 	}
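	/*
	 * Reduce at most once per window: only act when this ECNE covers a
	 * TSN past the point of the last reduction and no override is set.
	 */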
3333 	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
3334 	    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
3335 		/* JRS - Use the congestion control given in the pluggable CC module */
3336 		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
3337 		/*
3338 		 * We reduce once every RTT, so we will only lower cwnd again
3339 		 * once an ECNE covers a TSN beyond the window_data_tsn.
3340 		 */
3341 		net->cwr_window_tsn = window_data_tsn;
3342 		net->ecn_ce_pkt_cnt += pkt_cnt;
3343 		net->lost_cnt = pkt_cnt;
3344 		net->last_cwr_tsn = tsn;
3345 	} else {
3346 		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
3347 		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
3348 		    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
3349 			/*
3350 			 * Another loss in the same window; update how
3351 			 * many marks/packets we have lost.
3352 			 */
3353 			int cnt = 1;
3354 			if (pkt_cnt > net->lost_cnt) {
3355 				/* Should be the case */
3356 				cnt = (pkt_cnt - net->lost_cnt);
3357 				net->ecn_ce_pkt_cnt += cnt;
3358 			}
3359 			net->lost_cnt = pkt_cnt;
3360 			net->last_cwr_tsn = tsn;
3361 			/*
3362 			 * Most CC functions will ignore this call, since we are
3363 			 * still within the window of the initial CE the peer saw.
3364 			 */
3365 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
3366 		}
3367 	}
3368 	/*
3369 	 * We always send a CWR this way: if our previous one was lost, the
3370 	 * peer will get an update, and if it is not yet time to reduce again,
3371 	 * the CWR still reaches the peer. Note we set the override when we
3372 	 * could not find the TSN on the chunk or the destination network.
3373 	 */
3374 	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
3375 }
3376 
3377 static void
3378 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3379 {
3380 	/*
3381 	 * Here we get a CWR from the peer. We must look in the outqueue and
3382 	 * make sure that we have a covered ECNE in the control chunk part.
3383 	 * If so remove it.
3384 	 */
3385 	struct sctp_tmit_chunk *chk, *nchk;
3386 	struct sctp_ecne_chunk *ecne;
3387 	int override;
3388 	uint32_t cwr_tsn;
3389 
3390 	cwr_tsn = ntohl(cp->tsn);
3391 	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3392 	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.control_send_queue, sctp_next, nchk) {
3393 		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3394 			continue;
3395 		}
3396 		if ((override == 0) && (chk->whoTo != net)) {
3397 			/* Must be from the right src unless override is set */
3398 			continue;
3399 		}
3400 		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3401 		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3402 			/* this covers this ECNE, we can remove it */
3403 			stcb->asoc.ecn_echo_cnt_onq--;
3404 			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3405 			    sctp_next);
3406 			stcb->asoc.ctrl_queue_cnt--;
3407 			sctp_m_freem(chk->data);
3408 			chk->data = NULL;
3409 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3410 			if (override == 0) {
3411 				break;
3412 			}
3413 		}
3414 	}
3415 }
3416 
3417 static void
3418 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
3419     struct sctp_tcb *stcb, struct sctp_nets *net)
3420 {
3421 #if defined(__APPLE__) && !defined(__Userspace__)
3422 	struct socket *so;
3423 #endif
3424 
3425 	SCTPDBG(SCTP_DEBUG_INPUT2,
3426 		"sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
3427 	if (stcb == NULL)
3428 		return;
3429 
3430 	/* process according to association state */
3431 	if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
3432 		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
3433 		SCTPDBG(SCTP_DEBUG_INPUT2,
3434 			"sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
3435 		SCTP_TCB_UNLOCK(stcb);
3436 		return;
3437 	}
3438 	/* notify upper layer protocol */
3439 	if (stcb->sctp_socket) {
3440 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3441 	}
3442 #ifdef INVARIANTS
3443 	if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
3444 	    !TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
3445 	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
3446 		panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
3447 	}
3448 #endif
3449 	/* stop the timer */
3450 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
3451 	                SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
3452 	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
3453 	/* free the TCB */
3454 	SCTPDBG(SCTP_DEBUG_INPUT2,
3455 		"sctp_handle_shutdown_complete: calls free-asoc\n");
3456 #if defined(__APPLE__) && !defined(__Userspace__)
3457 	so = SCTP_INP_SO(stcb->sctp_ep);
3458 	atomic_add_int(&stcb->asoc.refcnt, 1);
3459 	SCTP_TCB_UNLOCK(stcb);
3460 	SCTP_SOCKET_LOCK(so, 1);
3461 	SCTP_TCB_LOCK(stcb);
3462 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
3463 #endif
3464 	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
3465 	                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
3466 #if defined(__APPLE__) && !defined(__Userspace__)
3467 	SCTP_SOCKET_UNLOCK(so, 1);
3468 #endif
3469 	return;
3470 }
3471 
3472 static int
3473 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
3474     struct sctp_nets *net, uint8_t flg)
3475 {
3476 	switch (desc->chunk_type) {
3477 	case SCTP_DATA:
3478 	case SCTP_IDATA:
3479 		/* find the tsn to resend (possibly) */
3480 	{
3481 		uint32_t tsn;
3482 		struct sctp_tmit_chunk *tp1;
3483 
3484 		tsn = ntohl(desc->tsn_ifany);
3485 		TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3486 			if (tp1->rec.data.tsn == tsn) {
3487 				/* found it */
3488 				break;
3489 			}
3490 			if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
3491 				/* not found */
3492 				tp1 = NULL;
3493 				break;
3494 			}
3495 		}
3496 		if (tp1 == NULL) {
3497 			/*
3498 			 * Do it the other way, i.e. without paying
3499 			 * attention to queue seq order.
3500 			 */
3501 			SCTP_STAT_INCR(sctps_pdrpdnfnd);
3502 			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3503 				if (tp1->rec.data.tsn == tsn) {
3504 					/* found it */
3505 					break;
3506 				}
3507 			}
3508 		}
3509 		if (tp1 == NULL) {
3510 			SCTP_STAT_INCR(sctps_pdrptsnnf);
3511 		}
3512 		if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
3513 			if (((flg & SCTP_BADCRC) == 0) &&
3514 			    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
3515 				return (0);
3516 			}
3517 			if ((stcb->asoc.peers_rwnd == 0) &&
3518 			    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
3519 				SCTP_STAT_INCR(sctps_pdrpdiwnp);
3520 				return (0);
3521 			}
3522 			if (stcb->asoc.peers_rwnd == 0 &&
3523 			    (flg & SCTP_FROM_MIDDLE_BOX)) {
3524 				SCTP_STAT_INCR(sctps_pdrpdizrw);
3525 				return (0);
3526 			}
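			/*
			 * The report echoes the first few data bytes of the
			 * dropped chunk; verify them against what we have queued.
			 */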
3527 			if ((uint32_t)SCTP_BUF_LEN(tp1->data) <
3528 			    SCTP_DATA_CHUNK_OVERHEAD(stcb) + SCTP_NUM_DB_TO_VERIFY) {
3529 				/* Payload not matching. */
3530 				SCTP_STAT_INCR(sctps_pdrpbadd);
3531 				return (-1);
3532 			}
3533 			if (memcmp(mtod(tp1->data, caddr_t) + SCTP_DATA_CHUNK_OVERHEAD(stcb),
3534 			           desc->data_bytes, SCTP_NUM_DB_TO_VERIFY) != 0) {
3535 				/* Payload not matching. */
3536 				SCTP_STAT_INCR(sctps_pdrpbadd);
3537 				return (-1);
3538 			}
3539 			if (tp1->do_rtt) {
3540 				/*
3541 				 * this guy had an RTO calculation
3542 				 * pending on it; cancel it
3543 				 */
3544 				if (tp1->whoTo->rto_needed == 0) {
3545 					tp1->whoTo->rto_needed = 1;
3546 				}
3547 				tp1->do_rtt = 0;
3548 			}
3549 			SCTP_STAT_INCR(sctps_pdrpmark);
3550 			if (tp1->sent != SCTP_DATAGRAM_RESEND)
3551 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3552 			/*
3553 			 * mark it as if we were doing a FR, since
3554 			 * we will be getting gap ack reports behind
3555 			 * the info from the router.
3556 			 */
3557 			tp1->rec.data.doing_fast_retransmit = 1;
3558 			/*
3559 			 * mark the tsn with what sequences can
3560 			 * cause a new FR.
3561 			 */
3562 			if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
3563 				tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
3564 			} else {
3565 				tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
3566 			}
3567 
3568 			/* restart the timer */
3569 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3570 					stcb, tp1->whoTo,
3571 			                SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3572 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3573 					 stcb, tp1->whoTo);
3574 
3575 			/* fix counts and things */
3576 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3577 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
3578 					       tp1->whoTo->flight_size,
3579 					       tp1->book_size,
3580 					       (uint32_t)(uintptr_t)stcb,
3581 					       tp1->rec.data.tsn);
3582 			}
3583 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3584 				sctp_flight_size_decrease(tp1);
3585 				sctp_total_flight_decrease(stcb, tp1);
3586 			}
3587 			tp1->sent = SCTP_DATAGRAM_RESEND;
3588 		} {
3589 			/* audit code */
3590 			unsigned int audit;
3591 
3592 			audit = 0;
3593 			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3594 				if (tp1->sent == SCTP_DATAGRAM_RESEND)
3595 					audit++;
3596 			}
3597 			TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
3598 				      sctp_next) {
3599 				if (tp1->sent == SCTP_DATAGRAM_RESEND)
3600 					audit++;
3601 			}
3602 			if (audit != stcb->asoc.sent_queue_retran_cnt) {
3603 				SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
3604 					    audit, stcb->asoc.sent_queue_retran_cnt);
3605 #ifndef SCTP_AUDITING_ENABLED
3606 				stcb->asoc.sent_queue_retran_cnt = audit;
3607 #endif
3608 			}
3609 		}
3610 	}
3611 	break;
3612 	case SCTP_ASCONF:
3613 	{
3614 		struct sctp_tmit_chunk *asconf;
3615 
3616 		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
3617 			      sctp_next) {
3618 			if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
3619 				break;
3620 			}
3621 		}
3622 		if (asconf) {
3623 			if (asconf->sent != SCTP_DATAGRAM_RESEND)
3624 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3625 			asconf->sent = SCTP_DATAGRAM_RESEND;
3626 			asconf->snd_count--;
3627 		}
3628 	}
3629 	break;
3630 	case SCTP_INITIATION:
3631 		/* resend the INIT */
3632 		stcb->asoc.dropped_special_cnt++;
3633 		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
3634 			/*
3635 			 * If we can get it in, in a few attempts we do
3636 			 * this, otherwise we let the timer fire.
3637 			 */
3638 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
3639 					stcb, net,
3640 			                SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
3641 			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
3642 		}
3643 		break;
3644 	case SCTP_SELECTIVE_ACK:
3645 	case SCTP_NR_SELECTIVE_ACK:
3646 		/* resend the sack */
3647 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
3648 		break;
3649 	case SCTP_HEARTBEAT_REQUEST:
3650 		/* resend a demand HB */
3651 		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
3652 			/* Only retransmit if we KNOW we won't destroy the tcb */
3653 			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
3654 		}
3655 		break;
3656 	case SCTP_SHUTDOWN:
3657 		sctp_send_shutdown(stcb, net);
3658 		break;
3659 	case SCTP_SHUTDOWN_ACK:
3660 		sctp_send_shutdown_ack(stcb, net);
3661 		break;
3662 	case SCTP_COOKIE_ECHO:
3663 	{
3664 		struct sctp_tmit_chunk *cookie;
3665 
3666 		cookie = NULL;
3667 		TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
3668 			      sctp_next) {
3669 			if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
3670 				break;
3671 			}
3672 		}
3673 		if (cookie) {
3674 			if (cookie->sent != SCTP_DATAGRAM_RESEND)
3675 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3676 			cookie->sent = SCTP_DATAGRAM_RESEND;
3677 			sctp_stop_all_cookie_timers(stcb);
3678 		}
3679 	}
3680 	break;
3681 	case SCTP_COOKIE_ACK:
3682 		sctp_send_cookie_ack(stcb);
3683 		break;
3684 	case SCTP_ASCONF_ACK:
3685 		/* resend last asconf ack */
3686 		sctp_send_asconf_ack(stcb);
3687 		break;
3688 	case SCTP_IFORWARD_CUM_TSN:
3689 	case SCTP_FORWARD_CUM_TSN:
3690 		send_forward_tsn(stcb, &stcb->asoc);
3691 		break;
3692 		/* can't do anything with these */
3693 	case SCTP_PACKET_DROPPED:
3694 	case SCTP_INITIATION_ACK:	/* this should not happen */
3695 	case SCTP_HEARTBEAT_ACK:
3696 	case SCTP_ABORT_ASSOCIATION:
3697 	case SCTP_OPERATION_ERROR:
3698 	case SCTP_SHUTDOWN_COMPLETE:
3699 	case SCTP_ECN_ECHO:
3700 	case SCTP_ECN_CWR:
3701 	default:
3702 		break;
3703 	}
3704 	return (0);
3705 }
3706 
3707 void
3708 sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3709 {
3710 	uint32_t i;
3711 	uint16_t temp;
3712 
3713 	/*
3714 	 * We set things to 0xffffffff since this is the last delivered sequence
3715 	 * and we will be sending in 0 after the reset.
3716 	 */
3717 
3718 	if (number_entries) {
3719 		for (i = 0; i < number_entries; i++) {
3720 			temp = ntohs(list[i]);
3721 			if (temp >= stcb->asoc.streamincnt) {
3722 				continue;
3723 			}
3724 			stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
3725 		}
3726 	} else {
3727 		list = NULL;
3728 		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3729 			stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
3730 		}
3731 	}
3732 	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3733 }
3734 
3735 static void
3736 sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3737 {
3738 	uint32_t i;
3739 	uint16_t temp;
3740 
3741 	if (number_entries > 0) {
3742 		for (i = 0; i < number_entries; i++) {
3743 			temp = ntohs(list[i]);
3744 			if (temp >= stcb->asoc.streamoutcnt) {
3745 				/* no such stream */
3746 				continue;
3747 			}
3748 			stcb->asoc.strmout[temp].next_mid_ordered = 0;
3749 			stcb->asoc.strmout[temp].next_mid_unordered = 0;
3750 		}
3751 	} else {
3752 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3753 			stcb->asoc.strmout[i].next_mid_ordered = 0;
3754 			stcb->asoc.strmout[i].next_mid_unordered = 0;
3755 		}
3756 	}
3757 	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3758 }
3759 
3760 static void
3761 sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3762 {
3763 	uint32_t i;
3764 	uint16_t temp;
3765 
3766 	if (number_entries > 0) {
3767 		for (i = 0; i < number_entries; i++) {
3768 			temp = ntohs(list[i]);
3769 			if (temp >= stcb->asoc.streamoutcnt) {
3770 				/* no such stream */
3771 				continue;
3772 			}
3773 			stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
3774 		}
3775 	} else {
3776 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3777 			stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
3778 		}
3779 	}
3780 }
3781 
3782 
3783 struct sctp_stream_reset_request *
3784 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3785 {
3786 	struct sctp_association *asoc;
3787 	struct sctp_chunkhdr *ch;
3788 	struct sctp_stream_reset_request *r;
3789 	struct sctp_tmit_chunk *chk;
3790 	int len, clen;
3791 
3792 	asoc = &stcb->asoc;
3793 	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3794 		asoc->stream_reset_outstanding = 0;
3795 		return (NULL);
3796 	}
3797 	if (stcb->asoc.str_reset == NULL) {
3798 		asoc->stream_reset_outstanding = 0;
3799 		return (NULL);
3800 	}
3801 	chk = stcb->asoc.str_reset;
3802 	if (chk->data == NULL) {
3803 		return (NULL);
3804 	}
3805 	if (bchk) {
3806 		/* he wants a copy of the chk pointer */
3807 		*bchk = chk;
3808 	}
3809 	clen = chk->send_size;
3810 	ch = mtod(chk->data, struct sctp_chunkhdr *);
3811 	r = (struct sctp_stream_reset_request *)(ch + 1);
3812 	if (ntohl(r->request_seq) == seq) {
3813 		/* found it */
3814 		return (r);
3815 	}
3816 	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3817 	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3818 		/* move to the next one, there can only be a max of two */
3819 		r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
3820 		if (ntohl(r->request_seq) == seq) {
3821 			return (r);
3822 		}
3823 	}
3824 	/* that seq is not here */
3825 	return (NULL);
3826 }
3827 
3828 static void
3829 sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3830 {
3831 	struct sctp_association *asoc;
3832 	struct sctp_tmit_chunk *chk;
3833 
3834 	asoc = &stcb->asoc;
3835 	chk = asoc->str_reset;
3836 	if (chk == NULL) {
3837 		return;
3838 	}
3839 	asoc->str_reset = NULL;
3840 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
3841 	                NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
3842 	TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
3843 	asoc->ctrl_queue_cnt--;
3844 	if (chk->data) {
3845 		sctp_m_freem(chk->data);
3846 		chk->data = NULL;
3847 	}
3848 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3849 }
3850 
3851 
3852 static int
3853 sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
3854 				  uint32_t seq, uint32_t action,
3855 				  struct sctp_stream_reset_response *respin)
3856 {
3857 	uint16_t type;
3858 	int lparam_len;
3859 	struct sctp_association *asoc = &stcb->asoc;
3860 	struct sctp_tmit_chunk *chk;
3861 	struct sctp_stream_reset_request *req_param;
3862 	struct sctp_stream_reset_out_request *req_out_param;
3863 	struct sctp_stream_reset_in_request *req_in_param;
3864 	uint32_t number_entries;
3865 
3866 	if (asoc->stream_reset_outstanding == 0) {
3867 		/* duplicate */
3868 		return (0);
3869 	}
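	/*
	 * Only a response matching our outstanding request sequence number is
	 * processed; anything else is ignored.
	 */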
3870 	if (seq == stcb->asoc.str_reset_seq_out) {
3871 		req_param = sctp_find_stream_reset(stcb, seq, &chk);
3872 		if (req_param != NULL) {
3873 			stcb->asoc.str_reset_seq_out++;
3874 			type = ntohs(req_param->ph.param_type);
3875 			lparam_len = ntohs(req_param->ph.param_length);
3876 			if (type == SCTP_STR_RESET_OUT_REQUEST) {
3877 				int no_clear = 0;
3878 
3879 				req_out_param = (struct sctp_stream_reset_out_request *)req_param;
3880 				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
3881 				asoc->stream_reset_out_is_outstanding = 0;
3882 				if (asoc->stream_reset_outstanding)
3883 					asoc->stream_reset_outstanding--;
3884 				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3885 					/* do it */
3886 					sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
3887 				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3888 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3889 				} else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
3890 					/* Set it up so we don't stop retransmitting */
3891 					asoc->stream_reset_outstanding++;
3892 					stcb->asoc.str_reset_seq_out--;
3893 					asoc->stream_reset_out_is_outstanding = 1;
3894 					no_clear = 1;
3895 				} else {
3896 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3897 				}
3898 				if (no_clear == 0) {
3899 					sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
3900 				}
3901 			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
3902 				req_in_param = (struct sctp_stream_reset_in_request *)req_param;
3903 				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
3904 				if (asoc->stream_reset_outstanding)
3905 					asoc->stream_reset_outstanding--;
3906 				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3907 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
3908 							number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3909 				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3910 					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
3911 							number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3912 				}
3913 			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
3914 				/* Ok we now may have more streams */
3915 				int num_stream;
3916 
3917 				num_stream = stcb->asoc.strm_pending_add_size;
3918 				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
3919 					/* TSNH */
3920 					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
3921 				}
3922 				stcb->asoc.strm_pending_add_size = 0;
3923 				if (asoc->stream_reset_outstanding)
3924 					asoc->stream_reset_outstanding--;
3925 				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3926 					/* Put the new streams into effect */
3927 					int i;
3928 					for (i = asoc->streamoutcnt; i < (asoc->streamoutcnt + num_stream); i++) {
3929 						asoc->strmout[i].state = SCTP_STREAM_OPEN;
3930 					}
3931 					asoc->streamoutcnt += num_stream;
3932 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
3933 				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3934 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3935 								     SCTP_STREAM_CHANGE_DENIED);
3936 				} else {
3937 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3938 								     SCTP_STREAM_CHANGE_FAILED);
3939 				}
3940 			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
3941 				if (asoc->stream_reset_outstanding)
3942 					asoc->stream_reset_outstanding--;
3943 				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3944 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3945 								     SCTP_STREAM_CHANGE_DENIED);
3946 				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3947 					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3948 								     SCTP_STREAM_CHANGE_FAILED);
3949 				}
3950 			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
3951 				/**
3952 				 * a) Adopt the new in-TSN.
3953 				 * b) Reset the map.
3954 				 * c) Adopt the new out-TSN.
3955 				 */
3956 				struct sctp_stream_reset_response_tsn *resp;
3957 				struct sctp_forward_tsn_chunk fwdtsn;
3958 				int abort_flag = 0;
3959 				if (respin == NULL) {
3960 					/* huh ? */
3961 					return (0);
3962 				}
3963 				if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
3964 					return (0);
3965 				}
3966 				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3967 					resp = (struct sctp_stream_reset_response_tsn *)respin;
3968 					asoc->stream_reset_outstanding--;
3969 					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3970 					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3971 					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
3972 					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3973 					if (abort_flag) {
3974 						return (1);
3975 					}
3976 					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
3977 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3978 						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3979 					}
3980 
3981 					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3982 					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
3983 					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3984 
3985 					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
3986 					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
3987 
3988 					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
3989 					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
3990 
3991 					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3992 					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3993 					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
3994 				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3995 					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
3996 								     SCTP_ASSOC_RESET_DENIED);
3997 				} else {
3998 					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
3999 								     SCTP_ASSOC_RESET_FAILED);
4000 				}
4001 			}
4002 			/* get rid of the request and get the request flags */
4003 			if (asoc->stream_reset_outstanding == 0) {
4004 				sctp_clean_up_stream_reset(stcb);
4005 			}
4006 		}
4007 	}
4008 	if (asoc->stream_reset_outstanding == 0) {
4009 		sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
4010 	}
4011 	return (0);
4012 }
4013 
4014 static void
4015 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
4016     struct sctp_tmit_chunk *chk,
4017     struct sctp_stream_reset_in_request *req, int trunc)
4018 {
4019 	uint32_t seq;
4020 	int len, i;
4021 	int number_entries;
4022 	uint16_t temp;
4023 
4024 	/*
4025 	 * The peer wants me to send a str-reset of my outgoing streams if
4026 	 * seq_in is right.
4027 	 */
4028 	struct sctp_association *asoc = &stcb->asoc;
4029 
4030 	seq = ntohl(req->request_seq);
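	/*
	 * Only the expected sequence number is acted on; a request one or two
	 * behind is a retransmission and just gets the cached result echoed back.
	 */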
4031 	if (asoc->str_reset_seq_in == seq) {
4032 		asoc->last_reset_action[1] = asoc->last_reset_action[0];
4033 		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
4034 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4035 		} else if (trunc) {
4036 			/* Can't do it, since they exceeded our buffer size  */
4037 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4038 		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
4039 			len = ntohs(req->ph.param_length);
4040 			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
4041 			if (number_entries) {
4042 				for (i = 0; i < number_entries; i++) {
4043 					temp = ntohs(req->list_of_streams[i]);
4044 					if (temp >= stcb->asoc.streamoutcnt) {
4045 						asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4046 						goto bad_boy;
4047 					}
4048 					req->list_of_streams[i] = temp;
4049 				}
4050 				for (i = 0; i < number_entries; i++) {
4051 					if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
4052 						stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
4053 					}
4054 				}
4055 			} else {
4056 				/* It's all streams */
4057 				for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
4058 					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
4059 						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
4060 				}
4061 			}
4062 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4063 		} else {
4064 			/* Can't do it, since we have sent one out */
4065 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
4066 		}
4067 	bad_boy:
4068 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4069 		asoc->str_reset_seq_in++;
4070 	} else if (asoc->str_reset_seq_in - 1 == seq) {
4071 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4072 	} else if (asoc->str_reset_seq_in - 2 == seq) {
4073 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4074 	} else {
4075 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4076 	}
4077 	sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
4078 }
4079 
4080 static int
4081 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
4082     struct sctp_tmit_chunk *chk,
4083     struct sctp_stream_reset_tsn_request *req)
4084 {
4085 	/* reset all in and out and update the tsn */
4086 	/*
4087 	 * A) reset my str-seq's on in and out. B) Select a receive next,
4088 	 * and set cum-ack to it. Also process this selected number as a
4089 	 * fwd-tsn as well. C) set in the response my next sending seq.
4090 	 */
4091 	struct sctp_forward_tsn_chunk fwdtsn;
4092 	struct sctp_association *asoc = &stcb->asoc;
4093 	int abort_flag = 0;
4094 	uint32_t seq;
4095 
4096 	seq = ntohl(req->request_seq);
4097 	if (asoc->str_reset_seq_in == seq) {
4098 		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
4099 		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
4100 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4101 		} else {
4102 			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
4103 			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
4104 			fwdtsn.ch.chunk_flags = 0;
4105 			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
4106 			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
4107 			if (abort_flag) {
4108 				return (1);
4109 			}
4110 			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
4111 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
4112 				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4113 			}
4114 			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
4115 			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
4116 			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
4117 			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
4118 			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
4119 			atomic_add_int(&asoc->sending_seq, 1);
4120 			/* save off historical data for retrans */
4121 			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
4122 			asoc->last_sending_seq[0] = asoc->sending_seq;
4123 			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
4124 			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
4125 			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
4126 			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
4127 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4128 			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
4129 		}
4130 		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
4131 		                                 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
4132 		asoc->str_reset_seq_in++;
4133 	} else if (asoc->str_reset_seq_in - 1 == seq) {
4134 		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
4135 		                                 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
4136 	} else if (asoc->str_reset_seq_in - 2 == seq) {
4137 		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
4138 		                                 asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
4139 	} else {
4140 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4141 	}
4142 	return (0);
4143 }
4144 
4145 static void
4146 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
4147     struct sctp_tmit_chunk *chk,
4148     struct sctp_stream_reset_out_request *req, int trunc)
4149 {
4150 	uint32_t seq, tsn;
4151 	int number_entries, len;
4152 	struct sctp_association *asoc = &stcb->asoc;
4153 
4154 	seq = ntohl(req->request_seq);
4155 
4156 	/* now if its not a duplicate we process it */
4157 	if (asoc->str_reset_seq_in == seq) {
4158 		len = ntohs(req->ph.param_length);
4159 		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
4160 		/*
4161 		 * The sender is resetting; handle the stream list. We must
4162 		 * a) verify whether we can do the reset now; if so, no problem;
4163 		 * b) if we can't do the reset yet, we must copy the request and
4164 		 * c) queue it, and set up the data-in processor to trigger it
4165 		 * when needed and dequeue all the queued data.
4166 		 */
4167 		tsn = ntohl(req->send_reset_at_tsn);
4168 
4169 		/* move the reset action back one */
4170 		asoc->last_reset_action[1] = asoc->last_reset_action[0];
4171 		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
4172 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4173 		} else if (trunc) {
4174 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4175 		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
4176 			/* we can do it now */
4177 			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
4178 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4179 		} else {
4180 			/*
4181 			 * we must queue it up and thus wait for the TSN's
4182 			 * to arrive that are at or before tsn
4183 			 */
4184 			struct sctp_stream_reset_list *liste;
4185 			int siz;
4186 
4187 			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
4188 			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
4189 				    siz, SCTP_M_STRESET);
4190 			if (liste == NULL) {
4191 				/* gak out of memory */
4192 				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4193 				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4194 				return;
4195 			}
4196 			liste->seq = seq;
4197 			liste->tsn = tsn;
4198 			liste->number_entries = number_entries;
4199 			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
4200 			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
4201 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
4202 		}
4203 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4204 		asoc->str_reset_seq_in++;
4205 	} else if ((asoc->str_reset_seq_in - 1) == seq) {
4206 		/*
4207 		 * one seq back, just echo back last action since my
4208 		 * response was lost.
4209 		 */
4210 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4211 	} else if ((asoc->str_reset_seq_in - 2) == seq) {
4212 		/*
4213 		 * two seq back, just echo back last action since my
4214 		 * response was lost.
4215 		 */
4216 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4217 	} else {
4218 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4219 	}
4220 }
4221 
4222 static void
4223 sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
4224 			       struct sctp_stream_reset_add_strm  *str_add)
4225 {
4226 	/*
4227 	 * Peer is requesting to add more streams.
4228 	 * If it's within our max-streams we will
4229 	 * allow it.
4230 	 */
4231 	uint32_t num_stream, i;
4232 	uint32_t seq;
4233 	struct sctp_association *asoc = &stcb->asoc;
4234 	struct sctp_queued_to_read *ctl, *nctl;
4235 
4236 	/* Get the number. */
4237 	seq = ntohl(str_add->request_seq);
4238 	num_stream = ntohs(str_add->number_of_streams);
4239 	/* Now what would be the new total? */
4240 	if (asoc->str_reset_seq_in == seq) {
4241 		num_stream += stcb->asoc.streamincnt;
4242 		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
4243 		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
4244 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4245 		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
4246 		           (num_stream > 0xffff)) {
4247 			/* We must reject it; they asked for too many */
4248   denied:
4249 			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4250 		} else {
4251 			/* Ok, we can do that :-) */
4252 			struct sctp_stream_in *oldstrm;
4253 
4254 			/* save off the old */
4255 			oldstrm = stcb->asoc.strmin;
4256 			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
4257 			            (num_stream * sizeof(struct sctp_stream_in)),
4258 			            SCTP_M_STRMI);
4259 			if (stcb->asoc.strmin == NULL) {
4260 				stcb->asoc.strmin = oldstrm;
4261 				goto denied;
4262 			}
4263 			/* copy off the old data */
4264 			for (i = 0; i < stcb->asoc.streamincnt; i++) {
4265 				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
4266 				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
4267 				stcb->asoc.strmin[i].sid = i;
4268 				stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
4269 				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
4270 				stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
4271 				/* now anything on those queues? */
4272 				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
4273 					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
4274 					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
4275 				}
4276 				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
4277 					TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
4278 					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
4279 				}
4280 			}
4281 			/* Init the new streams */
4282 			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
4283 				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
4284 				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
4285 				stcb->asoc.strmin[i].sid = i;
4286 				stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
4287 				stcb->asoc.strmin[i].pd_api_started = 0;
4288 				stcb->asoc.strmin[i].delivery_started = 0;
4289 			}
4290 			SCTP_FREE(oldstrm, SCTP_M_STRMI);
4291 			/* update the size */
4292 			stcb->asoc.streamincnt = num_stream;
4293 			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4294 			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
4295 		}
4296 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4297 		asoc->str_reset_seq_in++;
4298 	} else if ((asoc->str_reset_seq_in - 1) == seq) {
4299 		/*
4300 		 * one seq back, just echo back last action since my
4301 		 * response was lost.
4302 		 */
4303 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4304 	} else if ((asoc->str_reset_seq_in - 2) == seq) {
4305 		/*
4306 		 * two seq back, just echo back last action since my
4307 		 * response was lost.
4308 		 */
4309 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4310 	} else {
4311 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4312 
4313 	}
4314 }
4315 
4316 static void
4317 sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
4318 				   struct sctp_stream_reset_add_strm  *str_add)
4319 {
4320 	/*
4321 	 * Peer is requesting to add more streams.
4322 	 * If it's within our max-streams we will
4323 	 * allow it.
4324 	 */
4325 	uint16_t num_stream;
4326 	uint32_t seq;
4327 	struct sctp_association *asoc = &stcb->asoc;
4328 
4329 	/* Get the number. */
4330 	seq = ntohl(str_add->request_seq);
4331 	num_stream = ntohs(str_add->number_of_streams);
4332 	/* Now what would be the new total? */
4333 	if (asoc->str_reset_seq_in == seq) {
4334 		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
4335 		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
4336 			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4337 		} else if (stcb->asoc.stream_reset_outstanding) {
4338 			/* We must reject it; we have something pending */
4339 			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
4340 		} else {
4341 			/* Ok, we can do that :-) */
4342 			int mychk;
4343 			mychk = stcb->asoc.streamoutcnt;
4344 			mychk += num_stream;
4345 			if (mychk < 0x10000) {
4346 				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4347 				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
4348 					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4349 				}
4350 			} else {
4351 				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4352 			}
4353 		}
4354 		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
4355 		asoc->str_reset_seq_in++;
4356 	} else if ((asoc->str_reset_seq_in - 1) == seq) {
4357 		/*
4358 		 * one seq back, just echo back last action since my
4359 		 * response was lost.
4360 		 */
4361 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4362 	} else if ((asoc->str_reset_seq_in - 2) == seq) {
4363 		/*
4364 		 * two seq back, just echo back last action since my
4365 		 * response was lost.
4366 		 */
4367 		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4368 	} else {
4369 		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4370 	}
4371 }
4372 
4373 #ifdef __GNUC__
4374 __attribute__ ((noinline))
4375 #endif
4376 static int
4377 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
4378 			 struct sctp_chunkhdr *ch_req)
4379 {
4380 	uint16_t remaining_length, param_len, ptype;
4381 	struct sctp_paramhdr pstore;
4382 	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
4383 	uint32_t seq = 0;
4384 	int num_req = 0;
4385 	int trunc = 0;
4386 	struct sctp_tmit_chunk *chk;
4387 	struct sctp_chunkhdr *ch;
4388 	struct sctp_paramhdr *ph;
4389 	int ret_code = 0;
4390 	int num_param = 0;
4391 
4392 	/* now it may be a reset or a reset-response */
4393 	remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);
4394 
4395 	/* setup for adding the response */
4396 	sctp_alloc_a_chunk(stcb, chk);
4397 	if (chk == NULL) {
4398 		return (ret_code);
4399 	}
4400 	chk->copy_by_ref = 0;
4401 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
4402 	chk->rec.chunk_id.can_take_data = 0;
4403 	chk->flags = 0;
4404 	chk->asoc = &stcb->asoc;
4405 	chk->no_fr_allowed = 0;
4406 	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
4407 	chk->book_size_scale = 0;
4408 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
4409 	if (chk->data == NULL) {
4410 	strres_nochunk:
4411 		if (chk->data) {
4412 			sctp_m_freem(chk->data);
4413 			chk->data = NULL;
4414 		}
4415 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
4416 		return (ret_code);
4417 	}
4418 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
4419 
4420 	/* setup chunk parameters */
4421 	chk->sent = SCTP_DATAGRAM_UNSENT;
4422 	chk->snd_count = 0;
4423 	chk->whoTo = NULL;
4424 
4425 	ch = mtod(chk->data, struct sctp_chunkhdr *);
4426 	ch->chunk_type = SCTP_STREAM_RESET;
4427 	ch->chunk_flags = 0;
4428 	ch->chunk_length = htons(chk->send_size);
4429 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
4430 	offset += sizeof(struct sctp_chunkhdr);
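	/*
	 * Walk the TLV parameters of the RE-CONFIG chunk. Each parameter is
	 * copied out of the mbuf chain (truncated to cstore if oversized) and
	 * dispatched by type; each handler appends its result to the response
	 * chunk built above.
	 */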
4431 	while (remaining_length >= sizeof(struct sctp_paramhdr)) {
4432 		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
4433 		if (ph == NULL) {
4434 			/* TSNH */
4435 			break;
4436 		}
4437 		param_len = ntohs(ph->param_length);
4438 		if ((param_len > remaining_length) ||
4439 		    (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
4440 			/* bad parameter length */
4441 			break;
4442 		}
4443 		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
4444 							   (uint8_t *)&cstore);
4445 		if (ph == NULL) {
4446 			/* TSNH */
4447 			break;
4448 		}
4449 		ptype = ntohs(ph->param_type);
4450 		num_param++;
4451 		if (param_len > sizeof(cstore)) {
4452 			trunc = 1;
4453 		} else {
4454 			trunc = 0;
4455 		}
4456 		if (num_param > SCTP_MAX_RESET_PARAMS) {
4457 			/* hit the maximum number of parameters already, sorry... */
4458 			break;
4459 		}
4460 		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
4461 			struct sctp_stream_reset_out_request *req_out;
4462 
4463 			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
4464 				break;
4465 			}
4466 			req_out = (struct sctp_stream_reset_out_request *)ph;
4467 			num_req++;
4468 			if (stcb->asoc.stream_reset_outstanding) {
4469 				seq = ntohl(req_out->response_seq);
4470 				if (seq == stcb->asoc.str_reset_seq_out) {
4471 					/* implicit ack */
4472 					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
4473 				}
4474 			}
4475 			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
4476 		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
4477 			struct sctp_stream_reset_add_strm  *str_add;
4478 
4479 			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
4480 				break;
4481 			}
4482 			str_add = (struct sctp_stream_reset_add_strm  *)ph;
4483 			num_req++;
4484 			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
4485 		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
4486 			struct sctp_stream_reset_add_strm  *str_add;
4487 
4488 			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
4489 				break;
4490 			}
4491 			str_add = (struct sctp_stream_reset_add_strm  *)ph;
4492 			num_req++;
4493 			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
4494 		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
4495 			struct sctp_stream_reset_in_request *req_in;
4496 
4497 			num_req++;
4498 			req_in = (struct sctp_stream_reset_in_request *)ph;
4499 			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
4500 		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
4501 			struct sctp_stream_reset_tsn_request *req_tsn;
4502 
4503 			num_req++;
4504 			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
4505 			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
4506 				ret_code = 1;
4507 				goto strres_nochunk;
4508 			}
4509 			/* no more */
4510 			break;
4511 		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
4512 			struct sctp_stream_reset_response *resp;
4513 			uint32_t result;
4514 
4515 			if (param_len < sizeof(struct sctp_stream_reset_response)) {
4516 				break;
4517 			}
4518 			resp = (struct sctp_stream_reset_response *)ph;
4519 			seq = ntohl(resp->response_seq);
4520 			result = ntohl(resp->result);
4521 			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
4522 				ret_code = 1;
4523 				goto strres_nochunk;
4524 			}
4525 		} else {
4526 			break;
4527 		}
4528 		offset += SCTP_SIZE32(param_len);
4529 		if (remaining_length >= SCTP_SIZE32(param_len)) {
4530 			remaining_length -= SCTP_SIZE32(param_len);
4531 		} else {
4532 			remaining_length = 0;
4533 		}
4534 	}
4535 	if (num_req == 0) {
4536 		/* we have no response, free the stuff */
4537 		goto strres_nochunk;
4538 	}
4539 	/* ok we have a chunk to link in */
4540 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
4541 			  chk,
4542 			  sctp_next);
4543 	stcb->asoc.ctrl_queue_cnt++;
4544 	return (ret_code);
4545 }
4546 
4547 /*
4548  * Handle a router's or endpoint's report of a packet loss. There are two ways
4549  * to handle this: either we get the whole packet and must dissect it
4550  * ourselves (possibly with truncation and/or corruption), or it is a summary
4551  * from a middle box that did the dissecting for us.
4552  */
4553 static void
4554 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4555     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4556 {
4557 	struct sctp_chunk_desc desc;
4558 	struct sctp_chunkhdr *chk_hdr;
4559 	struct sctp_data_chunk *data_chunk;
4560 	struct sctp_idata_chunk *idata_chunk;
4561 	uint32_t bottle_bw, on_queue;
4562 	uint32_t offset, chk_len;
4563 	uint16_t trunc_len;
4564 	uint16_t pktdrp_len;
4565 	uint8_t pktdrp_flags;
4566 
4567 	KASSERT(sizeof(struct sctp_pktdrop_chunk) <= limit,
4568 	        ("PKTDROP chunk too small"));
4569 	pktdrp_flags = cp->ch.chunk_flags;
4570 	pktdrp_len = ntohs(cp->ch.chunk_length);
4571 	KASSERT(limit <= pktdrp_len, ("Inconsistent limit"));
4572 	if (pktdrp_flags & SCTP_PACKET_TRUNCATED) {
4573 		trunc_len = ntohs(cp->trunc_len);
4574 		if (trunc_len <= pktdrp_len - sizeof(struct sctp_pktdrop_chunk)) {
4575 			/* The peer plays games with us. */
4576 			return;
4577 		}
4578 	} else {
4579 		trunc_len = 0;
4580 	}
4581 	limit -= sizeof(struct sctp_pktdrop_chunk);
4582 	offset = 0;
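	/*
	 * What follows in cp->data is the (possibly truncated) dropped
	 * packet: an SCTP common header followed by its chunks. An empty
	 * payload from a middle box counts as a bandwidth-only report; a
	 * partial common header means the report is corrupt.
	 */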
4583 	if (offset == limit) {
4584 		if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
4585 			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4586 		}
4587 	} else if (offset + sizeof(struct sctphdr) > limit) {
4588 		/* Only a partial SCTP common header. */
4589 		SCTP_STAT_INCR(sctps_pdrpcrupt);
4590 		offset = limit;
4591 	} else {
4592 		/* XXX: Check embedded SCTP common header. */
4593 		offset += sizeof(struct sctphdr);
4594 	}
4595 	/* Now parse through the chunks themselves. */
4596 	while (offset < limit) {
4597 		if (offset + sizeof(struct sctp_chunkhdr) > limit) {
4598 			SCTP_STAT_INCR(sctps_pdrpcrupt);
4599 			break;
4600 		}
4601 		chk_hdr = (struct sctp_chunkhdr *)(cp->data + offset);
4602 		desc.chunk_type = chk_hdr->chunk_type;
4603 		/* get amount we need to move */
4604 		chk_len = (uint32_t)ntohs(chk_hdr->chunk_length);
4605 		if (chk_len < sizeof(struct sctp_chunkhdr)) {
4606 			/* Someone is lying... */
4607 			break;
4608 		}
4609 		if (desc.chunk_type == SCTP_DATA) {
4610 			if (stcb->asoc.idata_supported) {
4611 				/* Someone is playing games with us. */
4612 				break;
4613 			}
4614 			if (chk_len <= sizeof(struct sctp_data_chunk)) {
4615 				/* Someone is playing games with us. */
4616 				break;
4617 			}
4618 			if (chk_len < sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY) {
4619 				/* Not enough data bytes available in the chunk. */
4620 				SCTP_STAT_INCR(sctps_pdrpnedat);
4621 				goto next_chunk;
4622 			}
4623 			if (offset + sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) {
4624 				/* Not enough data in buffer. */
4625 				break;
4626 			}
4627 			data_chunk = (struct sctp_data_chunk *)(cp->data + offset);
4628 			memcpy(desc.data_bytes, data_chunk + 1, SCTP_NUM_DB_TO_VERIFY);
4629 			desc.tsn_ifany = data_chunk->dp.tsn;
4630 			if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
4631 				SCTP_STAT_INCR(sctps_pdrpmbda);
4632 			}
4633 		} else if (desc.chunk_type == SCTP_IDATA) {
4634 			if (!stcb->asoc.idata_supported) {
4635 				/* Someone is playing games with us. */
4636 				break;
4637 			}
4638 			if (chk_len <= sizeof(struct sctp_idata_chunk)) {
4639 				/* Someone is playing games with us. */
4640 				break;
4641 			}
4642 			if (chk_len < sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY) {
4643 				/* Not enough data bytes available in the chunk. */
4644 				SCTP_STAT_INCR(sctps_pdrpnedat);
4645 				goto next_chunk;
4646 			}
4647 			if (offset + sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) {
4648 				/* Not enough data in buffer. */
4649 				break;
4650 			}
4651 			idata_chunk = (struct sctp_idata_chunk *)(cp->data + offset);
4652 			memcpy(desc.data_bytes, idata_chunk + 1, SCTP_NUM_DB_TO_VERIFY);
4653 			desc.tsn_ifany = idata_chunk->dp.tsn;
4654 			if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
4655 				SCTP_STAT_INCR(sctps_pdrpmbda);
4656 			}
4657 		} else {
4658 			if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
4659 				SCTP_STAT_INCR(sctps_pdrpmbct);
4660 			}
4661 		}
4662 		if (process_chunk_drop(stcb, &desc, net, pktdrp_flags)) {
4663 			SCTP_STAT_INCR(sctps_pdrppdbrk);
4664 			break;
4665 		}
4666 next_chunk:
4667 		offset += SCTP_SIZE32(chk_len);
4668 	}
4669 	/* Now update any rwnd --- possibly */
4670 	if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4671 		/* From a peer, we get a rwnd report */
4672 		uint32_t a_rwnd;
4673 
4674 		SCTP_STAT_INCR(sctps_pdrpfehos);
4675 
4676 		bottle_bw = ntohl(cp->bottle_bw);
4677 		on_queue = ntohl(cp->current_onq);
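		/*
		 * Derive an advertised rwnd from the report. With illustrative
		 * numbers, bottle_bw = 100000 and on_queue = 60000 give
		 * a_rwnd = 40000; peers_rwnd then becomes a_rwnd minus
		 * total_flight, clamped at 0 and zeroed below the sender-side
		 * SWS threshold.
		 */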
4678 		if (bottle_bw && on_queue) {
4679 			/* a rwnd report is in here */
4680 			if (bottle_bw > on_queue)
4681 				a_rwnd = bottle_bw - on_queue;
4682 			else
4683 				a_rwnd = 0;
4684 
4685 			if (a_rwnd == 0)
4686 				stcb->asoc.peers_rwnd = 0;
4687 			else {
4688 				if (a_rwnd > stcb->asoc.total_flight) {
4689 					stcb->asoc.peers_rwnd =
4690 					    a_rwnd - stcb->asoc.total_flight;
4691 				} else {
4692 					stcb->asoc.peers_rwnd = 0;
4693 				}
4694 				if (stcb->asoc.peers_rwnd <
4695 				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4696 					/* SWS sender side engages */
4697 					stcb->asoc.peers_rwnd = 0;
4698 				}
4699 			}
4700 		}
4701 	} else {
4702 		SCTP_STAT_INCR(sctps_pdrpfmbox);
4703 	}
4704 
4705 	/* now middle boxes in sat networks get a cwnd bump */
4706 	if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) &&
4707 	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4708 	    (stcb->asoc.sat_network)) {
4709 		/*
4710 		 * This is debatable, but for sat networks it makes sense.
4711 		 * Note that if a T3 timer has gone off, we will prohibit any
4712 		 * changes to cwnd until we exit T3 loss recovery.
4713 		 */
4714 		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4715 			net, cp, &bottle_bw, &on_queue);
4716 	}
4717 }
4718 
4719 /*
4720  * Handles all control chunks in a packet. Inputs: - m: mbuf chain, assumed to
4721  * still contain the IP/SCTP header - stcb: the tcb found for this packet -
4722  * offset: offset into the mbuf chain to the first chunkhdr - length: the
4723  * length of the complete packet. Outputs: - length: modified to the remaining
4724  * length after control processing - netp: modified to the new sctp_nets after
4725  * cookie-echo processing. Returns NULL to discard the packet (i.e. no asoc,
4726  * bad packet, ...), otherwise returns the tcb for this packet.
4727  */
4728 #ifdef __GNUC__
4729 __attribute__ ((noinline))
4730 #endif
4731 static struct sctp_tcb *
4732 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4733     struct sockaddr *src, struct sockaddr *dst,
4734     struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4735     struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4736 #if defined(__FreeBSD__) && !defined(__Userspace__)
4737     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4738 #endif
4739     uint32_t vrf_id, uint16_t port)
4740 {
4741 	struct sctp_association *asoc;
4742 	struct mbuf *op_err;
4743 	char msg[SCTP_DIAG_INFO_LEN];
4744 	uint32_t vtag_in;
4745 	int num_chunks = 0;	/* number of control chunks processed */
4746 	uint32_t chk_length, contiguous;
4747 	int ret;
4748 	int abort_no_unlock = 0;
4749 	int ecne_seen = 0;
4750 	/*
4751 	 * How big should this be, and should it be alloc'd? Let's try the
4752 	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4753 	 * until we get into jumbo grams and such.
4754 	 */
4755 	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4756 	int got_auth = 0;
4757 	uint32_t auth_offset = 0, auth_len = 0;
4758 	int auth_skipped = 0;
4759 	int asconf_cnt = 0;
4760 #if defined(__APPLE__) && !defined(__Userspace__)
4761 	struct socket *so;
4762 #endif
4763 
4764 	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4765 		iphlen, *offset, length, (void *)stcb);
4766 
4767 	if (stcb) {
4768 		SCTP_TCB_LOCK_ASSERT(stcb);
4769 	}
4770 	/* validate chunk header length... */
4771 	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4772 		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4773 			ntohs(ch->chunk_length));
4774 		*offset = length;
4775 		return (stcb);
4776 	}
4777 	/*
4778 	 * validate the verification tag
4779 	 */
4780 	vtag_in = ntohl(sh->v_tag);
4781 
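	/*
	 * Summary of the vtag checks below (COOKIE-ECHO is exempt and
	 * validated later): an INIT must carry a zero vtag; ABORT,
	 * SHUTDOWN-COMPLETE and PACKET-DROPPED may match either our vtag
	 * (T bit clear) or the peer's vtag (T bit set); a SHUTDOWN-ACK with
	 * a stale vtag is answered as out-of-the-blue; everything else must
	 * match our vtag exactly.
	 */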
4782 	if (ch->chunk_type == SCTP_INITIATION) {
4783 		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4784 			ntohs(ch->chunk_length), vtag_in);
4785 		if (vtag_in != 0) {
4786 			/* protocol error- silently discard... */
4787 			SCTP_STAT_INCR(sctps_badvtag);
4788 			if (stcb != NULL) {
4789 				SCTP_TCB_UNLOCK(stcb);
4790 			}
4791 			return (NULL);
4792 		}
4793 	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4794 		/*
4795 		 * If there is no stcb, skip the AUTH chunk and process it
4796 		 * later, after a stcb is found (to validate that the lookup
4797 		 * was valid).
4798 		 */
4799 		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4800 		    (stcb == NULL) &&
4801 		    (inp->auth_supported == 1)) {
4802 			/* save this chunk for later processing */
4803 			auth_skipped = 1;
4804 			auth_offset = *offset;
4805 			auth_len = ntohs(ch->chunk_length);
4806 
4807 			/* (temporarily) move past this chunk */
4808 			*offset += SCTP_SIZE32(auth_len);
4809 			if (*offset >= length) {
4810 				/* no more data left in the mbuf chain */
4811 				*offset = length;
4812 				return (NULL);
4813 			}
4814 			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4815 								   sizeof(struct sctp_chunkhdr), chunk_buf);
4816 		}
4817 		if (ch == NULL) {
4818 			/* Help */
4819 			*offset = length;
4820 			return (stcb);
4821 		}
4822 		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4823 			goto process_control_chunks;
4824 		}
4825 		/*
4826 		 * first check if it's an ASCONF with an unknown src addr; we
4827 		 * need to look inside to find the association
4828 		 */
4829 		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4830 			struct sctp_chunkhdr *asconf_ch = ch;
4831 			uint32_t asconf_offset = 0, asconf_len = 0;
4832 
4833 			/* inp's refcount may be reduced */
4834 			SCTP_INP_INCR_REF(inp);
4835 
4836 			asconf_offset = *offset;
4837 			do {
4838 				asconf_len = ntohs(asconf_ch->chunk_length);
4839 				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4840 					break;
4841 				stcb = sctp_findassociation_ep_asconf(m,
4842 				                                      *offset,
4843 				                                      dst,
4844 				                                      sh, &inp, netp, vrf_id);
4845 				if (stcb != NULL)
4846 					break;
4847 				asconf_offset += SCTP_SIZE32(asconf_len);
4848 				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4849 										  sizeof(struct sctp_chunkhdr), chunk_buf);
4850 			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4851 			if (stcb == NULL) {
4852 				/*
4853 				 * reduce inp's refcount if not reduced in
4854 				 * sctp_findassociation_ep_asconf().
4855 				 */
4856 				SCTP_INP_DECR_REF(inp);
4857 			}
4858 
4859 			/* now go back and verify any auth chunk to be sure */
4860 			if (auth_skipped && (stcb != NULL)) {
4861 				struct sctp_auth_chunk *auth;
4862 
4863 				if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
4864 					auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, chunk_buf);
4865 					got_auth = 1;
4866 					auth_skipped = 0;
4867 				} else {
4868 					auth = NULL;
4869 				}
4870 				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4871 								       auth_offset)) {
4872 					/* auth HMAC failed so dump it */
4873 					*offset = length;
4874 					return (stcb);
4875 				} else {
4876 					/* remaining chunks are HMAC checked */
4877 					stcb->asoc.authenticated = 1;
4878 				}
4879 			}
4880 		}
4881 		if (stcb == NULL) {
4882 			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4883 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4884 			                             msg);
4885 			/* no association, so it's out of the blue... */
4886 			sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
4887 #if defined(__FreeBSD__) && !defined(__Userspace__)
4888 			                 mflowtype, mflowid, inp->fibnum,
4889 #endif
4890 					 vrf_id, port);
4891 			*offset = length;
4892 			return (NULL);
4893 		}
4894 		asoc = &stcb->asoc;
4895 		/* ABORT and SHUTDOWN can use either v_tag... */
4896 		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4897 		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4898 		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4899 			/* Take the T-bit always into account. */
4900 			if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
4901 			     (vtag_in == asoc->my_vtag)) ||
4902 			    (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
4903 			     (asoc->peer_vtag != htonl(0)) &&
4904 			     (vtag_in == asoc->peer_vtag))) {
4905 				/* this is valid */
4906 			} else {
4907 				/* drop this packet... */
4908 				SCTP_STAT_INCR(sctps_badvtag);
4909 				if (stcb != NULL) {
4910 					SCTP_TCB_UNLOCK(stcb);
4911 				}
4912 				return (NULL);
4913 			}
4914 		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4915 			if (vtag_in != asoc->my_vtag) {
4916 				/*
4917 				 * this could be a stale SHUTDOWN-ACK or the
4918 				 * peer never got the SHUTDOWN-COMPLETE and
4919 				 * is still hung; we have started a new asoc
4920 				 * but it won't complete until the shutdown
4921 				 * is completed
4922 				 */
4923 				if (stcb != NULL) {
4924 					SCTP_TCB_UNLOCK(stcb);
4925 				}
4926 				SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4927 				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4928 				                             msg);
4929 				sctp_handle_ootb(m, iphlen, *offset, src, dst,
4930 				                 sh, inp, op_err,
4931 #if defined(__FreeBSD__) && !defined(__Userspace__)
4932 				                 mflowtype, mflowid, fibnum,
4933 #endif
4934 				                 vrf_id, port);
4935 				return (NULL);
4936 			}
4937 		} else {
4938 			/* for all other chunks, vtag must match */
4939 			if (vtag_in != asoc->my_vtag) {
4940 				/* invalid vtag... */
4941 				SCTPDBG(SCTP_DEBUG_INPUT3,
4942 					"invalid vtag: %xh, expect %xh\n",
4943 					vtag_in, asoc->my_vtag);
4944 				SCTP_STAT_INCR(sctps_badvtag);
4945 				if (stcb != NULL) {
4946 					SCTP_TCB_UNLOCK(stcb);
4947 				}
4948 				*offset = length;
4949 				return (NULL);
4950 			}
4951 		}
4952 	}			/* end if !SCTP_COOKIE_ECHO */
4953 	/*
4954 	 * process all control chunks...
4955 	 */
4956 	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4957 	     (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4958 	     (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4959 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4960 		/* implied cookie-ack... we must have lost the ack */
4961 		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4962 				       *netp);
4963 	}
4964 
4965  process_control_chunks:
4966 	while (IS_SCTP_CONTROL(ch)) {
4967 		/* validate chunk length */
4968 		chk_length = ntohs(ch->chunk_length);
4969 		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4970 			ch->chunk_type, chk_length);
4971 		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4972 		if (chk_length < sizeof(*ch) ||
4973 		    (*offset + (int)chk_length) > length) {
4974 			*offset = length;
4975 			return (stcb);
4976 		}
4977 		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4978 		/*
4979 		 * INIT and INIT-ACK only get the fixed "header" portion,
4980 		 * because we don't have to process the peer's COOKIE.
4981 		 * All others get a complete chunk.
4982 		 */
4983 		switch (ch->chunk_type) {
4984 		case SCTP_INITIATION:
4985 			contiguous = sizeof(struct sctp_init_chunk);
4986 			break;
4987 		case SCTP_INITIATION_ACK:
4988 			contiguous = sizeof(struct sctp_init_ack_chunk);
4989 			break;
4990 		default:
4991 			contiguous = min(chk_length, sizeof(chunk_buf));
4992 			break;
4993 		}
4994 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4995 		                                           contiguous,
4996 		                                           chunk_buf);
4997 		if (ch == NULL) {
4998 			*offset = length;
4999 			if (stcb != NULL) {
5000 				SCTP_TCB_UNLOCK(stcb);
5001 			}
5002 			return (NULL);
5003 		}
5004 
5005 		num_chunks++;
5006 		/* Save off the last place we got a control from */
5007 		if (stcb != NULL) {
5008 			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
5009 				/*
5010 				 * allow last_control to be NULL if
5011 				 * ASCONF... ASCONF processing will find the
5012 				 * right net later
5013 				 */
5014 				if ((netp != NULL) && (*netp != NULL))
5015 					stcb->asoc.last_control_chunk_from = *netp;
5016 			}
5017 		}
5018 #ifdef SCTP_AUDITING_ENABLED
5019 		sctp_audit_log(0xB0, ch->chunk_type);
5020 #endif
5021 
5022 		/* check to see if this chunk required auth, but isn't */
5023 		if ((stcb != NULL) &&
5024 		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
5025 		    !stcb->asoc.authenticated) {
5026 			/* "silently" ignore */
5027 			SCTP_STAT_INCR(sctps_recvauthmissing);
5028 			goto next_chunk;
5029 		}
5030 		switch (ch->chunk_type) {
5031 		case SCTP_INITIATION:
5032 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
5033 			/* The INIT chunk must be the only chunk. */
5034 			if ((num_chunks > 1) ||
5035 			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5036 				/* RFC 4960 requires that no ABORT is sent */
5037 				*offset = length;
5038 				if (stcb != NULL) {
5039 					SCTP_TCB_UNLOCK(stcb);
5040 				}
5041 				return (NULL);
5042 			}
5043 			/* Honor our resource limit. */
5044 			if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
5045 				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5046 				sctp_abort_association(inp, stcb, m, iphlen,
5047 						       src, dst, sh, op_err,
5048 #if defined(__FreeBSD__) && !defined(__Userspace__)
5049 				                       mflowtype, mflowid,
5050 #endif
5051 				                       vrf_id, port);
5052 				*offset = length;
5053 				return (NULL);
5054 			}
5055 			sctp_handle_init(m, iphlen, *offset, src, dst, sh,
5056 			                 (struct sctp_init_chunk *)ch, inp,
5057 			                 stcb, *netp, &abort_no_unlock,
5058 #if defined(__FreeBSD__) && !defined(__Userspace__)
5059 			                 mflowtype, mflowid,
5060 #endif
5061 			                 vrf_id, port);
5062 			*offset = length;
5063 			if ((!abort_no_unlock) && (stcb != NULL)) {
5064 				SCTP_TCB_UNLOCK(stcb);
5065 			}
5066 			return (NULL);
5067 			break;
5068 		case SCTP_PAD_CHUNK:
5069 			break;
5070 		case SCTP_INITIATION_ACK:
5071 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT_ACK\n");
5072 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5073 				/* We are not interested anymore */
5074 				if ((stcb != NULL) && (stcb->asoc.total_output_queue_size)) {
5075 					;
5076 				} else {
5077 					*offset = length;
5078 					if (stcb != NULL) {
5079 #if defined(__APPLE__) && !defined(__Userspace__)
5080 						so = SCTP_INP_SO(inp);
5081 						atomic_add_int(&stcb->asoc.refcnt, 1);
5082 						SCTP_TCB_UNLOCK(stcb);
5083 						SCTP_SOCKET_LOCK(so, 1);
5084 						SCTP_TCB_LOCK(stcb);
5085 						atomic_subtract_int(&stcb->asoc.refcnt, 1);
5086 #endif
5087 						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5088 						                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5089 #if defined(__APPLE__) && !defined(__Userspace__)
5090 						SCTP_SOCKET_UNLOCK(so, 1);
5091 #endif
5092 					}
5093 					return (NULL);
5094 				}
5095 			}
5096 			/* The INIT-ACK chunk must be the only chunk. */
5097 			if ((num_chunks > 1) ||
5098 			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5099 				*offset = length;
5100 				return (stcb);
5101 			}
5102 			if ((netp != NULL) && (*netp != NULL)) {
5103 				ret = sctp_handle_init_ack(m, iphlen, *offset,
5104 				                           src, dst, sh,
5105 				                           (struct sctp_init_ack_chunk *)ch,
5106 				                           stcb, *netp,
5107 				                           &abort_no_unlock,
5108 #if defined(__FreeBSD__) && !defined(__Userspace__)
5109 				                           mflowtype, mflowid,
5110 #endif
5111 				                           vrf_id);
5112 			} else {
5113 				ret = -1;
5114 			}
5115 			*offset = length;
5116 			if (abort_no_unlock) {
5117 				return (NULL);
5118 			}
5119 			/*
5120 			 * Special case, I must call the output routine to
5121 			 * get the cookie echoed
5122 			 */
5123 			if ((stcb != NULL) && (ret == 0)) {
5124 				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5125 			}
5126 			return (stcb);
5127 			break;
5128 		case SCTP_SELECTIVE_ACK:
5129 		case SCTP_NR_SELECTIVE_ACK:
5130 		{
5131 			int abort_now = 0;
5132 			uint32_t a_rwnd, cum_ack;
5133 			uint16_t num_seg, num_nr_seg, num_dup;
5134 			uint8_t flags;
5135 			int offset_seg, offset_dup;
5136 
5137 			SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
5138 				ch->chunk_type == SCTP_SELECTIVE_ACK ? "SCTP_SACK" : "SCTP_NR_SACK");
5139 			SCTP_STAT_INCR(sctps_recvsacks);
5140 			if (stcb == NULL) {
5141 				SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing %s chunk\n",
5142 				        (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK");
5143 				break;
5144 			}
5145 			if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
5146 				if (chk_length < sizeof(struct sctp_sack_chunk)) {
5147 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
5148 					break;
5149 				}
5150 			} else {
5151 				if (stcb->asoc.nrsack_supported == 0) {
5152 					goto unknown_chunk;
5153 				}
5154 				if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
5155 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR_SACK chunk, too small\n");
5156 					break;
5157 				}
5158 			}
5159 			if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
5160 				/*-
5161 				 * If we have sent a shutdown-ack, we will pay no
5162 				 * attention to a sack sent in to us since
5163 				 * we don't care anymore.
5164 				 */
5165 				break;
5166 			}
5167 			flags = ch->chunk_flags;
5168 			if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
5169 				struct sctp_sack_chunk *sack;
5170 
5171 				sack = (struct sctp_sack_chunk *)ch;
5172 				cum_ack = ntohl(sack->sack.cum_tsn_ack);
5173 				num_seg = ntohs(sack->sack.num_gap_ack_blks);
5174 				num_nr_seg = 0;
5175 				num_dup = ntohs(sack->sack.num_dup_tsns);
5176 				a_rwnd = ntohl(sack->sack.a_rwnd);
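				/*
				 * A well-formed SACK is exactly the fixed
				 * header plus one gap-ack block per num_seg
				 * and one 32-bit duplicate TSN per num_dup;
				 * anything else is ignored as malformed.
				 */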
5177 				if (sizeof(struct sctp_sack_chunk) +
5178 				    num_seg * sizeof(struct sctp_gap_ack_block) +
5179 				    num_dup * sizeof(uint32_t) != chk_length) {
5180 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
5181 					break;
5182 				}
5183 				offset_seg = *offset + sizeof(struct sctp_sack_chunk);
5184 				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
5185 			} else {
5186 				struct sctp_nr_sack_chunk *nr_sack;
5187 
5188 				nr_sack = (struct sctp_nr_sack_chunk *)ch;
5189 				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
5190 				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
5191 				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
5192 				num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
5193 				a_rwnd = ntohl(nr_sack->nr_sack.a_rwnd);
5194 				if (sizeof(struct sctp_nr_sack_chunk) +
5195 				    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
5196 				    num_dup * sizeof(uint32_t) != chk_length) {
5197 					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
5198 					break;
5199 				}
5200 				offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
5201 				offset_dup = offset_seg + (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block);
5202 			}
5203 			SCTPDBG(SCTP_DEBUG_INPUT3, "%s process cum_ack:%x num_seg:%d a_rwnd:%d\n",
5204 				(ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK",
5205 			        cum_ack, num_seg, a_rwnd);
5206 			stcb->asoc.seen_a_sack_this_pkt = 1;
5207 			if ((stcb->asoc.pr_sctp_cnt == 0) &&
5208 			    (num_seg == 0) && (num_nr_seg == 0) &&
5209 			    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
5210 			    (stcb->asoc.saw_sack_with_frags == 0) &&
5211 			    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
5212 			    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
5213 				/*
5214 				 * We have a SIMPLE sack with no
5215 				 * gap-ack blocks and data on the
5216 				 * sent queue to be acked. Use the
5217 				 * faster-path sack processing. We
5218 				 * also allow window-update sacks
5219 				 * with no missing segments to go
5220 				 * this way too.
5221 				 */
5222 				sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
5223 				                         &abort_now, ecne_seen);
5224 			} else {
5225 				if ((netp != NULL) && (*netp != NULL)) {
5226 					sctp_handle_sack(m, offset_seg, offset_dup, stcb,
5227 					                 num_seg, num_nr_seg, num_dup, &abort_now, flags,
5228 					                 cum_ack, a_rwnd, ecne_seen);
5229 				}
5230 			}
5231 			if (abort_now) {
5232 				/* ABORT signal from sack processing */
5233 				*offset = length;
5234 				return (NULL);
5235 			}
5236 			if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
5237 			    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
5238 			    (stcb->asoc.stream_queue_cnt == 0)) {
5239 				sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb,  0, NULL, SCTP_SO_NOT_LOCKED);
5240 			}
5241 			break;
5242 		}
5243 		case SCTP_HEARTBEAT_REQUEST:
5244 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
5245 			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5246 				SCTP_STAT_INCR(sctps_recvheartbeat);
5247 				sctp_send_heartbeat_ack(stcb, m, *offset,
5248 							chk_length, *netp);
5249 			}
5250 			break;
5251 		case SCTP_HEARTBEAT_ACK:
5252 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT_ACK\n");
5253 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
5254 				/* It's not ours */
5255 				*offset = length;
5256 				return (stcb);
5257 			}
5258 			SCTP_STAT_INCR(sctps_recvheartbeatack);
5259 			if ((netp != NULL) && (*netp != NULL)) {
5260 				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
5261 							  stcb, *netp);
5262 			}
5263 			break;
5264 		case SCTP_ABORT_ASSOCIATION:
5265 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
5266 				(void *)stcb);
5267 			*offset = length;
5268 			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5269 				if (sctp_handle_abort((struct sctp_abort_chunk *)ch, stcb, *netp)) {
5270 					return (NULL);
5271 				} else {
5272 					return (stcb);
5273 				}
5274 			} else {
5275 				return (NULL);
5276 			}
5277 			break;
5278 		case SCTP_SHUTDOWN:
5279 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
5280 				(void *)stcb);
5281 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
5282 				*offset = length;
5283 				return (stcb);
5284 			}
5285 			if ((netp != NULL) && (*netp != NULL)) {
5286 				int abort_flag = 0;
5287 
5288 				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
5289 						     stcb, *netp, &abort_flag);
5290 				if (abort_flag) {
5291 					*offset = length;
5292 					return (NULL);
5293 				}
5294 			}
5295 			break;
5296 		case SCTP_SHUTDOWN_ACK:
5297 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_ACK, stcb %p\n", (void *)stcb);
5298 			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5299 				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
5300 			}
5301 			*offset = length;
5302 			return (NULL);
5303 			break;
5304 		case SCTP_OPERATION_ERROR:
5305 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP_ERR\n");
5306 			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL) &&
5307 			    sctp_handle_error(ch, stcb, *netp, contiguous) < 0) {
5308 				*offset = length;
5309 				return (NULL);
5310 			}
5311 			break;
5312 		case SCTP_COOKIE_ECHO:
5313 			SCTPDBG(SCTP_DEBUG_INPUT3,
5314 				"SCTP_COOKIE_ECHO, stcb %p\n", (void *)stcb);
5315 			if ((stcb != NULL) && (stcb->asoc.total_output_queue_size > 0)) {
5316 				;
5317 			} else {
5318 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5319 					/* We are not interested anymore */
5320 				abend:
5321 					if (stcb != NULL) {
5322 						SCTP_TCB_UNLOCK(stcb);
5323 					}
5324 					*offset = length;
5325 					return (NULL);
5326 				}
5327 			}
5328 			/*-
5329 			 * First are we accepting? We do this again here
5330 			 * since it is possible that a previous endpoint WAS
5331 			 * listening, responded to an INIT-ACK, and then
5332 			 * closed. We opened and bound... and are now no
5333 			 * longer listening.
5334 			 *
5335 			 * XXXGL: notes on checking listen queue length.
5336 			 * 1) SCTP_IS_LISTENING() doesn't necessarily mean
5337 			 *    SOLISTENING(), because a listening "UDP type"
5338 			 *    socket isn't listening in terms of the socket
5339 			 *    layer.  It is a normal data flow socket, that
5340 			 *    can fork off new connections.  Thus, we should
5341 			 *    look into sol_qlen only in case we are !UDP.
5342 			 * 2) Checking sol_qlen in general requires locking
5343 			 *    the socket, and this code lacks that.
5344 			 */
5345 			if ((stcb == NULL) &&
5346 			    (!SCTP_IS_LISTENING(inp) ||
5347 			     (!(inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
5348 #if defined(__FreeBSD__) && !defined(__Userspace__)
5349 			      inp->sctp_socket->sol_qlen >= inp->sctp_socket->sol_qlimit))) {
5350 #else
5351 			      inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit))) {
5352 #endif
5353 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
5354 				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
5355 					op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5356 					sctp_abort_association(inp, stcb, m, iphlen,
5357 					                       src, dst, sh, op_err,
5358 #if defined(__FreeBSD__) && !defined(__Userspace__)
5359 					                       mflowtype, mflowid,
5360 #endif
5361 					                       vrf_id, port);
5362 				}
5363 				*offset = length;
5364 				return (NULL);
5365 			} else {
5366 				struct mbuf *ret_buf;
5367 				struct sctp_inpcb *linp;
5368 				struct sctp_tmit_chunk *chk;
5369 
5370 				if (stcb) {
5371 					linp = NULL;
5372 				} else {
5373 					linp = inp;
5374 				}
5375 
5376 				if (linp != NULL) {
5377 					SCTP_ASOC_CREATE_LOCK(linp);
5378 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5379 					    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5380 						SCTP_ASOC_CREATE_UNLOCK(linp);
5381 						goto abend;
5382 					}
5383 				}
5384 
5385 				if (netp != NULL) {
5386 					struct sctp_tcb *locked_stcb;
5387 
5388 					locked_stcb = stcb;
5389 					ret_buf =
5390 						sctp_handle_cookie_echo(m, iphlen,
5391 						                        *offset,
5392 						                        src, dst,
5393 						                        sh,
5394 						                        (struct sctp_cookie_echo_chunk *)ch,
5395 						                        &inp, &stcb, netp,
5396 						                        auth_skipped,
5397 						                        auth_offset,
5398 						                        auth_len,
5399 						                        &locked_stcb,
5400 #if defined(__FreeBSD__) && !defined(__Userspace__)
5401 						                        mflowtype,
5402 						                        mflowid,
5403 #endif
5404 						                        vrf_id,
5405 						                        port);
5406 					if ((locked_stcb != NULL) && (locked_stcb != stcb)) {
5407 						SCTP_TCB_UNLOCK(locked_stcb);
5408 					}
5409 					if (stcb != NULL) {
5410 						SCTP_TCB_LOCK_ASSERT(stcb);
5411 					}
5412 				} else {
5413 					ret_buf = NULL;
5414 				}
5415 				if (linp != NULL) {
5416 					SCTP_ASOC_CREATE_UNLOCK(linp);
5417 				}
5418 				if (ret_buf == NULL) {
5419 					if (stcb != NULL) {
5420 						SCTP_TCB_UNLOCK(stcb);
5421 					}
5422 					SCTPDBG(SCTP_DEBUG_INPUT3,
5423 						"GAK, null buffer\n");
5424 					*offset = length;
5425 					return (NULL);
5426 				}
5427 				/* if AUTH skipped, see if it verified... */
5428 				if (auth_skipped) {
5429 					got_auth = 1;
5430 					auth_skipped = 0;
5431 				}
5432 				/* Restart the timer if we have pending data */
5433 				TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
5434 					if (chk->whoTo != NULL) {
5435 						break;
5436 					}
5437 				}
5438 				if (chk != NULL) {
5439 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
5440 				}
5441 			}
5442 			break;
5443 		case SCTP_COOKIE_ACK:
5444 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE_ACK, stcb %p\n", (void *)stcb);
5445 			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5446 				return (stcb);
5447 			}
5448 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5449 				/* We are not interested anymore */
5450 				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5451 					;
5452 				} else if (stcb) {
5453 #if defined(__APPLE__) && !defined(__Userspace__)
5454 					so = SCTP_INP_SO(inp);
5455 					atomic_add_int(&stcb->asoc.refcnt, 1);
5456 					SCTP_TCB_UNLOCK(stcb);
5457 					SCTP_SOCKET_LOCK(so, 1);
5458 					SCTP_TCB_LOCK(stcb);
5459 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5460 #endif
5461 					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5462 					                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5463 #if defined(__APPLE__) && !defined(__Userspace__)
5464 					SCTP_SOCKET_UNLOCK(so, 1);
5465 #endif
5466 					*offset = length;
5467 					return (NULL);
5468 				}
5469 			}
5470 			if ((netp != NULL) && (*netp != NULL)) {
5471 				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
5472 			}
5473 			break;
5474 		case SCTP_ECN_ECHO:
5475 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_ECHO\n");
5476 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5477 				/* It's not ours */
5478 				*offset = length;
5479 				return (stcb);
5480 			}
5481 			if (stcb->asoc.ecn_supported == 0) {
5482 				goto unknown_chunk;
5483 			}
5484 			sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, stcb);
5485 			ecne_seen = 1;
5486 			break;
5487 		case SCTP_ECN_CWR:
5488 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_CWR\n");
5489 			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5490 				*offset = length;
5491 				return (stcb);
5492 			}
5493 			if (stcb->asoc.ecn_supported == 0) {
5494 				goto unknown_chunk;
5495 			}
5496 			sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5497 			break;
5498 		case SCTP_SHUTDOWN_COMPLETE:
5499 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_COMPLETE, stcb %p\n", (void *)stcb);
5500 			/* must be first and only chunk */
5501 			if ((num_chunks > 1) ||
5502 			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5503 				*offset = length;
5504 				return (stcb);
5505 			}
5506 			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5507 				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5508 							      stcb, *netp);
5509 			}
5510 			*offset = length;
5511 			return (NULL);
5512 			break;
5513 		case SCTP_ASCONF:
5514 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5515 			if (stcb != NULL) {
5516 				if (stcb->asoc.asconf_supported == 0) {
5517 					goto unknown_chunk;
5518 				}
5519 				sctp_handle_asconf(m, *offset, src,
5520 						   (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5521 				asconf_cnt++;
5522 			}
5523 			break;
5524 		case SCTP_ASCONF_ACK:
5525 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF_ACK\n");
5526 			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5527 				/* It's not ours */
5528 				*offset = length;
5529 				return (stcb);
5530 			}
5531 			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5532 				if (stcb->asoc.asconf_supported == 0) {
5533 					goto unknown_chunk;
5534 				}
5535 				/* He's alive, so give him credit */
5536 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5537 					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5538 						       stcb->asoc.overall_error_count,
5539 						       0,
5540 						       SCTP_FROM_SCTP_INPUT,
5541 						       __LINE__);
5542 				}
5543 				stcb->asoc.overall_error_count = 0;
5544 				sctp_handle_asconf_ack(m, *offset,
5545 						       (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5546 				if (abort_no_unlock)
5547 					return (NULL);
5548 			}
5549 			break;
5550 		case SCTP_FORWARD_CUM_TSN:
5551 		case SCTP_IFORWARD_CUM_TSN:
5552 			SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
5553 				ch->chunk_type == SCTP_FORWARD_CUM_TSN ? "FORWARD_TSN" : "I_FORWARD_TSN");
5554 			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5555 				/* It's not ours */
5556 				*offset = length;
5557 				return (stcb);
5558 			}
5559 
5560 			if (stcb != NULL) {
5561 				int abort_flag = 0;
5562 
5563 				if (stcb->asoc.prsctp_supported == 0) {
5564 					goto unknown_chunk;
5565 				}
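				/*
				 * The FORWARD-TSN variant must match what was
				 * negotiated: plain FORWARD-TSN when I-DATA is
				 * off, I-FORWARD-TSN when it is on. A mismatch
				 * is a protocol violation and aborts the
				 * association.
				 */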
5566 				if (((stcb->asoc.idata_supported == 1) && (ch->chunk_type == SCTP_FORWARD_CUM_TSN)) ||
5567 				    ((stcb->asoc.idata_supported == 0) && (ch->chunk_type == SCTP_IFORWARD_CUM_TSN))) {
5568 					if (ch->chunk_type == SCTP_FORWARD_CUM_TSN) {
5569 						SCTP_SNPRINTF(msg, sizeof(msg), "%s", "FORWARD-TSN chunk received when I-FORWARD-TSN was negotiated");
5570 					} else {
5571 						SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-FORWARD-TSN chunk received when FORWARD-TSN was negotiated");
5572 					}
5573 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5574 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
5575 					*offset = length;
5576 					return (NULL);
5577 				}
5578 				*fwd_tsn_seen = 1;
5579 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5580 					/* We are not interested anymore */
5581 #if defined(__APPLE__) && !defined(__Userspace__)
5582 					so = SCTP_INP_SO(inp);
5583 					atomic_add_int(&stcb->asoc.refcnt, 1);
5584 					SCTP_TCB_UNLOCK(stcb);
5585 					SCTP_SOCKET_LOCK(so, 1);
5586 					SCTP_TCB_LOCK(stcb);
5587 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5588 #endif
5589 					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5590 					                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_31);
5591 #if defined(__APPLE__) && !defined(__Userspace__)
5592 					SCTP_SOCKET_UNLOCK(so, 1);
5593 #endif
5594 					*offset = length;
5595 					return (NULL);
5596 				}
5597 				/*
5598 				 * For the purpose of sending a SACK, this looks
5599 				 * like DATA chunks.
5600 				 */
5601 				stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
5602 				sctp_handle_forward_tsn(stcb,
5603 							(struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5604 				if (abort_flag) {
5605 					*offset = length;
5606 					return (NULL);
5607 				}
5608 			}
5609 			break;
5610 		case SCTP_STREAM_RESET:
5611 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5612 			if ((stcb == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req))) {
5613 				/* It's not ours */
5614 				*offset = length;
5615 				return (stcb);
5616 			}
5617 			if (stcb->asoc.reconfig_supported == 0) {
5618 				goto unknown_chunk;
5619 			}
5620 			if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
5621 				/* stop processing */
5622 				*offset = length;
5623 				return (NULL);
5624 			}
5625 			break;
5626 		case SCTP_PACKET_DROPPED:
5627 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5628 			/* re-get it all please */
5629 			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5630 				/* It's not ours */
5631 				*offset = length;
5632 				return (stcb);
5633 			}
5634 
5635 			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5636 				if (stcb->asoc.pktdrop_supported == 0) {
5637 					goto unknown_chunk;
5638 				}
5639 				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5640 							   stcb, *netp,
5641 							   min(chk_length, contiguous));
5642 			}
5643 			break;
5644 		case SCTP_AUTHENTICATION:
5645 			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5646 			if (stcb == NULL) {
5647 				/* save the first AUTH for later processing */
5648 				if (auth_skipped == 0) {
5649 					auth_offset = *offset;
5650 					auth_len = chk_length;
5651 					auth_skipped = 1;
5652 				}
5653 				/* skip this chunk (temporarily) */
5654 				goto next_chunk;
5655 			}
5656 			if (stcb->asoc.auth_supported == 0) {
5657 				goto unknown_chunk;
5658 			}
5659 			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5660 			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5661 					   SCTP_AUTH_DIGEST_LEN_MAX))) {
5662 				/* It's not ours */
5663 				*offset = length;
5664 				return (stcb);
5665 			}
5666 			if (got_auth == 1) {
5667 				/* skip this chunk... it's already auth'd */
5668 				goto next_chunk;
5669 			}
5670 			got_auth = 1;
5671 			if (sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, m, *offset)) {
5672 				/* auth HMAC failed so dump the packet */
5673 				*offset = length;
5674 				return (stcb);
5675 			} else {
5676 				/* remaining chunks are HMAC checked */
5677 				stcb->asoc.authenticated = 1;
5678 			}
5679 			break;
5680 
5681 		default:
5682 		unknown_chunk:
5683 			/* it's an unknown chunk! */
5684 			if ((ch->chunk_type & 0x40) &&
5685 			    (stcb != NULL) &&
5686 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_EMPTY) &&
5687 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_INUSE) &&
5688 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
5689 				struct sctp_gen_error_cause *cause;
5690 				int len;
5691 
5692 				op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
5693 				                               0, M_NOWAIT, 1, MT_DATA);
5694 				if (op_err != NULL) {
5695 					len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset));
5696 					cause = mtod(op_err, struct sctp_gen_error_cause *);
5697 					cause->code =  htons(SCTP_CAUSE_UNRECOG_CHUNK);
5698 					cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause)));
5699 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5700 					SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
5701 					if (SCTP_BUF_NEXT(op_err) != NULL) {
5702 #ifdef SCTP_MBUF_LOGGING
5703 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5704 							sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY);
5705 						}
5706 #endif
5707 						sctp_queue_op_err(stcb, op_err);
5708 					} else {
5709 						sctp_m_freem(op_err);
5710 					}
5711 				}
5712 			}
5713 			if ((ch->chunk_type & 0x80) == 0) {
5714 				/* discard this packet */
5715 				*offset = length;
5716 				return (stcb);
5717 			}	/* else skip this bad chunk and continue... */
5718 			break;
5719 		}		/* switch (ch->chunk_type) */
5720 
5721 
5722 	next_chunk:
5723 		/* get the next chunk */
5724 		*offset += SCTP_SIZE32(chk_length);
5725 		if (*offset >= length) {
5726 			/* no more data left in the mbuf chain */
5727 			break;
5728 		}
5729 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5730 							   sizeof(struct sctp_chunkhdr), chunk_buf);
5731 		if (ch == NULL) {
5732 			*offset = length;
5733 			return (stcb);
5734 		}
5735 	}			/* while */
5736 
5737 	if ((asconf_cnt > 0) && (stcb != NULL)) {
5738 		sctp_send_asconf_ack(stcb);
5739 	}
5740 	return (stcb);
5741 }
5742 
5743 
5744 /*
5745  * common input chunk processing (v4 and v6)
5746  */
5747 void
5748 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
5749                              struct sockaddr *src, struct sockaddr *dst,
5750                              struct sctphdr *sh, struct sctp_chunkhdr *ch,
5751                              uint8_t compute_crc,
5752                              uint8_t ecn_bits,
5753 #if defined(__FreeBSD__) && !defined(__Userspace__)
5754                              uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
5755 #endif
5756                              uint32_t vrf_id, uint16_t port)
5757 {
5758 	uint32_t high_tsn;
5759 	int fwd_tsn_seen = 0, data_processed = 0;
5760 	struct mbuf *m = *mm, *op_err;
5761 	char msg[SCTP_DIAG_INFO_LEN];
5762 	int un_sent;
5763 	int cnt_ctrl_ready = 0;
5764 	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
5765 	struct sctp_tcb *stcb = NULL;
5766 	struct sctp_nets *net = NULL;
5767 #if defined(__Userspace__)
5768 	struct socket *upcall_socket = NULL;
5769 #endif
5770 
5771 	SCTP_STAT_INCR(sctps_recvdatagrams);
5772 #ifdef SCTP_AUDITING_ENABLED
5773 	sctp_audit_log(0xE0, 1);
5774 	sctp_auditing(0, inp, stcb, net);
5775 #endif
5776 	if (compute_crc != 0) {
5777 		uint32_t check, calc_check;
5778 
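		/*
		 * The SCTP checksum is a CRC32c computed over the whole
		 * packet with the checksum field zeroed: save the received
		 * value, recompute, restore, and compare.
		 */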
5779 		check = sh->checksum;
5780 		sh->checksum = 0;
5781 		calc_check = sctp_calculate_cksum(m, iphlen);
5782 		sh->checksum = check;
5783 		if (calc_check != check) {
5784 			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
5785 			        calc_check, check, (void *)m, length, iphlen);
5786 			stcb = sctp_findassociation_addr(m, offset, src, dst,
5787 			                                 sh, ch, &inp, &net, vrf_id);
5788 #if defined(INET) || defined(INET6)
5789 			if ((ch->chunk_type != SCTP_INITIATION) &&
5790 			    (net != NULL) && (net->port != port)) {
5791 				if (net->port == 0) {
5792 					/* UDP encapsulation turned on. */
5793 					net->mtu -= sizeof(struct udphdr);
5794 					if (stcb->asoc.smallest_mtu > net->mtu) {
5795 						sctp_pathmtu_adjustment(stcb, net->mtu);
5796 					}
5797 				} else if (port == 0) {
5798 					/* UDP encapsulation turned off. */
5799 					net->mtu += sizeof(struct udphdr);
5800 					/* XXX Update smallest_mtu */
5801 				}
5802 				net->port = port;
5803 			}
5804 #endif
5805 #if defined(__FreeBSD__) && !defined(__Userspace__)
5806 			if (net != NULL) {
5807 				net->flowtype = mflowtype;
5808 				net->flowid = mflowid;
5809 			}
5810 			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5811 #endif
5812 			if ((inp != NULL) && (stcb != NULL)) {
5813 				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
5814 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5815 			} else if ((inp != NULL) && (stcb == NULL)) {
5816 				inp_decr = inp;
5817 			}
5818 			SCTP_STAT_INCR(sctps_badsum);
5819 			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5820 			goto out;
5821 		}
5822 	}
5823 	/* Destination port of 0 is illegal, based on RFC4960. */
5824 	if (sh->dest_port == 0) {
5825 		SCTP_STAT_INCR(sctps_hdrops);
5826 		goto out;
5827 	}
5828 	stcb = sctp_findassociation_addr(m, offset, src, dst,
5829 	                                 sh, ch, &inp, &net, vrf_id);
5830 #if defined(INET) || defined(INET6)
5831 	if ((ch->chunk_type != SCTP_INITIATION) &&
5832 	    (net != NULL) && (net->port != port)) {
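		/*
		 * The remote UDP encapsulation port changed: adjust the path
		 * MTU by the size of the UDP header and remember the new
		 * port.
		 */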
5833 		if (net->port == 0) {
5834 			/* UDP encapsulation turned on. */
5835 			net->mtu -= sizeof(struct udphdr);
5836 			if (stcb->asoc.smallest_mtu > net->mtu) {
5837 				sctp_pathmtu_adjustment(stcb, net->mtu);
5838 			}
5839 		} else if (port == 0) {
5840 			/* UDP encapsulation turned off. */
5841 			net->mtu += sizeof(struct udphdr);
5842 			/* XXX Update smallest_mtu */
5843 		}
5844 		net->port = port;
5845 	}
5846 #endif
5847 #if defined(__FreeBSD__) && !defined(__Userspace__)
5848 	if (net != NULL) {
5849 		net->flowtype = mflowtype;
5850 		net->flowid = mflowid;
5851 	}
5852 #endif
5853 	if (inp == NULL) {
5854 #if defined(__FreeBSD__) && !defined(__Userspace__)
5855 		SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5856 #endif
5857 		SCTP_STAT_INCR(sctps_noport);
5858 #if defined(__FreeBSD__) && !defined(__Userspace__)
5859 		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
5860 			goto out;
5861 		}
5862 #endif
5863 		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5864 			sctp_send_shutdown_complete2(src, dst, sh,
5865 #if defined(__FreeBSD__) && !defined(__Userspace__)
5866 			                             mflowtype, mflowid, fibnum,
5867 #endif
5868 			                             vrf_id, port);
5869 			goto out;
5870 		}
5871 		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5872 			goto out;
5873 		}
5874 		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
5875 			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
5876 			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
5877 			     (ch->chunk_type != SCTP_INIT))) {
5878 				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5879 				                             "Out of the blue");
5880 				sctp_send_abort(m, iphlen, src, dst,
5881 				                sh, 0, op_err,
5882 #if defined(__FreeBSD__) && !defined(__Userspace__)
5883 				                mflowtype, mflowid, fibnum,
5884 #endif
5885 				                vrf_id, port);
5886 			}
5887 		}
5888 		goto out;
5889 	} else if (stcb == NULL) {
5890 		inp_decr = inp;
5891 	}
5892 	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5893 		(void *)m, iphlen, offset, length, (void *)stcb);
5894 	if (stcb) {
5895 		/* always clear this before beginning a packet */
5896 		stcb->asoc.authenticated = 0;
5897 		stcb->asoc.seen_a_sack_this_pkt = 0;
5898 		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5899 			(void *)stcb, stcb->asoc.state);
5900 
5901 		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5902 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5903 			/*-
5904 			 * If we hit here, we had a ref count
5905 			 * up when the assoc was aborted and the
5906 			 * timer is clearing out the assoc; we should
5907 			 * NOT respond to any packet... it's OOTB.
5908 			 */
5909 			SCTP_TCB_UNLOCK(stcb);
5910 			stcb = NULL;
5911 #if defined(__FreeBSD__) && !defined(__Userspace__)
5912 			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5913 #endif
5914 			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
5915 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5916 			                             msg);
5917 			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5918 #if defined(__FreeBSD__) && !defined(__Userspace__)
5919 			                 mflowtype, mflowid, inp->fibnum,
5920 #endif
5921 			                 vrf_id, port);
5922 			goto out;
5923 		}
5924 	}
5925 #if defined(__Userspace__)
5926 	if ((stcb != NULL) &&
5927 	    !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
5928 	    (stcb->sctp_socket != NULL)) {
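		/*
		 * Pick the socket that will receive the upcall: the listening
		 * head socket if this association was spawned from a listener,
		 * otherwise the association's own socket. Take a reference so
		 * it stays valid while we process this packet.
		 */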
5929 		if (stcb->sctp_socket->so_head != NULL) {
5930 			upcall_socket = stcb->sctp_socket->so_head;
5931 		} else {
5932 			upcall_socket = stcb->sctp_socket;
5933 		}
5934 		SOCK_LOCK(upcall_socket);
5935 		soref(upcall_socket);
5936 		SOCK_UNLOCK(upcall_socket);
5937 	}
5938 #endif
5939 	if (IS_SCTP_CONTROL(ch)) {
5940 		/* process the control portion of the SCTP packet */
5941 		/* sa_ignore NO_NULL_CHK */
5942 		stcb = sctp_process_control(m, iphlen, &offset, length,
5943 		                            src, dst, sh, ch,
5944 		                            inp, stcb, &net, &fwd_tsn_seen,
5945 #if defined(__FreeBSD__) && !defined(__Userspace__)
5946 		                            mflowtype, mflowid, fibnum,
5947 #endif
5948 		                            vrf_id, port);
5949 		if (stcb) {
5950 			/* This covers the case where a cookie-echo was
5951 			 * present and it changed our INP.
5952 			 */
5953 			inp = stcb->sctp_ep;
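			/*
			 * If the peer switched UDP encapsulation on or off
			 * since the last packet from this address, adjust the
			 * path MTU by the size of a UDP header and remember
			 * the new remote encapsulation port.
			 */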
5954 #if defined(INET) || defined(INET6)
5955 			if ((ch->chunk_type != SCTP_INITIATION) &&
5956 			    (net != NULL) && (net->port != port)) {
5957 				if (net->port == 0) {
5958 					/* UDP encapsulation turned on. */
5959 					net->mtu -= sizeof(struct udphdr);
5960 					if (stcb->asoc.smallest_mtu > net->mtu) {
5961 						sctp_pathmtu_adjustment(stcb, net->mtu);
5962 					}
5963 				} else if (port == 0) {
5964 					/* UDP encapsulation turned off. */
5965 					net->mtu += sizeof(struct udphdr);
5966 					/* XXX Update smallest_mtu */
5967 				}
5968 				net->port = port;
5969 			}
5970 #endif
5971 		}
5972 	} else {
5973 		/*
5974 		 * No control chunks, so pre-process the DATA chunks here
5975 		 * (these checks are otherwise done during control processing).
5976 		 */
5977 
5978 		/*
5979 		 * If this is a DATA-only packet and AUTH is required, then
5980 		 * punt: the packet cannot have been authenticated without
5981 		 * any AUTH (control) chunks.
5982 		 */
5983 		if ((stcb != NULL) &&
5984 		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5985 			/* "silently" ignore */
5986 #if defined(__FreeBSD__) && !defined(__Userspace__)
5987 			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
5988 #endif
5989 			SCTP_STAT_INCR(sctps_recvauthmissing);
5990 			goto out;
5991 		}
5992 		if (stcb == NULL) {
5993 			/* out of the blue DATA chunk */
5994 #if defined(__FreeBSD__) && !defined(__Userspace__)
5995 			SCTP_PROBE5(receive, NULL, NULL, m, NULL, sh);
5996 #endif
5997 			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
5998 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5999 			                             msg);
6000 			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
6001 #if defined(__FreeBSD__) && !defined(__Userspace__)
6002 			                 mflowtype, mflowid, fibnum,
6003 #endif
6004 					 vrf_id, port);
6005 			goto out;
6006 		}
6007 		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
6008 			/* v_tag mismatch! */
6009 #if defined(__FreeBSD__) && !defined(__Userspace__)
6010 			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
6011 #endif
6012 			SCTP_STAT_INCR(sctps_badvtag);
6013 			goto out;
6014 		}
6015 	}
6016 
6017 #if defined(__FreeBSD__) && !defined(__Userspace__)
6018 	SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
6019 #endif
6020 	if (stcb == NULL) {
6021 		/*
6022 		 * no valid TCB for this packet, or we found it's a bad
6023 		 * packet while processing control, or we're done with this
6024 		 * packet (done or skip rest of data), so we drop it...
6025 		 */
6026 		goto out;
6027 	}
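	/*
	 * The association may only have been located (or created) by the
	 * control-chunk processing above, so take the upcall socket
	 * reference now if it was not taken earlier.
	 */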
6028 #if defined(__Userspace__)
6029 	if ((upcall_socket == NULL) &&
6030 	    !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
6031 	    (stcb->sctp_socket != NULL)) {
6032 		if (stcb->sctp_socket->so_head != NULL) {
6033 			upcall_socket = stcb->sctp_socket->so_head;
6034 		} else {
6035 			upcall_socket = stcb->sctp_socket;
6036 		}
6037 		SOCK_LOCK(upcall_socket);
6038 		soref(upcall_socket);
6039 		SOCK_UNLOCK(upcall_socket);
6040 	}
6041 #endif
6042 
6043 	/*
6044 	 * DATA chunk processing
6045 	 */
6046 	/* plow through the data chunks while length > offset */
6047 
6048 	/*
6049 	 * Rest should be DATA only.  Check authentication state if AUTH for
6050 	 * DATA is required.
6051 	 */
6052 	if ((length > offset) &&
6053 	    (stcb != NULL) &&
6054 	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
6055 	    !stcb->asoc.authenticated) {
6056 		/* "silently" ignore */
6057 		SCTP_STAT_INCR(sctps_recvauthmissing);
6058 		SCTPDBG(SCTP_DEBUG_AUTH1,
6059 			"Data chunk requires AUTH, skipped\n");
6060 		goto trigger_send;
6061 	}
6062 	if (length > offset) {
6063 		int retval;
6064 
6065 		/*
6066 		 * First check to make sure our state is correct. We would
6067 		 * not get here unless we really did have a tag, so we don't
6068 		 * abort if this happens, just dump the chunk silently.
6069 		 */
6070 		switch (SCTP_GET_STATE(stcb)) {
6071 		case SCTP_STATE_COOKIE_ECHOED:
6072 			/*
6073 			 * Data carrying a valid tag in this state tells us
6074 			 * that the cookie-ack was lost.  Act as if it had
6075 			 * arrived.
6076 			 */
6077 			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
6078 			break;
6079 		case SCTP_STATE_COOKIE_WAIT:
6080 			/*
6081 			 * We consider OOTB any data sent during asoc setup.
6082 			 */
6083 			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
6084 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6085 			                             msg);
6086 			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
6087 #if defined(__FreeBSD__) && !defined(__Userspace__)
6088 			                 mflowtype, mflowid, inp->fibnum,
6089 #endif
6090 					 vrf_id, port);
6091 			goto out;
6092 			/*sa_ignore NOTREACHED*/
6093 			break;
6094 		case SCTP_STATE_EMPTY:	/* should not happen */
6095 		case SCTP_STATE_INUSE:	/* should not happen */
6096 		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
6097 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
6098 		default:
6099 			goto out;
6100 			/*sa_ignore NOTREACHED*/
6101 			break;
6102 		case SCTP_STATE_OPEN:
6103 		case SCTP_STATE_SHUTDOWN_SENT:
6104 			break;
6105 		}
6106 		/* plow through the data chunks while length > offset */
6107 		retval = sctp_process_data(mm, iphlen, &offset, length,
6108 		                           inp, stcb, net, &high_tsn);
6109 		if (retval == 2) {
6110 			/*
6111 			 * The association aborted; NO UNLOCK is needed since
6112 			 * the association is destroyed.
6113 			 */
6114 			stcb = NULL;
6115 			goto out;
6116 		}
6117 		data_processed = 1;
6118 		/*
6119 		 * Anything important needs to have been m_copy'ed in
6120 		 * process_data
6121 		 */
6122 	}
6123 
6124 	/* take care of ecn */
6125 	if ((data_processed == 1) &&
6126 	    (stcb->asoc.ecn_supported == 1) &&
6127 	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
6128 		/* Yep, we need to add an ECNE */
6129 		sctp_send_ecn_echo(stcb, net, high_tsn);
6130 	}
6131 
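	/*
	 * A FORWARD-TSN chunk can advance the cumulative TSN even when no
	 * DATA chunks were processed from this packet, so make sure a SACK
	 * gets scheduled; sctp_sack_check() is told whether the mapping
	 * array currently shows a gap.
	 */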
6132 	if ((data_processed == 0) && (fwd_tsn_seen)) {
6133 		int was_a_gap;
6134 		uint32_t highest_tsn;
6135 
6136 		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
6137 			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
6138 		} else {
6139 			highest_tsn = stcb->asoc.highest_tsn_inside_map;
6140 		}
6141 		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
6142 		stcb->asoc.send_sack = 1;
6143 		sctp_sack_check(stcb, was_a_gap);
6144 	} else if (fwd_tsn_seen) {
6145 		stcb->asoc.send_sack = 1;
6146 	}
6147 	/* trigger send of any chunks in queue... */
6148 trigger_send:
6149 #ifdef SCTP_AUDITING_ENABLED
6150 	sctp_audit_log(0xE0, 2);
6151 	sctp_auditing(1, inp, stcb, net);
6152 #endif
6153 	SCTPDBG(SCTP_DEBUG_INPUT1,
6154 		"Check for chunk output prw:%d tqe:%d tf=%d\n",
6155 		stcb->asoc.peers_rwnd,
6156 		TAILQ_EMPTY(&stcb->asoc.control_send_queue),
6157 		stcb->asoc.total_flight);
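	/* Data queued for transmission that is not currently in flight. */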
6158 	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
6159 	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
6160 		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
6161 	}
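	/*
	 * Kick the output path if there is anything worth sending: queued
	 * ASCONF chunks, control chunks other than pending ECN-echoes, a
	 * pending stream reset, or unsent user data while either the peer
	 * advertises a non-zero window or nothing is in flight.
	 */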
6162 	if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
6163 	    cnt_ctrl_ready ||
6164 	    stcb->asoc.trigger_reset ||
6165 	    ((un_sent > 0) &&
6166 	     (stcb->asoc.peers_rwnd > 0 || stcb->asoc.total_flight == 0))) {
6167 		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
6168 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
6169 		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
6170 	}
6171 #ifdef SCTP_AUDITING_ENABLED
6172 	sctp_audit_log(0xE0, 3);
6173 	sctp_auditing(2, inp, stcb, net);
6174 #endif
6175  out:
6176 	if (stcb != NULL) {
6177 		SCTP_TCB_UNLOCK(stcb);
6178 	}
6179 #if defined(__Userspace__)
6180 	if (upcall_socket != NULL) {
6181 		if (upcall_socket->so_upcall != NULL) {
6182 			if (soreadable(upcall_socket) ||
6183 			    sowriteable(upcall_socket) ||
6184 			    upcall_socket->so_error) {
6185 				(*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
6186 			}
6187 		}
6188 		ACCEPT_LOCK();
6189 		SOCK_LOCK(upcall_socket);
6190 		sorele(upcall_socket);
6191 	}
6192 #endif
6193 	if (inp_decr != NULL) {
6194 		/* reduce ref-count */
6195 		SCTP_INP_WLOCK(inp_decr);
6196 		SCTP_INP_DECR_REF(inp_decr);
6197 		SCTP_INP_WUNLOCK(inp_decr);
6198 	}
6199 	return;
6200 }
6201 
6202 #ifdef INET
6203 #if !defined(__Userspace__)
6204 void
6205 sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
6206 {
6207 	struct mbuf *m;
6208 	int iphlen;
6209 	uint32_t vrf_id = 0;
6210 	uint8_t ecn_bits;
6211 	struct sockaddr_in src, dst;
6212 	struct ip *ip;
6213 	struct sctphdr *sh;
6214 	struct sctp_chunkhdr *ch;
6215 	int length, offset;
6216 	uint8_t compute_crc;
6217 #if defined(__FreeBSD__) && !defined(__Userspace__)
6218 	uint32_t mflowid;
6219 	uint8_t mflowtype;
6220 	uint16_t fibnum;
6221 #endif
6222 #if defined(__Userspace__)
6223 	uint16_t port = 0;
6224 #endif
6225 
6226 	iphlen = off;
6227 	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
6228 		SCTP_RELEASE_PKT(i_pak);
6229 		return;
6230 	}
6231 	m = SCTP_HEADER_TO_CHAIN(i_pak);
6232 #ifdef SCTP_MBUF_LOGGING
6233 	/* Log any input mbufs */
6234 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6235 		sctp_log_mbc(m, SCTP_MBUF_INPUT);
6236 	}
6237 #endif
6238 #ifdef SCTP_PACKET_LOGGING
6239 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
6240 		sctp_packet_log(m);
6241 	}
6242 #endif
6243 #if defined(__FreeBSD__) && !defined(__Userspace__)
6244 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6245 	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6246 	        m->m_pkthdr.len,
6247 	        if_name(m->m_pkthdr.rcvif),
6248 	        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6249 #endif
6250 #if defined(__APPLE__) && !defined(__Userspace__)
6251 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6252 	        "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n",
6253 	        m->m_pkthdr.len,
6254 	        m->m_pkthdr.rcvif->if_name,
6255 	        m->m_pkthdr.rcvif->if_unit,
6256 	        m->m_pkthdr.csum_flags);
6257 #endif
6258 #if defined(_WIN32) && !defined(__Userspace__)
6259 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6260 	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
6261 	        m->m_pkthdr.len,
6262 	        m->m_pkthdr.rcvif->if_xname,
6263 	        m->m_pkthdr.csum_flags);
6264 #endif
6265 #if defined(__FreeBSD__) && !defined(__Userspace__)
6266 	mflowid = m->m_pkthdr.flowid;
6267 	mflowtype = M_HASHTYPE_GET(m);
6268 	fibnum = M_GETFIB(m);
6269 #endif
6270 	SCTP_STAT_INCR(sctps_recvpackets);
6271 	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
6272 	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
6273 	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
6274 	if (SCTP_BUF_LEN(m) < offset) {
6275 		if ((m = m_pullup(m, offset)) == NULL) {
6276 			SCTP_STAT_INCR(sctps_hdrops);
6277 			return;
6278 		}
6279 	}
6280 	ip = mtod(m, struct ip *);
6281 	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
6282 	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
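	/*
	 * The pullup above needed the first chunk header as well; common
	 * input processing, however, expects offset to point at the first
	 * chunk, i.e. just past the SCTP common header.
	 */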
6283 	offset -= sizeof(struct sctp_chunkhdr);
6284 	memset(&src, 0, sizeof(struct sockaddr_in));
6285 	src.sin_family = AF_INET;
6286 #ifdef HAVE_SIN_LEN
6287 	src.sin_len = sizeof(struct sockaddr_in);
6288 #endif
6289 	src.sin_port = sh->src_port;
6290 	src.sin_addr = ip->ip_src;
6291 	memset(&dst, 0, sizeof(struct sockaddr_in));
6292 	dst.sin_family = AF_INET;
6293 #ifdef HAVE_SIN_LEN
6294 	dst.sin_len = sizeof(struct sockaddr_in);
6295 #endif
6296 	dst.sin_port = sh->dest_port;
6297 	dst.sin_addr = ip->ip_dst;
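	/*
	 * ip_len is stored differently depending on the stack: it may be in
	 * network or host byte order and may or may not include the IP
	 * header length, so normalize it into the total packet length here.
	 */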
6298 #if defined(_WIN32) && !defined(__Userspace__)
6299 	NTOHS(ip->ip_len);
6300 #endif
6301 #if defined(__linux__) || (defined(_WIN32) && defined(__Userspace__))
6302 	ip->ip_len = ntohs(ip->ip_len);
6303 #endif
6304 #if defined(__Userspace__)
6305 #if defined(__linux__) || defined(_WIN32)
6306 	length = ip->ip_len;
6307 #else
6308 	length = ip->ip_len + iphlen;
6309 #endif
6310 #elif defined(__FreeBSD__)
6311 	length = ntohs(ip->ip_len);
6312 #elif defined(__APPLE__)
6313 	length = ip->ip_len + iphlen;
6314 #else
6315 	length = ip->ip_len;
6316 #endif
6317 	/* Validate mbuf chain length with IP payload length. */
6318 	if (SCTP_HEADER_LEN(m) != length) {
6319 		SCTPDBG(SCTP_DEBUG_INPUT1,
6320 		        "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
6321 		SCTP_STAT_INCR(sctps_hdrops);
6322 		goto out;
6323 	}
6324 	/* SCTP does not allow broadcasts or multicasts */
6325 	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
6326 		goto out;
6327 	}
6328 	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
6329 		goto out;
6330 	}
6331 	ecn_bits = ip->ip_tos;
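	/*
	 * Decide whether the CRC32c still has to be verified in software:
	 * on FreeBSD the NIC may already have validated it, while the other
	 * stacks may skip it for loopback traffic when
	 * sctp_no_csum_on_loopback is set.
	 */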
6332 #if defined(__FreeBSD__) && !defined(__Userspace__)
6333 	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
6334 		SCTP_STAT_INCR(sctps_recvhwcrc);
6335 		compute_crc = 0;
6336 	} else {
6337 #else
6338 	if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
6339 	    ((src.sin_addr.s_addr == dst.sin_addr.s_addr) ||
6340 	     (SCTP_IS_IT_LOOPBACK(m)))) {
6341 		SCTP_STAT_INCR(sctps_recvhwcrc);
6342 		compute_crc = 0;
6343 	} else {
6344 #endif
6345 		SCTP_STAT_INCR(sctps_recvswcrc);
6346 		compute_crc = 1;
6347 	}
6348 	sctp_common_input_processing(&m, iphlen, offset, length,
6349 	                             (struct sockaddr *)&src,
6350 	                             (struct sockaddr *)&dst,
6351 	                             sh, ch,
6352 	                             compute_crc,
6353 	                             ecn_bits,
6354 #if defined(__FreeBSD__) && !defined(__Userspace__)
6355 	                             mflowtype, mflowid, fibnum,
6356 #endif
6357 	                             vrf_id, port);
6358  out:
6359 	if (m) {
6360 		sctp_m_freem(m);
6361 	}
6362 	return;
6363 }
6364 
6365 #if defined(__FreeBSD__) && !defined(__Userspace__)
6366 #if defined(SCTP_MCORE_INPUT) && defined(SMP)
6367 extern int *sctp_cpuarry;
6368 #endif
6369 #endif
6370 
6371 #if defined(__FreeBSD__) && !defined(__Userspace__)
6372 int
6373 sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
6374 {
6375 	struct mbuf *m;
6376 	int off;
6377 
6378 	m = *mp;
6379 	off = *offp;
6380 #else
6381 void
6382 sctp_input(struct mbuf *m, int off)
6383 {
6384 #endif
6385 #if defined(__FreeBSD__) && !defined(__Userspace__)
6386 #if defined(SCTP_MCORE_INPUT) && defined(SMP)
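	/*
	 * With more than one CPU, steer the packet to a worker core based
	 * on a flow id so that packets belonging to the same flow are
	 * processed on the same core; derive a flow id from the verification
	 * tag and the port numbers if the lower layers did not provide one.
	 */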
6387 	if (mp_ncpus > 1) {
6388 		struct ip *ip;
6389 		struct sctphdr *sh;
6390 		int offset;
6391 		int cpu_to_use;
6392 		uint32_t flowid, tag;
6393 
6394 		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
6395 			flowid = m->m_pkthdr.flowid;
6396 		} else {
6397 			/* No flow id was built by the lower layers,
6398 			 * so create one here.
6399 			 */
6400 			offset = off + sizeof(struct sctphdr);
6401 			if (SCTP_BUF_LEN(m) < offset) {
6402 				if ((m = m_pullup(m, offset)) == NULL) {
6403 					SCTP_STAT_INCR(sctps_hdrops);
6404 					return (IPPROTO_DONE);
6405 				}
6406 			}
6407 			ip = mtod(m, struct ip *);
6408 			sh = (struct sctphdr *)((caddr_t)ip + off);
6409 			tag = htonl(sh->v_tag);
6410 			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
6411 			m->m_pkthdr.flowid = flowid;
6412 			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
6413 		}
6414 		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
6415 		sctp_queue_to_mcore(m, off, cpu_to_use);
6416 		return (IPPROTO_DONE);
6417 	}
6418 #endif
6419 #endif
6420 	sctp_input_with_port(m, off, 0);
6421 #if defined(__FreeBSD__) && !defined(__Userspace__)
6422 	return (IPPROTO_DONE);
6423 #endif
6424 }
6425 #endif
6426 #endif
6427