/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 363440 2020-07-23 01:35:24Z tuexen $");
#endif

#include <netinet/sctp_os.h>
#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <sys/proc.h>
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <netinet/sctp_lock_bsd.h>
#endif
/*
 * NOTES: On the outbound side of things we need to check the SACK timer to
 * see if a SACK should be generated into the chunk queue (only if there is
 * data to send, so that the SACK can be bundled with it).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
			struct sctp_stream_in *strm,
			struct sctp_tcb *stcb,
			struct sctp_association *asoc,
			struct sctp_tmit_chunk *chk, int hold_rlock);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since sb_cc is the count that everyone has put up, not just this
	 * association. When we rewrite sctp_soreceive we will fix this so
	 * that ONLY this association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	        ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and which we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	                                         asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	                                         asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
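
/*
 * Worked example (added illustration, not part of the original file; the
 * concrete numbers are assumptions): suppose sctp_sbspace() reports 8000
 * bytes, two 1000-byte chunks sit on the reassembly queue, nothing is
 * queued on the streams, MSIZE is 256 and my_rwnd_control_len is 100.
 * Then the computation above proceeds as
 *
 *	calc = 8000
 *	calc = 8000 - (2000 + 2 * 256) = 5488
 *	calc = 5488 - 0                = 5488
 *	calc = 5488 - 100              = 5388
 *
 * and since 5388 >= my_rwnd_control_len, 5388 is advertised. Only when the
 * remainder would drop below the control overhead is the window clamped to
 * 1 byte, for SWS (silly window syndrome) avoidance.
 */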

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		read_queue_e->do_not_ref_stcb = 1;
	}
failed_build:
	return (read_queue_e);
}
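
/*
 * Note (added commentary on the convention used in this file): sinfo_flags
 * carries the DATA/I-DATA chunk flags in its upper byte, which is why they
 * are packed with (flags << 8) above and recovered later with
 *
 *	flags = (control->sinfo_flags >> 8);
 *	unordered = flags & SCTP_DATA_UNORDERED;
 *
 * as done in sctp_place_control_in_stream() and sctp_queue_data_for_reasm()
 * below.
 */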

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
#if defined(_WIN32)
	WSACMSGHDR *cmh;
#else
	struct cmsghdr *cmh;
#endif
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
#if defined(_WIN32)
	cmh = mtod(ret, WSACMSGHDR *);
#else
	cmh = mtod(ret, struct cmsghdr *);
#endif
	/*
	 * Make sure that there is no un-initialized padding between
	 * the cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
#if defined(_WIN32)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
#if defined(_WIN32)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
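
/*
 * Receiver-side sketch (added illustration, not part of the original file):
 * a userland application consuming the ancillary data built above would
 * walk it with the standard cmsg(3) macros after recvmsg(), e.g.:
 *
 *	struct cmsghdr *c;
 *	struct sctp_rcvinfo *ri;
 *
 *	for (c = CMSG_FIRSTHDR(&msg); c != NULL; c = CMSG_NXTHDR(&msg, c)) {
 *		if ((c->cmsg_level == IPPROTO_SCTP) &&
 *		    (c->cmsg_type == SCTP_RCVINFO)) {
 *			ri = (struct sctp_rcvinfo *)CMSG_DATA(c);
 *			... use ri->rcv_sid, ri->rcv_tsn, ri->rcv_flags ...
 *		}
 *	}
 */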

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
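
/*
 * Illustration (added sketch; the concrete numbers are assumptions):
 * SCTP_CALC_TSN_TO_GAP yields the bit offset of a TSN relative to
 * mapping_array_base_tsn. With base_tsn = 0x1000 and tsn = 0x1005 the gap
 * is 5, so the code above clears bit 5 in mapping_array and sets it in
 * nr_mapping_array, i.e. TSN 0x1005 is moved from the revokable map to
 * the non-revokable map.
 */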

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
			     struct sctp_association *asoc,
			     struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/* Only one control can be here in old style -- abort */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The one in the queue is bigger than the
				 * new one, insert the new one before it.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The peer sent a duplicate message id
				 * number. Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
								  SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
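
/*
 * Note (added commentary, a sketch of the comparison convention):
 * SCTP_MID_GT/SCTP_MID_EQ take idata_supported as their first argument
 * because message ids are compared in serial-number arithmetic at
 * different widths: 32-bit MIDs when I-DATA is in use, 16-bit SSNs
 * otherwise. The ordered insertion above therefore remains correct
 * across sequence-number wraps in both modes.
 */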

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
                    struct sctp_queued_to_read *control,
                    struct sctp_tmit_chunk *chk,
                    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		              opspot,
		              control->fsn_included,
		              chk->rec.data.tsn,
		              chk->rec.data.sid,
		              chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		              opspot,
		              control->fsn_included,
		              chk->rec.data.tsn,
		              chk->rec.data.sid,
		              chk->rec.data.fsn,
		              (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_remote_addr(control->whoFrom);
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
			strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			              strm->last_mid_delivered, control->sinfo_tsn,
			              control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			              (uint16_t)strm->last_mid_delivered,
			              control->sinfo_tsn,
			              control->sinfo_stream,
			              (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it can be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		                  control,
		                  &stcb->sctp_socket->so_rcv, 1,
		                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					      control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
							  SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				                  control,
				                  &stcb->sctp_socket->so_rcv, 1,
				                  SCTP_READ_LOCK_NOT_HELD,
				                  SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			              "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}

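/*
 * Added commentary (a sketch of the invariant maintained below): the two
 * helpers that follow keep control->tail_mbuf pointing at the last mbuf of
 * control->data so that appends stay O(1). Zero-length mbufs are freed as
 * they are encountered, control->length is updated, and when the control
 * is already on the read queue the socket-buffer accounting is charged via
 * sctp_sballoc() (the caller is assumed to hold any needed SB locks).
 */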
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has done any
			 * locking of the SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		sctp_m_freem(control->data);
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has done any
			 * locking of the SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
	nc->do_not_ref_stcb = control->do_not_ref_stcb;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
                     struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will
		 * work :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
                               struct sctp_association *asoc,
                               struct sctp_stream_in *strm,
                               struct sctp_queued_to_read *control,
                               uint32_t pd_point,
                               int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSNs go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the peer is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control.
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
							  &stcb->sctp_socket->so_rcv, control->end_added,
							  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
				} else {
					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/* Switch to the new guy and continue */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		                  &stcb->sctp_socket->so_rcv, control->end_added,
		                  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
                               struct sctp_association *asoc,
                               struct sctp_queued_to_read *control,
                               struct sctp_tmit_chunk *chk,
                               int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a first fsn: %u becomes fsn_included\n",
			chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one; even though this one has a
			 * first, it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy case: the start of a new guy beyond
				 * the lowest.
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen. If it does,
				 * we started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case
				 * since there is no way to recover. This
				 * really will only happen if we can get
				 * more TSNs higher before the
				 * pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed.
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			                    abort_flag,
			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
                         struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSNs on it that are
	 * fragmented are ready to deliver. If so, go ahead and place them
	 * on the read queue. In so placing, if we hit the end, we need to
	 * remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
			       stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
			control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok, the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
			control, control->end_added, control->mid,
			control->top_fsn, control->fsn_included,
			strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered one
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
			control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
			next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						      control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through -- mark
				 * it non-revokable too.
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it is
				 * all there.
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it, or cannot add more
					 * (one is being delivered that way).
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
			struct sctp_stream_in *strm,
			struct sctp_tcb *stcb, struct sctp_association *asoc,
			struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto
	 * the control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must do some locking.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				      control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
1398 static void
sctp_queue_data_for_reasm(struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_queued_to_read * control,struct sctp_tmit_chunk * chk,int created_control,int * abort_flag,uint32_t tsn)1399 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1400 			  struct sctp_queued_to_read *control,
1401 			  struct sctp_tmit_chunk *chk,
1402 			  int created_control,
1403 			  int *abort_flag, uint32_t tsn)
1404 {
1405 	uint32_t next_fsn;
1406 	struct sctp_tmit_chunk *at, *nat;
1407 	struct sctp_stream_in *strm;
1408 	int do_wakeup, unordered;
1409 	uint32_t lenadded;
1410 
1411 	strm = &asoc->strmin[control->sinfo_stream];
1412 	/*
1413 	 * For old un-ordered data chunks.
1414 	 */
1415 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1416 		unordered = 1;
1417 	} else {
1418 		unordered = 0;
1419 	}
1420 	/* Must be added to the stream-in queue */
1421 	if (created_control) {
1422 		if ((unordered == 0) || (asoc->idata_supported)) {
1423 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1424 		}
1425 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1426 			/* Duplicate SSN? */
1427 			sctp_abort_in_reasm(stcb, control, chk,
1428 					    abort_flag,
1429 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1430 			sctp_clean_up_control(stcb, control);
1431 			return;
1432 		}
1433 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1434 			/* Ok we created this control and now
1435 			 * lets validate that its legal i.e. there
1436 			 * is a B bit set, if not and we have
1437 			 * up to the cum-ack then its invalid.
1438 			 */
1439 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1440 				sctp_abort_in_reasm(stcb, control, chk,
1441 				                    abort_flag,
1442 				                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1443 				return;
1444 			}
1445 		}
1446 	}
1447 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1448 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1449 		return;
1450 	}
1451 	/*
1452 	 * Ok we must queue the chunk into the reasembly portion:
1453 	 *  o if its the first it goes to the control mbuf.
1454 	 *  o if its not first but the next in sequence it goes to the control,
1455 	 *    and each succeeding one in order also goes.
1456 	 *  o if its not in order we place it on the list in its place.
1457 	 */
1458 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1459 		/* Its the very first one. */
1460 		SCTPDBG(SCTP_DEBUG_XXX,
1461 			"chunk is a first fsn: %u becomes fsn_included\n",
1462 			chk->rec.data.fsn);
1463 		if (control->first_frag_seen) {
1464 			/*
1465 			 * Error on senders part, they either
1466 			 * sent us two data chunks with FIRST,
1467 			 * or they sent two un-ordered chunks that
1468 			 * were fragmented at the same time in the same stream.
1469 			 */
1470 			sctp_abort_in_reasm(stcb, control, chk,
1471 			                    abort_flag,
1472 			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1473 			return;
1474 		}
1475 		control->first_frag_seen = 1;
1476 		control->sinfo_ppid = chk->rec.data.ppid;
1477 		control->sinfo_tsn = chk->rec.data.tsn;
1478 		control->fsn_included = chk->rec.data.fsn;
1479 		control->data = chk->data;
1480 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1481 		chk->data = NULL;
1482 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1483 		sctp_setup_tail_pointer(control);
1484 		asoc->size_on_all_streams += control->length;
1485 	} else {
1486 		/* Place the chunk in our list */
1487 		int inserted=0;
1488 		if (control->last_frag_seen == 0) {
1489 			/* Still willing to raise highest FSN seen */
1490 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1491 				SCTPDBG(SCTP_DEBUG_XXX,
1492 					"We have a new top_fsn: %u\n",
1493 					chk->rec.data.fsn);
1494 				control->top_fsn = chk->rec.data.fsn;
1495 			}
1496 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1497 				SCTPDBG(SCTP_DEBUG_XXX,
1498 					"The last fsn is now in place fsn: %u\n",
1499 					chk->rec.data.fsn);
1500 				control->last_frag_seen = 1;
1501 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1502 					SCTPDBG(SCTP_DEBUG_XXX,
1503 						"New fsn: %u is not at top_fsn: %u -- abort\n",
1504 						chk->rec.data.fsn,
1505 						control->top_fsn);
1506 					sctp_abort_in_reasm(stcb, control, chk,
1507 							    abort_flag,
1508 							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1509 					return;
1510 				}
1511 			}
1512 			if (asoc->idata_supported || control->first_frag_seen) {
1513 				/*
1514 				 * For IDATA we always check since we know that
1515 				 * the first fragment is 0. For old DATA we have
1516 				 * to receive the first before we know the first FSN
1517 				 * (which is the TSN).
1518 				 */
1519 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1520 					/* We have already delivered up to this so it's a dup */
1521 					sctp_abort_in_reasm(stcb, control, chk,
1522 							    abort_flag,
1523 							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1524 					return;
1525 				}
1526 			}
1527 		} else {
1528 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1529 				/* A second LAST fragment? huh? */
1530 				SCTPDBG(SCTP_DEBUG_XXX,
1531 					"Duplicate last fsn: %u (top: %u) -- abort\n",
1532 					chk->rec.data.fsn, control->top_fsn);
1533 				sctp_abort_in_reasm(stcb, control,
1534 						    chk, abort_flag,
1535 						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1536 				return;
1537 			}
1538 			if (asoc->idata_supported || control->first_frag_seen) {
1539 				/*
1540 				 * For IDATA we always check since we know that
1541 				 * the first fragment is 0. For old DATA we have
1542 				 * to receive the first before we know the first FSN
1543 				 * (which is the TSN).
1544 				 */
1545 
1546 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1547 					/* We have already delivered up to this so it's a dup */
1548 					SCTPDBG(SCTP_DEBUG_XXX,
1549 						"New fsn: %u is already seen in included_fsn: %u -- abort\n",
1550 						chk->rec.data.fsn, control->fsn_included);
1551 					sctp_abort_in_reasm(stcb, control, chk,
1552 							    abort_flag,
1553 							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1554 					return;
1555 				}
1556 			}
1557 			/* validate not beyond top FSN if we have seen last one */
1558 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1559 				SCTPDBG(SCTP_DEBUG_XXX,
1560 					"New fsn: %u is beyond top_fsn: %u -- abort\n",
1561 					chk->rec.data.fsn,
1562 					control->top_fsn);
1563 				sctp_abort_in_reasm(stcb, control, chk,
1564 						    abort_flag,
1565 						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1566 				return;
1567 			}
1568 		}
1569 		/*
1570 		 * If we reach here, we need to place the
1571 		 * new chunk in the reassembly for this
1572 		 * control.
1573 		 */
1574 		SCTPDBG(SCTP_DEBUG_XXX,
1575 			"chunk is a not first fsn: %u needs to be inserted\n",
1576 			chk->rec.data.fsn);
1577 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1578 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1579 				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1580 					/* Last not at the end? huh? */
1581 					SCTPDBG(SCTP_DEBUG_XXX,
1582 					        "Last fragment not last in list: -- abort\n");
1583 					sctp_abort_in_reasm(stcb, control,
1584 					                    chk, abort_flag,
1585 					                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1586 					return;
1587 				}
1588 				/*
1589 				 * This one in queue is bigger than the new one, insert
1590 				 * the new one before at.
1591 				 */
1592 				SCTPDBG(SCTP_DEBUG_XXX,
1593 					"Insert it before fsn: %u\n",
1594 					at->rec.data.fsn);
1595 				asoc->size_on_reasm_queue += chk->send_size;
1596 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1597 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1598 				inserted = 1;
1599 				break;
1600 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1601 				/* Gak, the peer sent me a duplicate fragment sequence number */
1602 				/*
1603 				 * It COULD be that the FSNs have wrapped and
1604 				 * maybe we should compare against the TSN
1605 				 * somehow, but for now we treat the duplicate
1606 				 * as a protocol error and abort the
1607 				 * association below.
1608 				 */
1609 				SCTPDBG(SCTP_DEBUG_XXX,
1610 					"Duplicate to fsn: %u -- abort\n",
1611 					at->rec.data.fsn);
1612 				sctp_abort_in_reasm(stcb, control,
1613 						    chk, abort_flag,
1614 						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1615 				return;
1616 			}
1617 		}
1618 		if (inserted == 0) {
1619 			/* Goes on the end */
1620 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1621 				chk->rec.data.fsn);
1622 			asoc->size_on_reasm_queue += chk->send_size;
1623 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1624 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1625 		}
1626 	}
1627 	/*
1628 	 * Ok, let's see if we can pull any in-sequence fragments
1629 	 * up into the control structure, if that makes sense.
1630 	 */
1631 	do_wakeup = 0;
1632 	/*
1633 	 * If the first fragment has not been
1634 	 * seen there is no sense in looking.
1635 	 */
1636 	if (control->first_frag_seen) {
1637 		next_fsn = control->fsn_included + 1;
1638 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1639 			if (at->rec.data.fsn == next_fsn) {
1640 				/* We can add this one now to the control */
1641 				SCTPDBG(SCTP_DEBUG_XXX,
1642 					"Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1643 					control, at,
1644 					at->rec.data.fsn,
1645 					next_fsn, control->fsn_included);
1646 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1647 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1648 				if (control->on_read_q) {
1649 					do_wakeup = 1;
1650 				} else {
1651 					/*
1652 					 * We only add to the size-on-all-streams
1653 					 * if it's not on the read q.  The read q
1654 					 * flag will cause a sballoc, so it's accounted
1655 					 * for there.
1656 					 */
1657 					asoc->size_on_all_streams += lenadded;
1658 				}
1659 				next_fsn++;
1660 				if (control->end_added && control->pdapi_started) {
1661 					if (strm->pd_api_started) {
1662 						strm->pd_api_started = 0;
1663 						control->pdapi_started = 0;
1664 					}
1665 					if (control->on_read_q == 0) {
1666 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1667 								  control,
1668 								  &stcb->sctp_socket->so_rcv, control->end_added,
1669 								  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1670 					}
1671 					break;
1672 				}
1673 			} else {
1674 				break;
1675 			}
1676 		}
1677 	}
1678 	if (do_wakeup) {
1679 #if defined(__Userspace__)
1680 		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
1681 #endif
1682 		/* Need to wakeup the reader */
1683 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1684 	}
1685 }
1686 
1687 static struct sctp_queued_to_read *
1688 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1689 {
1690 	struct sctp_queued_to_read *control;
1691 
1692 	if (ordered) {
1693 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1694 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1695 				break;
1696 			}
1697 		}
1698 	} else {
1699 		if (idata_supported) {
1700 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1701 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1702 					break;
1703 				}
1704 			}
1705 		} else {
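			/*
			 * Old DATA has no MID for unordered messages, so
			 * any pending unordered reassembly simply sits at
			 * the head of the unordered queue.
			 */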
1706 			control = TAILQ_FIRST(&strm->uno_inqueue);
1707 		}
1708 	}
1709 	return (control);
1710 }
1711 
1712 static int
1713 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1714 			  struct mbuf **m, int offset,  int chk_length,
1715 			  struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1716 			  int *break_flag, int last_chunk, uint8_t chk_type)
1717 {
1718 	struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1719 	struct sctp_stream_in *strm;
1720 	uint32_t tsn, fsn, gap, mid;
1721 	struct mbuf *dmbuf;
1722 	int the_len;
1723 	int need_reasm_check = 0;
1724 	uint16_t sid;
1725 	struct mbuf *op_err;
1726 	char msg[SCTP_DIAG_INFO_LEN];
1727 	struct sctp_queued_to_read *control, *ncontrol;
1728 	uint32_t ppid;
1729 	uint8_t chk_flags;
1730 	struct sctp_stream_reset_list *liste;
1731 	int ordered;
1732 	size_t clen;
1733 	int created_control = 0;
1734 
1735 	if (chk_type == SCTP_IDATA) {
1736 		struct sctp_idata_chunk *chunk, chunk_buf;
1737 
1738 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1739 		                                                 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1740 		chk_flags = chunk->ch.chunk_flags;
1741 		clen = sizeof(struct sctp_idata_chunk);
1742 		tsn = ntohl(chunk->dp.tsn);
1743 		sid = ntohs(chunk->dp.sid);
1744 		mid = ntohl(chunk->dp.mid);
1745 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1746 			fsn = 0;
1747 			ppid = chunk->dp.ppid_fsn.ppid;
1748 		} else {
1749 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1750 			ppid = 0xffffffff; /* Use as an invalid value. */
1751 		}
1752 	} else {
1753 		struct sctp_data_chunk *chunk, chunk_buf;
1754 
1755 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1756 		                                                sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1757 		chk_flags = chunk->ch.chunk_flags;
1758 		clen = sizeof(struct sctp_data_chunk);
1759 		tsn = ntohl(chunk->dp.tsn);
1760 		sid = ntohs(chunk->dp.sid);
1761 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1762 		fsn = tsn;
1763 		ppid = chunk->dp.ppid;
1764 	}
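	/*
	 * Note the unification above: for old DATA the 16-bit SSN becomes
	 * the MID and the TSN doubles as the FSN, while for I-DATA the
	 * chunk carries a 32-bit MID directly and the PPID/FSN share one
	 * field, so a FIRST fragment implicitly has FSN 0 and carries the
	 * PPID instead.
	 */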
1765 	if ((size_t)chk_length == clen) {
1766 		/*
1767 		 * Need to send an abort since we had an
1768 		 * empty data chunk.
1769 		 */
1770 		op_err = sctp_generate_no_user_data_cause(tsn);
1771 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1772 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1773 		*abort_flag = 1;
1774 		return (0);
1775 	}
1776 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1777 		asoc->send_sack = 1;
1778 	}
1779 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1780 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1781 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1782 	}
1783 	if (stcb == NULL) {
1784 		return (0);
1785 	}
1786 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1787 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1788 		/* It is a duplicate */
1789 		SCTP_STAT_INCR(sctps_recvdupdata);
1790 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1791 			/* Record a dup for the next outbound sack */
1792 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1793 			asoc->numduptsns++;
1794 		}
1795 		asoc->send_sack = 1;
1796 		return (0);
1797 	}
1798 	/* Calculate the number of TSNs between the base and this TSN */
1799 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
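	/*
	 * The gap is the circular (mod 2^32) distance from the mapping
	 * array base to this TSN, so wrap-around works: e.g. a base of
	 * 0xfffffffe and a TSN of 0x00000001 yield a gap of 3.
	 */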
1800 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1801 		/* Can't hold the bit in the mapping array at its maximum size, toss it */
1802 		return (0);
1803 	}
1804 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1805 		SCTP_TCB_LOCK_ASSERT(stcb);
1806 		if (sctp_expand_mapping_array(asoc, gap)) {
1807 			/* Can't expand, drop it */
1808 			return (0);
1809 		}
1810 	}
1811 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1812 		*high_tsn = tsn;
1813 	}
1814 	/* See if we have received this one already */
1815 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1816 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1817 		SCTP_STAT_INCR(sctps_recvdupdata);
1818 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1819 			/* Record a dup for the next outbound sack */
1820 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1821 			asoc->numduptsns++;
1822 		}
1823 		asoc->send_sack = 1;
1824 		return (0);
1825 	}
1826 	/*
1827 	 * Check to see about the GONE flag, duplicates would cause a sack
1828 	 * to be sent up above
1829 	 */
1830 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1831 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1832 	     (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1833 		/*
1834 		 * wait a minute, this guy is gone, there is no longer a
1835 		 * receiver. Send peer an ABORT!
1836 		 */
1837 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1838 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1839 		*abort_flag = 1;
1840 		return (0);
1841 	}
1842 	/*
1843 	 * Now before going further we see if there is room. If NOT then we
1844 	 * MAY let one through only IF this TSN is the one we are waiting
1845 	 * for on a partial delivery API.
1846 	 */
1847 
1848 	/* Is the stream valid? */
1849 	if (sid >= asoc->streamincnt) {
1850 		struct sctp_error_invalid_stream *cause;
1851 
1852 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1853 		                               0, M_NOWAIT, 1, MT_DATA);
1854 		if (op_err != NULL) {
1855 			/* add some space up front so prepend will work well */
1856 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1857 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1858 			/*
1859 			 * Error causes are just parameters and this one has two
1860 			 * back-to-back parameter headers: one with the error type
1861 			 * and size, the other with the stream id and a reserved field.
1862 			 */
1863 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1864 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1865 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1866 			cause->stream_id = htons(sid);
1867 			cause->reserved = htons(0);
1868 			sctp_queue_op_err(stcb, op_err);
1869 		}
1870 		SCTP_STAT_INCR(sctps_badsid);
1871 		SCTP_TCB_LOCK_ASSERT(stcb);
1872 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1873 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1874 			asoc->highest_tsn_inside_nr_map = tsn;
1875 		}
1876 		if (tsn == (asoc->cumulative_tsn + 1)) {
1877 			/* Update cum-ack */
1878 			asoc->cumulative_tsn = tsn;
1879 		}
1880 		return (0);
1881 	}
1882 	/*
1883 	 * If it's a fragmented message, let's see if we can
1884 	 * find the control on the reassembly queues.
1885 	 */
1886 	if ((chk_type == SCTP_IDATA) &&
1887 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1888 	    (fsn == 0)) {
1889 		/*
1890 		 * The first fragment *must* be fsn 0, and the other
1891 		 * (middle/end) pieces can *not* be fsn 0.
1892 		 * XXX: This can legitimately happen in case of a wrap
1893 		 *      around; ignore that possibility for now.
1894 		 */
1895 		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1896 		goto err_out;
1897 	}
1898 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1899 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1900 		chk_flags, control);
1901 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1902 		/* See if we can find the re-assembly entity */
1903 		if (control != NULL) {
1904 			/* We found something, does it belong? */
1905 			if (ordered && (mid != control->mid)) {
1906 				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1907 			err_out:
1908 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1909 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1910 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1911 				*abort_flag = 1;
1912 				return (0);
1913 			}
1914 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1915 				/* We can't have a switched order with an unordered chunk */
1916 				SCTP_SNPRINTF(msg, sizeof(msg),
1917 				              "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1918 				              tsn);
1919 				goto err_out;
1920 			}
1921 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1922 				/* We can't have a switched unordered with an ordered chunk */
1923 				SCTP_SNPRINTF(msg, sizeof(msg),
1924 				             "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1925 				             tsn);
1926 				goto err_out;
1927 			}
1928 		}
1929 	} else {
1930 		/* It's a complete segment.  Let's validate that we
1931 		 * don't have a re-assembly going on with
1932 		 * the same Stream/Seq (for ordered) or in
1933 		 * the same Stream for unordered.
1934 		 */
1935 		if (control != NULL) {
1936 			if (ordered || asoc->idata_supported) {
1937 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1938 					chk_flags, mid);
1939 				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1940 				goto err_out;
1941 			} else {
1942 				if ((tsn == control->fsn_included + 1) &&
1943 				    (control->end_added == 0)) {
1944 					SCTP_SNPRINTF(msg, sizeof(msg),
1945 					              "Illegal message sequence, missing end for MID: %8.8x",
1946 					              control->fsn_included);
1947 					goto err_out;
1948 				} else {
1949 					control = NULL;
1950 				}
1951 			}
1952 		}
1953 	}
1954 	/* now do the tests */
1955 	if (((asoc->cnt_on_all_streams +
1956 	      asoc->cnt_on_reasm_queue +
1957 	      asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1958 	    (((int)asoc->my_rwnd) <= 0)) {
1959 		/*
1960 		 * When we have NO room in the rwnd we check to make sure
1961 		 * the reader is doing its job...
1962 		 */
1963 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1964 			/* some to read, wake-up */
1965 #if defined(__APPLE__) && !defined(__Userspace__)
1966 			struct socket *so;
1967 
1968 			so = SCTP_INP_SO(stcb->sctp_ep);
1969 			atomic_add_int(&stcb->asoc.refcnt, 1);
1970 			SCTP_TCB_UNLOCK(stcb);
1971 			SCTP_SOCKET_LOCK(so, 1);
1972 			SCTP_TCB_LOCK(stcb);
1973 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1974 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1975 				/* assoc was freed while we were unlocked */
1976 				SCTP_SOCKET_UNLOCK(so, 1);
1977 				return (0);
1978 			}
1979 #endif
1980 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1981 #if defined(__APPLE__) && !defined(__Userspace__)
1982 			SCTP_SOCKET_UNLOCK(so, 1);
1983 #endif
1984 		}
1985 		/* now is it in the mapping array of what we have accepted? */
1986 		if (chk_type == SCTP_DATA) {
1987 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1988 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1989 				/* Nope, not in the valid range; dump it */
1990 			dump_packet:
1991 				sctp_set_rwnd(stcb, asoc);
1992 				if ((asoc->cnt_on_all_streams +
1993 				     asoc->cnt_on_reasm_queue +
1994 				     asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1995 					SCTP_STAT_INCR(sctps_datadropchklmt);
1996 				} else {
1997 					SCTP_STAT_INCR(sctps_datadroprwnd);
1998 				}
1999 				*break_flag = 1;
2000 				return (0);
2001 			}
2002 		} else {
2003 			if (control == NULL) {
2004 				goto dump_packet;
2005 			}
2006 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
2007 				goto dump_packet;
2008 			}
2009 		}
2010 	}
2011 #ifdef SCTP_ASOCLOG_OF_TSNS
2012 	SCTP_TCB_LOCK_ASSERT(stcb);
2013 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
2014 		asoc->tsn_in_at = 0;
2015 		asoc->tsn_in_wrapped = 1;
2016 	}
2017 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
2018 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
2019 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
2020 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2021 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
2022 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2023 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2024 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2025 	asoc->tsn_in_at++;
2026 #endif
2027 	/*
2028 	 * Before we continue, let's validate that we are not being fooled by
2029 	 * an evil attacker.  We can only have N * 8 chunks, based on the TSN
2030 	 * spread allowed by the N-byte mapping array, so there is no
2031 	 * way our stream sequence numbers could have wrapped.  We of course
2032 	 * only validate the FIRST fragment, so the bit must be set.
2033 	 */
2034 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2035 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2036 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2037 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2038 		/* The incoming sseq is behind where we last delivered? */
2039 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2040 			mid, asoc->strmin[sid].last_mid_delivered);
2041 
2042 		if (asoc->idata_supported) {
2043 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2044 			              asoc->strmin[sid].last_mid_delivered,
2045 			              tsn,
2046 			              sid,
2047 			              mid);
2048 		} else {
2049 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2050 			              (uint16_t)asoc->strmin[sid].last_mid_delivered,
2051 			              tsn,
2052 			              sid,
2053 			              (uint16_t)mid);
2054 		}
2055 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2056 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2057 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2058 		*abort_flag = 1;
2059 		return (0);
2060 	}
2061 	if (chk_type == SCTP_IDATA) {
2062 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2063 	} else {
2064 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2065 	}
2066 	if (last_chunk == 0) {
2067 		if (chk_type == SCTP_IDATA) {
2068 			dmbuf = SCTP_M_COPYM(*m,
2069 					     (offset + sizeof(struct sctp_idata_chunk)),
2070 					     the_len, M_NOWAIT);
2071 		} else {
2072 			dmbuf = SCTP_M_COPYM(*m,
2073 					     (offset + sizeof(struct sctp_data_chunk)),
2074 					     the_len, M_NOWAIT);
2075 		}
2076 #ifdef SCTP_MBUF_LOGGING
2077 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2078 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2079 		}
2080 #endif
2081 	} else {
2082 		/* We can steal the last chunk */
2083 		int l_len;
2084 		dmbuf = *m;
2085 		/* lop off the top part */
2086 		if (chk_type == SCTP_IDATA) {
2087 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2088 		} else {
2089 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2090 		}
2091 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2092 			l_len = SCTP_BUF_LEN(dmbuf);
2093 		} else {
2094 			/* need to count up the size; hopefully
2095 			 * we do not hit this too often :-0
2096 			 */
2097 			struct mbuf *lat;
2098 
2099 			l_len = 0;
2100 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2101 				l_len += SCTP_BUF_LEN(lat);
2102 			}
2103 		}
2104 		if (l_len > the_len) {
2105 			/* Trim the extra bytes off the end too */
2106 			m_adj(dmbuf, -(l_len - the_len));
2107 		}
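		/*
		 * A negative count to m_adj() trims from the tail of the
		 * chain, so dmbuf now holds exactly the_len bytes of chunk
		 * payload.
		 */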
2108 	}
2109 	if (dmbuf == NULL) {
2110 		SCTP_STAT_INCR(sctps_nomem);
2111 		return (0);
2112 	}
2113 	/*
2114 	 * Now, no matter what, we need a control; get one
2115 	 * if we don't have one (we may have gotten it
2116 	 * above when we found the message was fragmented).
2117 	 */
2118 	if (control == NULL) {
2119 		sctp_alloc_a_readq(stcb, control);
2120 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2121 					   ppid,
2122 					   sid,
2123 					   chk_flags,
2124 					   NULL, fsn, mid);
2125 		if (control == NULL) {
2126 			SCTP_STAT_INCR(sctps_nomem);
2127 			return (0);
2128 		}
2129 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2130 			struct mbuf *mm;
2131 
2132 			control->data = dmbuf;
2133 			control->tail_mbuf = NULL;
2134 			for (mm = control->data; mm; mm = mm->m_next) {
2135 				control->length += SCTP_BUF_LEN(mm);
2136 				if (SCTP_BUF_NEXT(mm) == NULL) {
2137 					control->tail_mbuf = mm;
2138 				}
2139 			}
2140 			control->end_added = 1;
2141 			control->last_frag_seen = 1;
2142 			control->first_frag_seen = 1;
2143 			control->fsn_included = fsn;
2144 			control->top_fsn = fsn;
2145 		}
2146 		created_control = 1;
2147 	}
2148 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2149 		chk_flags, ordered, mid, control);
2150 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2151 	    TAILQ_EMPTY(&asoc->resetHead) &&
2152 	    ((ordered == 0) ||
2153 	     (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2154 	      TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2155 		/* Candidate for express delivery */
2156 		/*
2157 		 * It's not fragmented, no PD-API is up, nothing is in the
2158 		 * delivery queue, it's unordered OR ordered and the next to
2159 		 * deliver AND nothing else is stuck on the stream queue,
2160 		 * and there is room for it in the socket buffer.  Let's just
2161 		 * stuff it up the buffer....
2162 		 */
2163 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2164 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2165 			asoc->highest_tsn_inside_nr_map = tsn;
2166 		}
2167 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2168 			control, mid);
2169 
2170 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2171 		                  control, &stcb->sctp_socket->so_rcv,
2172 		                  1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2173 
2174 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2175 			/* for ordered, bump what we delivered */
2176 			asoc->strmin[sid].last_mid_delivered++;
2177 		}
2178 		SCTP_STAT_INCR(sctps_recvexpress);
2179 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2180 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2181 					      SCTP_STR_LOG_FROM_EXPRS_DEL);
2182 		}
2183 		control = NULL;
2184 		goto finish_express_del;
2185 	}
2186 
2187 	/* Now will we need a chunk too? */
2188 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2189 		sctp_alloc_a_chunk(stcb, chk);
2190 		if (chk == NULL) {
2191 			/* No memory so we drop the chunk */
2192 			SCTP_STAT_INCR(sctps_nomem);
2193 			if (last_chunk == 0) {
2194 				/* we copied it, free the copy */
2195 				sctp_m_freem(dmbuf);
2196 			}
2197 			return (0);
2198 		}
2199 		chk->rec.data.tsn = tsn;
2200 		chk->no_fr_allowed = 0;
2201 		chk->rec.data.fsn = fsn;
2202 		chk->rec.data.mid = mid;
2203 		chk->rec.data.sid = sid;
2204 		chk->rec.data.ppid = ppid;
2205 		chk->rec.data.context = stcb->asoc.context;
2206 		chk->rec.data.doing_fast_retransmit = 0;
2207 		chk->rec.data.rcv_flags = chk_flags;
2208 		chk->asoc = asoc;
2209 		chk->send_size = the_len;
2210 		chk->whoTo = net;
2211 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2212 			chk,
2213 			control, mid);
2214 		atomic_add_int(&net->ref_count, 1);
2215 		chk->data = dmbuf;
2216 	}
2217 	/* Set the appropriate TSN mark */
2218 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2219 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2220 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2221 			asoc->highest_tsn_inside_nr_map = tsn;
2222 		}
2223 	} else {
2224 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2225 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2226 			asoc->highest_tsn_inside_map = tsn;
2227 		}
2228 	}
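	/*
	 * With sctp_do_drain disabled the TSN is marked non-renegable
	 * (nr_mapping_array) and will never be given back; otherwise it
	 * goes into the renegable map so it may be reneged upon under
	 * mbuf pressure.
	 */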
2229 	/* Now is it complete (i.e. not fragmented)? */
2230 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2231 		/*
2232 		 * Special check for when streams are resetting.  We
2233 		 * could be smarter about this and check the
2234 		 * actual stream to see if it is not being reset...
2235 		 * that way we would not create head-of-line blocking
2236 		 * amongst streams being reset and those not being
2237 		 * reset.
2238 		 */
2239 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2240 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2241 			/*
2242 			 * yep, it's past where we need to reset... go
2243 			 * ahead and queue it.
2244 			 */
2245 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2246 				/* first one on */
2247 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2248 			} else {
2249 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2250 				unsigned char inserted = 0;
2251 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2252 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2253 
2254 						continue;
2255 					} else {
2256 						/* found it */
2257 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2258 						inserted = 1;
2259 						break;
2260 					}
2261 				}
2262 				if (inserted == 0) {
2263 					/*
2264 					 * Must be put at the end;
2265 					 * every entry already queued
2266 					 * has a smaller TSN.
2267 					 */
2268 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2269 				}
2270 			}
2271 			goto finish_express_del;
2272 		}
2273 		if (chk_flags & SCTP_DATA_UNORDERED) {
2274 			/* queue directly into socket buffer */
2275 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2276 				control, mid);
2277 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2278 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2279 			                  control,
2280 			                  &stcb->sctp_socket->so_rcv, 1,
2281 			                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2282 
2283 		} else {
2284 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2285 				mid);
2286 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2287 			if (*abort_flag) {
2288 				if (last_chunk) {
2289 					*m = NULL;
2290 				}
2291 				return (0);
2292 			}
2293 		}
2294 		goto finish_express_del;
2295 	}
2296 	/* If we reach here it's a reassembly */
2297 	need_reasm_check = 1;
2298 	SCTPDBG(SCTP_DEBUG_XXX,
2299 		"Queue data to stream for reasm control: %p MID: %u\n",
2300 		control, mid);
2301 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2302 	if (*abort_flag) {
2303 		/*
2304 		 * the assoc is now gone and chk was put onto the
2305 		 * reasm queue, which has all been freed.
2306 		 */
2307 		if (last_chunk) {
2308 			*m = NULL;
2309 		}
2310 		return (0);
2311 	}
2312 finish_express_del:
2313 	/* Here we tidy up things */
2314 	if (tsn == (asoc->cumulative_tsn + 1)) {
2315 		/* Update cum-ack */
2316 		asoc->cumulative_tsn = tsn;
2317 	}
2318 	if (last_chunk) {
2319 		*m = NULL;
2320 	}
2321 	if (ordered) {
2322 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2323 	} else {
2324 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2325 	}
2326 	SCTP_STAT_INCR(sctps_recvdata);
2327 	/* Set it present please */
2328 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2329 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2330 	}
2331 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2332 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2333 			     asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2334 	}
2335 	if (need_reasm_check) {
2336 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2337 		need_reasm_check = 0;
2338 	}
2339 	/* check the special flag for stream resets */
2340 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2341 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2342 		/*
2343 		 * We have finished working through the backlogged TSNs; now
2344 		 * it is time to reset streams: 1) call the reset function, 2) free
2345 		 * pending_reply space, 3) distribute any chunks in the
2346 		 * pending_reply_queue.
2347 		 */
2348 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2349 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2350 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2351 		SCTP_FREE(liste, SCTP_M_STRESET);
2352 		/*sa_ignore FREED_MEMORY*/
2353 		liste = TAILQ_FIRST(&asoc->resetHead);
2354 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2355 			/* All can be removed */
2356 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2357 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2358 				strm = &asoc->strmin[control->sinfo_stream];
2359 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2360 				if (*abort_flag) {
2361 					return (0);
2362 				}
2363 				if (need_reasm_check) {
2364 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2365 					need_reasm_check = 0;
2366 				}
2367 			}
2368 		} else {
2369 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2370 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2371 					break;
2372 				}
2373 				/*
2374 				 * If control->sinfo_tsn is <= liste->tsn we can
2375 				 * process it, which is the negation of the
2376 				 * control->sinfo_tsn > liste->tsn test above.
2377 				 */
2378 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2379 				strm = &asoc->strmin[control->sinfo_stream];
2380 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2381 				if (*abort_flag) {
2382 					return (0);
2383 				}
2384 				if (need_reasm_check) {
2385 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2386 					need_reasm_check = 0;
2387 				}
2388 			}
2389 		}
2390 	}
2391 	return (1);
2392 }
2393 
2394 static const int8_t sctp_map_lookup_tab[256] = {
2395   0, 1, 0, 2, 0, 1, 0, 3,
2396   0, 1, 0, 2, 0, 1, 0, 4,
2397   0, 1, 0, 2, 0, 1, 0, 3,
2398   0, 1, 0, 2, 0, 1, 0, 5,
2399   0, 1, 0, 2, 0, 1, 0, 3,
2400   0, 1, 0, 2, 0, 1, 0, 4,
2401   0, 1, 0, 2, 0, 1, 0, 3,
2402   0, 1, 0, 2, 0, 1, 0, 6,
2403   0, 1, 0, 2, 0, 1, 0, 3,
2404   0, 1, 0, 2, 0, 1, 0, 4,
2405   0, 1, 0, 2, 0, 1, 0, 3,
2406   0, 1, 0, 2, 0, 1, 0, 5,
2407   0, 1, 0, 2, 0, 1, 0, 3,
2408   0, 1, 0, 2, 0, 1, 0, 4,
2409   0, 1, 0, 2, 0, 1, 0, 3,
2410   0, 1, 0, 2, 0, 1, 0, 7,
2411   0, 1, 0, 2, 0, 1, 0, 3,
2412   0, 1, 0, 2, 0, 1, 0, 4,
2413   0, 1, 0, 2, 0, 1, 0, 3,
2414   0, 1, 0, 2, 0, 1, 0, 5,
2415   0, 1, 0, 2, 0, 1, 0, 3,
2416   0, 1, 0, 2, 0, 1, 0, 4,
2417   0, 1, 0, 2, 0, 1, 0, 3,
2418   0, 1, 0, 2, 0, 1, 0, 6,
2419   0, 1, 0, 2, 0, 1, 0, 3,
2420   0, 1, 0, 2, 0, 1, 0, 4,
2421   0, 1, 0, 2, 0, 1, 0, 3,
2422   0, 1, 0, 2, 0, 1, 0, 5,
2423   0, 1, 0, 2, 0, 1, 0, 3,
2424   0, 1, 0, 2, 0, 1, 0, 4,
2425   0, 1, 0, 2, 0, 1, 0, 3,
2426   0, 1, 0, 2, 0, 1, 0, 8
2427 };
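/*
 * Each entry above is the number of consecutive 1 bits in the index
 * value, counting up from bit 0: e.g. sctp_map_lookup_tab[0x17] is 3
 * since 0x17 = 00010111b has three trailing ones.  The slide logic
 * below uses it to locate the first missing TSN within a partially
 * filled mapping array byte.
 */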
2428 
2429 
2430 void
2431 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2432 {
2433 	/*
2434 	 * Now we also need to check the mapping array in a couple of ways.
2435 	 * 1) Did we move the cum-ack point?
2436 	 *
2437 	 * When you first glance at this you might think
2438 	 * that all entries that make up the position
2439 	 * of the cum-ack would be in the nr-mapping array
2440 	 * only... i.e. things up to the cum-ack are always
2441 	 * deliverable.  That's true with one exception: when
2442 	 * it's a fragmented message we may not deliver the data
2443 	 * until some threshold (or all of it) is in place.  So
2444 	 * we must OR the nr_mapping_array and mapping_array to
2445 	 * get a true picture of the cum-ack.
2446 	 */
2447 	struct sctp_association *asoc;
2448 	int at;
2449 	uint8_t val;
2450 	int slide_from, slide_end, lgap, distance;
2451 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2452 
2453 	asoc = &stcb->asoc;
2454 
2455 	old_cumack = asoc->cumulative_tsn;
2456 	old_base = asoc->mapping_array_base_tsn;
2457 	old_highest = asoc->highest_tsn_inside_map;
2458 	/*
2459 	 * We could probably improve this a bit by calculating the
2460 	 * offset of the current cum-ack as the starting point.
2461 	 */
2462 	at = 0;
2463 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2464 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2465 		if (val == 0xff) {
2466 			at += 8;
2467 		} else {
2468 			/* there is a 0 bit */
2469 			at += sctp_map_lookup_tab[val];
2470 			break;
2471 		}
2472 	}
2473 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);
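	/*
	 * For example, with a base TSN of 100 and a combined first byte of
	 * 0x0f the scan stops with at = 4, making the cumulative TSN 103
	 * (TSNs 100 through 103 have all arrived in one map or the other).
	 */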
2474 
2475 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2476             SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2477 #ifdef INVARIANTS
2478 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2479 		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2480 #else
2481 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2482 			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2483 		sctp_print_mapping_array(asoc);
2484 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2485 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2486 		}
2487 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2488 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2489 #endif
2490 	}
2491 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2492 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2493 	} else {
2494 		highest_tsn = asoc->highest_tsn_inside_map;
2495 	}
2496 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2497 		/* The complete array was completed by a single FR */
2498 		/* highest becomes the cum-ack */
2499 		int clr;
2500 #ifdef INVARIANTS
2501 		unsigned int i;
2502 #endif
2503 
2504 		/* clear the array */
2505 		clr = ((at+7) >> 3);
2506 		if (clr > asoc->mapping_array_size) {
2507 			clr = asoc->mapping_array_size;
2508 		}
2509 		memset(asoc->mapping_array, 0, clr);
2510 		memset(asoc->nr_mapping_array, 0, clr);
2511 #ifdef INVARIANTS
2512 		for (i = 0; i < asoc->mapping_array_size; i++) {
2513 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2514 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2515 				sctp_print_mapping_array(asoc);
2516 			}
2517 		}
2518 #endif
2519 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2520 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2521 	} else if (at >= 8) {
2522 		/* we can slide the mapping array down */
2523 		/* slide_from holds where we hit the first NON 0xff byte */
2524 
2525 		/*
2526 		 * now calculate the ceiling of the move using our highest
2527 		 * TSN value
2528 		 */
2529 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2530 		slide_end = (lgap >> 3);
2531 		if (slide_end < slide_from) {
2532 			sctp_print_mapping_array(asoc);
2533 #ifdef INVARIANTS
2534 			panic("impossible slide");
2535 #else
2536 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2537 			            lgap, slide_end, slide_from, at);
2538 			return;
2539 #endif
2540 		}
2541 		if (slide_end > asoc->mapping_array_size) {
2542 #ifdef INVARIANTS
2543 			panic("would overrun buffer");
2544 #else
2545 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2546 			            asoc->mapping_array_size, slide_end);
2547 			slide_end = asoc->mapping_array_size;
2548 #endif
2549 		}
2550 		distance = (slide_end - slide_from) + 1;
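		/*
		 * Bytes below slide_from are all 0xff and can be dropped by
		 * advancing the base: e.g. slide_from = 3 later advances
		 * mapping_array_base_tsn by 24 TSNs.
		 */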
2551 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2552 			sctp_log_map(old_base, old_cumack, old_highest,
2553 				     SCTP_MAP_PREPARE_SLIDE);
2554 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2555 				     (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2556 		}
2557 		if (distance + slide_from > asoc->mapping_array_size ||
2558 		    distance < 0) {
2559 			/*
2560 			 * Here we do NOT slide forward the array so that
2561 			 * hopefully when more data comes in to fill it up
2562 			 * we will be able to slide it forward. Really I
2563 			 * don't think this should happen :-0
2564 			 */
2565 
2566 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2567 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2568 					     (uint32_t) asoc->mapping_array_size,
2569 					     SCTP_MAP_SLIDE_NONE);
2570 			}
2571 		} else {
2572 			int ii;
2573 
2574 			for (ii = 0; ii < distance; ii++) {
2575 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2576 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2577 
2578 			}
2579 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2580 				asoc->mapping_array[ii] = 0;
2581 				asoc->nr_mapping_array[ii] = 0;
2582 			}
2583 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2584 				asoc->highest_tsn_inside_map += (slide_from << 3);
2585 			}
2586 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2587 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2588 			}
2589 			asoc->mapping_array_base_tsn += (slide_from << 3);
2590 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2591 				sctp_log_map(asoc->mapping_array_base_tsn,
2592 					     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2593 					     SCTP_MAP_SLIDE_RESULT);
2594 			}
2595 		}
2596 	}
2597 }
2598 
2599 void
2600 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2601 {
2602 	struct sctp_association *asoc;
2603 	uint32_t highest_tsn;
2604 	int is_a_gap;
2605 
2606 	sctp_slide_mapping_arrays(stcb);
2607 	asoc = &stcb->asoc;
2608 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2609 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2610 	} else {
2611 		highest_tsn = asoc->highest_tsn_inside_map;
2612 	}
2613 	/* Is there a gap now? */
2614 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
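	/*
	 * A gap means some TSN above the cumulative ack has arrived, so
	 * the next SACK must carry gap-ack blocks describing it.
	 */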
2615 
2616 	/*
2617 	 * Now we need to see if we need to queue a sack or just start the
2618 	 * timer (if allowed).
2619 	 */
2620 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2621 		/*
2622 		 * Ok, special case for the SHUTDOWN-SENT state: here we
2623 		 * make sure the SACK timer is off and instead send a
2624 		 * SHUTDOWN and a SACK.
2625 		 */
2626 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2627 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2628 			                stcb->sctp_ep, stcb, NULL,
2629 			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2630 		}
2631 		sctp_send_shutdown(stcb,
2632 		                   ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2633 		if (is_a_gap) {
2634 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2635 		}
2636 	} else {
2637 		/*
2638 		 * CMT DAC algorithm: increase number of packets
2639 		 * received since last ack
2640 		 */
2641 		stcb->asoc.cmt_dac_pkts_rcvd++;
2642 
2643 		if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
2644 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2645 		                                         * longer is one */
2646 		    (stcb->asoc.numduptsns) ||          /* we have dup's */
2647 		    (is_a_gap) ||                       /* is still a gap */
2648 		    (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
2649 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2650 			) {
2651 
2652 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2653 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2654 			    (stcb->asoc.send_sack == 0) &&
2655 			    (stcb->asoc.numduptsns == 0) &&
2656 			    (stcb->asoc.delayed_ack) &&
2657 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2658 
2659 				/*
2660 				 * CMT DAC algorithm: With CMT,
2661 				 * delay acks even in the face of
2662 				 * reordering.  Therefore, acks
2663 				 * that do not have to be sent
2664 				 * because of the above reasons
2665 				 * will be delayed.  That is,
2666 				 * acks that would have been sent
2667 				 * due to gap reports will be
2668 				 * delayed with DAC.  Start the
2669 				 * delayed ack timer.
2670 				 */
2671 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2672 				                 stcb->sctp_ep, stcb, NULL);
2673 			} else {
2674 				/*
2675 				 * Ok we must build a SACK since the
2676 				 * timer is pending, we got our
2677 				 * first packet OR there are gaps or
2678 				 * duplicates.
2679 				 */
2680 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2681 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2682 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2683 			}
2684 		} else {
2685 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2686 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2687 				                 stcb->sctp_ep, stcb, NULL);
2688 			}
2689 		}
2690 	}
2691 }
2692 
2693 int
2694 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2695                   struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2696                   struct sctp_nets *net, uint32_t *high_tsn)
2697 {
2698 	struct sctp_chunkhdr *ch, chunk_buf;
2699 	struct sctp_association *asoc;
2700 	int num_chunks = 0;	/* number of control chunks processed */
2701 	int stop_proc = 0;
2702 	int break_flag, last_chunk;
2703 	int abort_flag = 0, was_a_gap;
2704 	struct mbuf *m;
2705 	uint32_t highest_tsn;
2706 	uint16_t chk_length;
2707 
2708 	/* set the rwnd */
2709 	sctp_set_rwnd(stcb, &stcb->asoc);
2710 
2711 	m = *mm;
2712 	SCTP_TCB_LOCK_ASSERT(stcb);
2713 	asoc = &stcb->asoc;
2714 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2715 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2716 	} else {
2717 		highest_tsn = asoc->highest_tsn_inside_map;
2718 	}
2719 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2720 	/*
2721 	 * setup where we got the last DATA packet from for any SACK that
2722 	 * may need to go out. Don't bump the net. This is done ONLY when a
2723 	 * chunk is assigned.
2724 	 */
2725 	asoc->last_data_chunk_from = net;
2726 
2727 	/*-
2728 	 * Now before we proceed we must figure out if this is a wasted
2729 	 * cluster... i.e. it is a small packet sent in and yet the driver
2730 	 * underneath allocated a full cluster for it. If so we must copy it
2731 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2732 	 * with cluster starvation.
2733 	 */
2734 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2735 		/* we only handle mbufs that are singletons.. not chains */
2736 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2737 		if (m) {
2738 			/* ok, let's see if we can copy the data up */
2739 			caddr_t *from, *to;
2740 			/* get the pointers and copy */
2741 			to = mtod(m, caddr_t *);
2742 			from = mtod((*mm), caddr_t *);
2743 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2744 			/* copy the length and free up the old */
2745 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2746 			sctp_m_freem(*mm);
2747 			/* success, back copy */
2748 			*mm = m;
2749 		} else {
2750 			/* We are in trouble in the mbuf world .. yikes */
2751 			m = *mm;
2752 		}
2753 	}
2754 	/* get pointer to the first chunk header */
2755 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2756 	                                           sizeof(struct sctp_chunkhdr),
2757 	                                           (uint8_t *)&chunk_buf);
2758 	if (ch == NULL) {
2759 		return (1);
2760 	}
2761 	/*
2762 	 * process all DATA chunks...
2763 	 */
2764 	*high_tsn = asoc->cumulative_tsn;
2765 	break_flag = 0;
2766 	asoc->data_pkts_seen++;
2767 	while (stop_proc == 0) {
2768 		/* validate chunk length */
2769 		chk_length = ntohs(ch->chunk_length);
2770 		if (length - *offset < chk_length) {
2771 			/* all done, mutilated chunk */
2772 			stop_proc = 1;
2773 			continue;
2774 		}
2775 		if ((asoc->idata_supported == 1) &&
2776 		    (ch->chunk_type == SCTP_DATA)) {
2777 			struct mbuf *op_err;
2778 			char msg[SCTP_DIAG_INFO_LEN];
2779 
2780 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2781 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2782 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2783 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2784 			return (2);
2785 		}
2786 		if ((asoc->idata_supported == 0) &&
2787 		    (ch->chunk_type == SCTP_IDATA)) {
2788 			struct mbuf *op_err;
2789 			char msg[SCTP_DIAG_INFO_LEN];
2790 
2791 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2792 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2793 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2794 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2795 			return (2);
2796 		}
2797 		if ((ch->chunk_type == SCTP_DATA) ||
2798 		    (ch->chunk_type == SCTP_IDATA)) {
2799 			uint16_t clen;
2800 
2801 			if (ch->chunk_type == SCTP_DATA) {
2802 				clen = sizeof(struct sctp_data_chunk);
2803 			} else {
2804 				clen = sizeof(struct sctp_idata_chunk);
2805 			}
2806 			if (chk_length < clen) {
2807 				/*
2808 				 * Need to send an abort since we had an
2809 				 * invalid data chunk.
2810 				 */
2811 				struct mbuf *op_err;
2812 				char msg[SCTP_DIAG_INFO_LEN];
2813 
2814 				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2815 				              ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2816 				              chk_length);
2817 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2818 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2819 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2820 				return (2);
2821 			}
2822 #ifdef SCTP_AUDITING_ENABLED
2823 			sctp_audit_log(0xB1, 0);
2824 #endif
2825 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2826 				last_chunk = 1;
2827 			} else {
2828 				last_chunk = 0;
2829 			}
2830 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2831 						      chk_length, net, high_tsn, &abort_flag, &break_flag,
2832 						      last_chunk, ch->chunk_type)) {
2833 				num_chunks++;
2834 			}
2835 			if (abort_flag)
2836 				return (2);
2837 
2838 			if (break_flag) {
2839 				/*
2840 				 * Set because we ran out of rwnd space and
2841 				 * have no drop-report space left.
2842 				 */
2843 				stop_proc = 1;
2844 				continue;
2845 			}
2846 		} else {
2847 			/* not a data chunk in the data region */
2848 			switch (ch->chunk_type) {
2849 			case SCTP_INITIATION:
2850 			case SCTP_INITIATION_ACK:
2851 			case SCTP_SELECTIVE_ACK:
2852 			case SCTP_NR_SELECTIVE_ACK:
2853 			case SCTP_HEARTBEAT_REQUEST:
2854 			case SCTP_HEARTBEAT_ACK:
2855 			case SCTP_ABORT_ASSOCIATION:
2856 			case SCTP_SHUTDOWN:
2857 			case SCTP_SHUTDOWN_ACK:
2858 			case SCTP_OPERATION_ERROR:
2859 			case SCTP_COOKIE_ECHO:
2860 			case SCTP_COOKIE_ACK:
2861 			case SCTP_ECN_ECHO:
2862 			case SCTP_ECN_CWR:
2863 			case SCTP_SHUTDOWN_COMPLETE:
2864 			case SCTP_AUTHENTICATION:
2865 			case SCTP_ASCONF_ACK:
2866 			case SCTP_PACKET_DROPPED:
2867 			case SCTP_STREAM_RESET:
2868 			case SCTP_FORWARD_CUM_TSN:
2869 			case SCTP_ASCONF:
2870 			{
2871 				/*
2872 				 * Now, what do we do with KNOWN chunks that
2873 				 * are NOT in the right place?
2874 				 *
2875 				 * These chunk types must not follow DATA
2876 				 * in the data region, so we treat their
2877 				 * presence here as a protocol violation
2878 				 * and abort the association.
2879 				 */
2880 				struct mbuf *op_err;
2881 				char msg[SCTP_DIAG_INFO_LEN];
2882 
2883 				SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2884 				              ch->chunk_type);
2885 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2886 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2887 				return (2);
2888 			}
2889 			default:
2890 				/*
2891 				 * Unknown chunk type: use bit rules after
2892 				 * checking length
2893 				 */
2894 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2895 					/*
2896 					 * Need to send an abort since we had an
2897 					 * invalid chunk.
2898 					 */
2899 					struct mbuf *op_err;
2900 					char msg[SCTP_DIAG_INFO_LEN];
2901 
2902 					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2903 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2904 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2905 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2906 					return (2);
2907 				}
2908 				if (ch->chunk_type & 0x40) {
2909 					/* Add an error report to the queue */
2910 					struct mbuf *op_err;
2911 					struct sctp_gen_error_cause *cause;
2912 
2913 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2914 					                               0, M_NOWAIT, 1, MT_DATA);
2915 					if (op_err != NULL) {
2916 						cause  = mtod(op_err, struct sctp_gen_error_cause *);
2917 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2918 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2919 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2920 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2921 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2922 							sctp_queue_op_err(stcb, op_err);
2923 						} else {
2924 							sctp_m_freem(op_err);
2925 						}
2926 					}
2927 				}
2928 				if ((ch->chunk_type & 0x80) == 0) {
2929 					/* discard the rest of this packet */
2930 					stop_proc = 1;
2931 				}	/* else skip this bad chunk and
2932 					 * continue... */
2933 				break;
2934 			}	/* switch of chunk type */
2935 		}
2936 		*offset += SCTP_SIZE32(chk_length);
2937 		if ((*offset >= length) || stop_proc) {
2938 			/* no more data left in the mbuf chain */
2939 			stop_proc = 1;
2940 			continue;
2941 		}
2942 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2943 		                                           sizeof(struct sctp_chunkhdr),
2944 		                                           (uint8_t *)&chunk_buf);
2945 		if (ch == NULL) {
2946 			*offset = length;
2947 			stop_proc = 1;
2948 			continue;
2949 		}
2950 	}
2951 	if (break_flag) {
2952 		/*
2953 		 * we need to report rwnd overrun drops.
2954 		 */
2955 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2956 	}
2957 	if (num_chunks) {
2958 		/*
2959 		 * Did we get data, if so update the time for auto-close and
2960 		 * give peer credit for being alive.
2961 		 */
2962 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2963 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2964 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2965 				       stcb->asoc.overall_error_count,
2966 				       0,
2967 				       SCTP_FROM_SCTP_INDATA,
2968 				       __LINE__);
2969 		}
2970 		stcb->asoc.overall_error_count = 0;
2971 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2972 	}
2973 	/* now service all of the reassm queue if needed */
2974 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2975 		/* Assure that we ack right away */
2976 		stcb->asoc.send_sack = 1;
2977 	}
2978 	/* Start a sack timer or QUEUE a SACK for sending */
2979 	sctp_sack_check(stcb, was_a_gap);
2980 	return (0);
2981 }
2982 
2983 static int
2984 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2985 			   uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2986 			   int *num_frs,
2987 			   uint32_t *biggest_newly_acked_tsn,
2988 			   uint32_t  *this_sack_lowest_newack,
2989 			   int *rto_ok)
2990 {
2991 	struct sctp_tmit_chunk *tp1;
2992 	unsigned int theTSN;
2993 	int j, wake_him = 0, circled = 0;
2994 
2995 	/* Recover the tp1 we last saw */
2996 	tp1 = *p_tp1;
2997 	if (tp1 == NULL) {
2998 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2999 	}
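	/*
	 * Walk each TSN covered by this gap ack block; frag_strt and
	 * frag_end are offsets relative to the cumulative TSN ack
	 * (last_tsn).
	 */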
3000 	for (j = frag_strt; j <= frag_end; j++) {
3001 		theTSN = j + last_tsn;
3002 		while (tp1) {
3003 			if (tp1->rec.data.doing_fast_retransmit)
3004 				(*num_frs) += 1;
3005 
3006 			/*-
3007 			 * CMT: CUCv2 algorithm. For each TSN being
3008 			 * processed from the sent queue, track the
3009 			 * next expected pseudo-cumack, or
3010 			 * rtx_pseudo_cumack, if required. Separate
3011 			 * cumack trackers for first transmissions
3012 			 * and retransmissions.
3013 			 */
3014 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3015 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
3016 			    (tp1->snd_count == 1)) {
3017 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
3018 				tp1->whoTo->find_pseudo_cumack = 0;
3019 			}
3020 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3021 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3022 			    (tp1->snd_count > 1)) {
3023 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3024 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
3025 			}
3026 			if (tp1->rec.data.tsn == theTSN) {
3027 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3028 					/*-
3029 					 * must be held until
3030 					 * cum-ack passes
3031 					 */
3032 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3033 						/*-
3034 						 * If it is less than RESEND, it is
3035 						 * now no longer in flight.
3036 						 * Higher values may already be set
3037 						 * via previous Gap Ack Blocks...
3038 						 * i.e. ACKED or RESEND.
3039 						 */
3040 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3041 						                *biggest_newly_acked_tsn)) {
3042 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3043 						}
3044 						/*-
3045 						 * CMT: SFR algo (and HTNA) - set
3046 						 * saw_newack to 1 for dest being
3047 						 * newly acked. update
3048 						 * this_sack_highest_newack if
3049 						 * appropriate.
3050 						 */
3051 						if (tp1->rec.data.chunk_was_revoked == 0)
3052 							tp1->whoTo->saw_newack = 1;
3053 
3054 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3055 						                tp1->whoTo->this_sack_highest_newack)) {
3056 							tp1->whoTo->this_sack_highest_newack =
3057 								tp1->rec.data.tsn;
3058 						}
3059 						/*-
3060 						 * CMT DAC algo: also update
3061 						 * this_sack_lowest_newack
3062 						 */
3063 						if (*this_sack_lowest_newack == 0) {
3064 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3065 								sctp_log_sack(*this_sack_lowest_newack,
3066 									      last_tsn,
3067 									      tp1->rec.data.tsn,
3068 									      0,
3069 									      0,
3070 									      SCTP_LOG_TSN_ACKED);
3071 							}
3072 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3073 						}
3074 						/*-
3075 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3076 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3077 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3078 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3079 						 * Separate pseudo_cumack trackers for first transmissions and
3080 						 * retransmissions.
3081 						 */
3082 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3083 							if (tp1->rec.data.chunk_was_revoked == 0) {
3084 								tp1->whoTo->new_pseudo_cumack = 1;
3085 							}
3086 							tp1->whoTo->find_pseudo_cumack = 1;
3087 						}
3088 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3089 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3090 						}
3091 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3092 							if (tp1->rec.data.chunk_was_revoked == 0) {
3093 								tp1->whoTo->new_pseudo_cumack = 1;
3094 							}
3095 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3096 						}
3097 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3098 							sctp_log_sack(*biggest_newly_acked_tsn,
3099 								      last_tsn,
3100 								      tp1->rec.data.tsn,
3101 								      frag_strt,
3102 								      frag_end,
3103 								      SCTP_LOG_TSN_ACKED);
3104 						}
3105 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3106 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3107 								       tp1->whoTo->flight_size,
3108 								       tp1->book_size,
3109 								       (uint32_t)(uintptr_t)tp1->whoTo,
3110 								       tp1->rec.data.tsn);
3111 						}
3112 						sctp_flight_size_decrease(tp1);
3113 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3114 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3115 														     tp1);
3116 						}
3117 						sctp_total_flight_decrease(stcb, tp1);
3118 
3119 						tp1->whoTo->net_ack += tp1->send_size;
3120 						if (tp1->snd_count < 2) {
3121 							/*-
3122 							 * True non-retransmitted chunk
3123 							 */
3124 							tp1->whoTo->net_ack2 += tp1->send_size;
3125 
3126 							/*-
3127 							 * update RTO too ?
3128 							 */
3129 							if (tp1->do_rtt) {
3130 								if (*rto_ok &&
3131 								    sctp_calculate_rto(stcb,
3132 								                       &stcb->asoc,
3133 								                       tp1->whoTo,
3134 								                       &tp1->sent_rcv_time,
3135 								                       SCTP_RTT_FROM_DATA)) {
3136 									*rto_ok = 0;
3137 								}
3138 								if (tp1->whoTo->rto_needed == 0) {
3139 									tp1->whoTo->rto_needed = 1;
3140 								}
3141 								tp1->do_rtt = 0;
3142 							}
3143 						}
3144 
3145 					}
3146 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3147 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3148 						                stcb->asoc.this_sack_highest_gap)) {
3149 							stcb->asoc.this_sack_highest_gap =
3150 								tp1->rec.data.tsn;
3151 						}
3152 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3153 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3154 #ifdef SCTP_AUDITING_ENABLED
3155 							sctp_audit_log(0xB2,
3156 								       (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3157 #endif
3158 						}
3159 					}
3160 					/*-
3161 					 * All chunks NOT UNSENT fall through here and are marked
3162 					 * (leave PR-SCTP ones that are to skip alone though)
3163 					 */
3164 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3165 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3166 						tp1->sent = SCTP_DATAGRAM_MARKED;
3167 					}
3168 					if (tp1->rec.data.chunk_was_revoked) {
3169 						/* deflate the cwnd */
3170 						tp1->whoTo->cwnd -= tp1->book_size;
3171 						tp1->rec.data.chunk_was_revoked = 0;
3172 					}
3173 					/* NR Sack code here */
3174 					if (nr_sacking &&
3175 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3176 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3177 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3178 #ifdef INVARIANTS
3179 						} else {
3180 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3181 #endif
3182 						}
3183 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3184 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3185 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3186 							stcb->asoc.trigger_reset = 1;
3187 						}
3188 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3189 						if (tp1->data) {
3190 							/* sa_ignore NO_NULL_CHK */
3191 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3192 							sctp_m_freem(tp1->data);
3193 							tp1->data = NULL;
3194 						}
3195 						wake_him++;
3196 					}
3197 				}
3198 				break;
3199 			}	/* if (tp1->tsn == theTSN) */
3200 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3201 				break;
3202 			}
3203 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3204 			if ((tp1 == NULL) && (circled == 0)) {
3205 				circled++;
3206 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3207 			}
3208 		}	/* end while (tp1) */
3209 		if (tp1 == NULL) {
3210 			circled = 0;
3211 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3212 		}
3213 		/* In case the fragments were not in order, we must reset */
3214 	} /* end for (j = fragStart */
3215 	*p_tp1 = tp1;
3216 	return (wake_him);	/* Return value only used for nr-sack */
3217 }
3218 
3219 
3220 static int
3221 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3222 		uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3223 		uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3224 		int num_seg, int num_nr_seg, int *rto_ok)
3225 {
3226 	struct sctp_gap_ack_block *frag, block;
3227 	struct sctp_tmit_chunk *tp1;
3228 	int i;
3229 	int num_frs = 0;
3230 	int chunk_freed;
3231 	int non_revocable;
3232 	uint16_t frag_strt, frag_end, prev_frag_end;
3233 
3234 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3235 	prev_frag_end = 0;
3236 	chunk_freed = 0;
3237 
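	/*
	 * The SACK carries num_seg Gap Ack Blocks followed by num_nr_seg
	 * NR Gap Ack Blocks; when we cross from one set to the other we
	 * rescan the sent queue from the beginning.
	 */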
3238 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3239 		if (i == num_seg) {
3240 			prev_frag_end = 0;
3241 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3242 		}
3243 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3244 		                                                  sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
3245 		*offset += sizeof(block);
3246 		if (frag == NULL) {
3247 			return (chunk_freed);
3248 		}
3249 		frag_strt = ntohs(frag->start);
3250 		frag_end = ntohs(frag->end);
3251 
3252 		if (frag_strt > frag_end) {
3253 			/* This gap report is malformed, skip it. */
3254 			continue;
3255 		}
3256 		if (frag_strt <= prev_frag_end) {
3257 			/* This gap report is not in order, so restart. */
3258 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3259 		}
3260 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3261 			*biggest_tsn_acked = last_tsn + frag_end;
3262 		}
3263 		if (i < num_seg) {
3264 			non_revocable = 0;
3265 		} else {
3266 			non_revocable = 1;
3267 		}
3268 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3269 		                               non_revocable, &num_frs, biggest_newly_acked_tsn,
3270 		                               this_sack_lowest_newack, rto_ok)) {
3271 			chunk_freed = 1;
3272 		}
3273 		prev_frag_end = frag_end;
3274 	}
3275 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3276 		if (num_frs)
3277 			sctp_log_fr(*biggest_tsn_acked,
3278 			            *biggest_newly_acked_tsn,
3279 			            last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3280 	}
3281 	return (chunk_freed);
3282 }
3283 
3284 static void
3285 sctp_check_for_revoked(struct sctp_tcb *stcb,
3286 		       struct sctp_association *asoc, uint32_t cumack,
3287 		       uint32_t biggest_tsn_acked)
3288 {
3289 	struct sctp_tmit_chunk *tp1;
3290 
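	/*
	 * Scan TSNs above the cumulative ack: a chunk still marked ACKED
	 * was acked by an earlier SACK but not by this one, i.e. the peer
	 * has revoked it, so it must be put back in flight.
	 */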
3291 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3292 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3293 			/*
3294 			 * OK, this chunk is either ACKED or MARKED. If it is
3295 			 * ACKED, it was previously acked but not this
3296 			 * time, i.e. it has been revoked. If it is MARKED, it
3297 			 * was ACK'ed again.
3298 			 */
3299 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3300 				break;
3301 			}
3302 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3303 				/* it has been revoked */
3304 				tp1->sent = SCTP_DATAGRAM_SENT;
3305 				tp1->rec.data.chunk_was_revoked = 1;
3306 				/* We must add this back into the flight
3307 				 * accounting to assure timers and such get started.
3308 				 */
3309 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3310 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3311 						       tp1->whoTo->flight_size,
3312 						       tp1->book_size,
3313 						       (uint32_t)(uintptr_t)tp1->whoTo,
3314 						       tp1->rec.data.tsn);
3315 				}
3316 				sctp_flight_size_increase(tp1);
3317 				sctp_total_flight_increase(stcb, tp1);
3318 				/* We inflate the cwnd to compensate for our
3319 				 * artificial inflation of the flight_size.
3320 				 */
3321 				tp1->whoTo->cwnd += tp1->book_size;
3322 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3323 					sctp_log_sack(asoc->last_acked_seq,
3324 						      cumack,
3325 						      tp1->rec.data.tsn,
3326 						      0,
3327 						      0,
3328 						      SCTP_LOG_TSN_REVOKED);
3329 				}
3330 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3331 				/* it has been re-acked in this SACK */
3332 				tp1->sent = SCTP_DATAGRAM_ACKED;
3333 			}
3334 		}
3335 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3336 			break;
3337 	}
3338 }
3339 
3340 
3341 static void
3342 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3343 			   uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3344 {
3345 	struct sctp_tmit_chunk *tp1;
3346 	int strike_flag = 0;
3347 	struct timeval now;
3348 	int tot_retrans = 0;
3349 	uint32_t sending_seq;
3350 	struct sctp_nets *net;
3351 	int num_dests_sacked = 0;
3352 
3353 	/*
3354 	 * Select the sending_seq: either the TSN of the next chunk ready to
3355 	 * be sent but not yet transmitted, or the next TSN we will assign.
3356 	 */
3357 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3358 	if (tp1 == NULL) {
3359 		sending_seq = asoc->sending_seq;
3360 	} else {
3361 		sending_seq = tp1->rec.data.tsn;
3362 	}
3363 
3364 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3365 	if ((asoc->sctp_cmt_on_off > 0) &&
3366 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3367 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3368 			if (net->saw_newack)
3369 				num_dests_sacked++;
3370 		}
3371 	}
3372 	if (stcb->asoc.prsctp_supported) {
3373 		(void)SCTP_GETTIME_TIMEVAL(&now);
3374 	}
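	/*
	 * Walk the sent queue and decide, chunk by chunk, whether this
	 * SACK counts as a strike against a missing TSN.
	 */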
3375 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3376 		strike_flag = 0;
3377 		if (tp1->no_fr_allowed) {
3378 			/* this one had a timeout or something */
3379 			continue;
3380 		}
3381 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3382 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3383 				sctp_log_fr(biggest_tsn_newly_acked,
3384 					    tp1->rec.data.tsn,
3385 					    tp1->sent,
3386 					    SCTP_FR_LOG_CHECK_STRIKE);
3387 		}
3388 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3389 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3390 			/* done */
3391 			break;
3392 		}
3393 		if (stcb->asoc.prsctp_supported) {
3394 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3395 				/* Is it expired? */
3396 #if !(defined(__FreeBSD__) && !defined(__Userspace__))
3397 				if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3398 #else
3399 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3400 #endif
3401 					/* Yes so drop it */
3402 					if (tp1->data != NULL) {
3403 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3404 										 SCTP_SO_NOT_LOCKED);
3405 					}
3406 					continue;
3407 				}
3408 			}
3409 
3410 		}
3411 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3412 		                !(accum_moved && asoc->fast_retran_loss_recovery)) {
3413 			/* we are beyond the tsn in the sack  */
3414 			break;
3415 		}
3416 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3417 			/* either a RESEND, ACKED, or MARKED */
3418 			/* skip */
3419 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3420 				/* Continue striking FWD-TSN chunks */
3421 				tp1->rec.data.fwd_tsn_cnt++;
3422 			}
3423 			continue;
3424 		}
3425 		/*
3426 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3427 		 */
3428 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3429 			/*
3430 			 * No new acks were received for data sent to this
3431 			 * dest. Therefore, according to the SFR algo for
3432 			 * CMT, no data sent to this dest can be marked for
3433 			 * FR using this SACK.
3434 			 */
3435 			continue;
3436 		} else if (tp1->whoTo &&
3437 		           SCTP_TSN_GT(tp1->rec.data.tsn,
3438 		                       tp1->whoTo->this_sack_highest_newack) &&
3439 		           !(accum_moved && asoc->fast_retran_loss_recovery)) {
3440 			/*
3441 			 * CMT: New acks were received for data sent to
3442 			 * this dest. But no new acks were seen for data
3443 			 * sent after tp1. Therefore, according to the SFR
3444 			 * algo for CMT, tp1 cannot be marked for FR using
3445 			 * this SACK. This step covers part of the DAC algo
3446 			 * and the HTNA algo as well.
3447 			 */
3448 			continue;
3449 		}
3450 		/*
3451 		 * Here we check whether we have already done a FR
3452 		 * and, if so, whether the biggest TSN we saw in the sack is
3453 		 * smaller than the recovery point. If so we don't strike
3454 		 * the tsn... otherwise we CAN strike the TSN.
3455 		 */
3456 		/*
3457 		 * @@@ JRI: Check for CMT
3458 		 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3459 		 */
3460 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3461 			/*
3462 			 * Strike the TSN if in fast-recovery and cum-ack
3463 			 * moved.
3464 			 */
3465 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3466 				sctp_log_fr(biggest_tsn_newly_acked,
3467 					    tp1->rec.data.tsn,
3468 					    tp1->sent,
3469 					    SCTP_FR_LOG_STRIKE_CHUNK);
3470 			}
3471 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3472 				tp1->sent++;
3473 			}
3474 			if ((asoc->sctp_cmt_on_off > 0) &&
3475 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3476 				/*
3477 				 * CMT DAC algorithm: If the SACK's DAC flag is
3478 				 * 0, the lowest_newack test will not pass
3479 				 * because it would have been set to the
3480 				 * cumack earlier. If the chunk is not already
3481 				 * marked for rtx, this is not a mixed sack, and
3482 				 * tp1 is not between two sacked TSNs, then mark
3483 				 * it by one more.
3484 				 * NOTE that we mark by one additional time since the SACK DAC flag indicates that
3485 				 * two packets have been received after this missing TSN.
3486 				 */
3487 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3488 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3489 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3490 						sctp_log_fr(16 + num_dests_sacked,
3491 							    tp1->rec.data.tsn,
3492 							    tp1->sent,
3493 							    SCTP_FR_LOG_STRIKE_CHUNK);
3494 					}
3495 					tp1->sent++;
3496 				}
3497 			}
3498 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3499 		           (asoc->sctp_cmt_on_off == 0)) {
3500 			/*
3501 			 * For those that have done a FR we must take
3502 			 * special consideration if we strike. I.e the
3503 			 * biggest_newly_acked must be higher than the
3504 			 * sending_seq at the time we did the FR.
3505 			 */
3506 			if (
3507 #ifdef SCTP_FR_TO_ALTERNATE
3508 				/*
3509 				 * If FR's go to new networks, then we must only do
3510 				 * this for singly homed asoc's. However if the FR's
3511 				 * go to the same network (Armando's work) then it's
3512 				 * ok to FR multiple times.
3513 				 */
3514 				(asoc->numnets < 2)
3515 #else
3516 				(1)
3517 #endif
3518 				) {
3519 
3520 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3521 				                tp1->rec.data.fast_retran_tsn)) {
3522 					/*
3523 					 * Strike the TSN, since this ack is
3524 					 * beyond where things were when we
3525 					 * did a FR.
3526 					 */
3527 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3528 						sctp_log_fr(biggest_tsn_newly_acked,
3529 							    tp1->rec.data.tsn,
3530 							    tp1->sent,
3531 							    SCTP_FR_LOG_STRIKE_CHUNK);
3532 					}
3533 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3534 						tp1->sent++;
3535 					}
3536 					strike_flag = 1;
3537 					if ((asoc->sctp_cmt_on_off > 0) &&
3538 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3539 						/*
3540 						 * CMT DAC algorithm: If
3541 						 * the SACK's DAC flag is 0,
3542 						 * the lowest_newack test
3543 						 * will not pass because it
3544 						 * would have been set to
3545 						 * the cumack earlier. If the
3546 						 * chunk is not already marked
3547 						 * for rtx, this is not a mixed
3548 						 * sack, and tp1 is not between
3549 						 * two sacked TSNs, then mark
3550 						 * it by one more.
3551 						 * NOTE that we mark by one additional time since the SACK DAC flag indicates that
3552 						 * two packets have been received after this missing TSN.
3553 						 */
3554 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3555 						    (num_dests_sacked == 1) &&
3556 						    SCTP_TSN_GT(this_sack_lowest_newack,
3557 						                tp1->rec.data.tsn)) {
3558 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3559 								sctp_log_fr(32 + num_dests_sacked,
3560 									    tp1->rec.data.tsn,
3561 									    tp1->sent,
3562 									    SCTP_FR_LOG_STRIKE_CHUNK);
3563 							}
3564 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3565 								tp1->sent++;
3566 							}
3567 						}
3568 					}
3569 				}
3570 			}
3571 			/*
3572 			 * JRI: TODO: remove code for HTNA algo. CMT's
3573 			 * SFR algo covers HTNA.
3574 			 */
3575 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3576 		                       biggest_tsn_newly_acked)) {
3577 			/*
3578 			 * We don't strike these: this is the HTNA
3579 			 * algorithm, i.e. we don't strike if our TSN is
3580 			 * larger than the Highest TSN Newly Acked.
3581 			 */
3582 			;
3583 		} else {
3584 			/* Strike the TSN */
3585 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3586 				sctp_log_fr(biggest_tsn_newly_acked,
3587 					    tp1->rec.data.tsn,
3588 					    tp1->sent,
3589 					    SCTP_FR_LOG_STRIKE_CHUNK);
3590 			}
3591 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3592 				tp1->sent++;
3593 			}
3594 			if ((asoc->sctp_cmt_on_off > 0) &&
3595 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3596 				/*
3597 				 * CMT DAC algorithm: If the SACK's DAC flag is
3598 				 * 0, the lowest_newack test will not pass
3599 				 * because it would have been set to the
3600 				 * cumack earlier. If the chunk is not already
3601 				 * marked for rtx, this is not a mixed sack, and
3602 				 * tp1 is not between two sacked TSNs, then mark
3603 				 * it by one more.
3604 				 * NOTE that we mark by one additional time since the SACK DAC flag indicates that
3605 				 * two packets have been received after this missing TSN.
3606 				 */
3607 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3608 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3609 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3610 						sctp_log_fr(48 + num_dests_sacked,
3611 							    tp1->rec.data.tsn,
3612 							    tp1->sent,
3613 							    SCTP_FR_LOG_STRIKE_CHUNK);
3614 					}
3615 					tp1->sent++;
3616 				}
3617 			}
3618 		}
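		/*
		 * The chunk has taken enough strikes to reach
		 * SCTP_DATAGRAM_RESEND: fix up the accounting and queue it
		 * for fast retransmission.
		 */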
3619 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3620 			struct sctp_nets *alt;
3621 
3622 			/* fix counts and things */
3623 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3624 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3625 					       (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3626 					       tp1->book_size,
3627 					       (uint32_t)(uintptr_t)tp1->whoTo,
3628 					       tp1->rec.data.tsn);
3629 			}
3630 			if (tp1->whoTo) {
3631 				tp1->whoTo->net_ack++;
3632 				sctp_flight_size_decrease(tp1);
3633 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3634 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3635 												     tp1);
3636 				}
3637 			}
3638 
3639 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3640 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3641 					      asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3642 			}
3643 			/* add back to the rwnd */
3644 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3645 
3646 			/* remove from the total flight */
3647 			sctp_total_flight_decrease(stcb, tp1);
3648 
3649 			if ((stcb->asoc.prsctp_supported) &&
3650 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3651 				/* Has it been retransmitted tv_sec times? (The allowed number of retransmissions is stored in timetodrop.tv_sec.) */
3652 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3653 					/* Yes, so drop it */
3654 					if (tp1->data != NULL) {
3655 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3656 										 SCTP_SO_NOT_LOCKED);
3657 					}
3658 					/* Make sure to flag we had a FR */
3659 					if (tp1->whoTo != NULL) {
3660 						tp1->whoTo->net_ack++;
3661 					}
3662 					continue;
3663 				}
3664 			}
3665 			/* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
3666 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3667 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3668 					    0, SCTP_FR_MARKED);
3669 			}
3670 			if (strike_flag) {
3671 				/* This is a subsequent FR */
3672 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3673 			}
3674 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3675 			if (asoc->sctp_cmt_on_off > 0) {
3676 				/*
3677 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3678 				 * If CMT is being used, then pick dest with
3679 				 * largest ssthresh for any retransmission.
3680 				 */
3681 				tp1->no_fr_allowed = 1;
3682 				alt = tp1->whoTo;
3683 				/*sa_ignore NO_NULL_CHK*/
3684 				if (asoc->sctp_cmt_pf > 0) {
3685 					/* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
3686 					alt = sctp_find_alternate_net(stcb, alt, 2);
3687 				} else {
3688 					/* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
3689                                         /*sa_ignore NO_NULL_CHK*/
3690 					alt = sctp_find_alternate_net(stcb, alt, 1);
3691 				}
3692 				if (alt == NULL) {
3693 					alt = tp1->whoTo;
3694 				}
3695 				/*
3696 				 * CUCv2: If a different dest is picked for
3697 				 * the retransmission, then new
3698 				 * (rtx-)pseudo_cumack needs to be tracked
3699 				 * for orig dest. Let CUCv2 track new (rtx-)
3700 				 * pseudo-cumack always.
3701 				 */
3702 				if (tp1->whoTo) {
3703 					tp1->whoTo->find_pseudo_cumack = 1;
3704 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3705 				}
3706 
3707 			} else {/* CMT is OFF */
3708 
3709 #ifdef SCTP_FR_TO_ALTERNATE
3710 				/* Can we find an alternate? */
3711 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3712 #else
3713 				/*
3714 				 * default behavior is to NOT retransmit
3715 				 * FR's to an alternate. Armando Caro's
3716 				 * paper details why.
3717 				 */
3718 				alt = tp1->whoTo;
3719 #endif
3720 			}
3721 
3722 			tp1->rec.data.doing_fast_retransmit = 1;
3723 			tot_retrans++;
3724 			/* mark the sending seq for possible subsequent FR's */
3725 			/*
3726 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3727 			 * (uint32_t)tpi->rec.data.tsn);
3728 			 */
3729 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3730 				/*
3731 				 * If the send queue is empty then sending_seq is
3732 				 * the next sequence number that will be
3733 				 * assigned, so we subtract one from this to
3734 				 * get the one we last sent.
3735 				 */
3736 				tp1->rec.data.fast_retran_tsn = sending_seq;
3737 			} else {
3738 				/*
3739 				 * If there are chunks on the send queue
3740 				 * (unsent data that has made it from the
3741 				 * stream queues but not out the door), we
3742 				 * take the first one (which will have the
3743 				 * lowest TSN) and subtract one to get the
3744 				 * one we last sent.
3745 				 */
3746 				struct sctp_tmit_chunk *ttt;
3747 
3748 				ttt = TAILQ_FIRST(&asoc->send_queue);
3749 				tp1->rec.data.fast_retran_tsn =
3750 					ttt->rec.data.tsn;
3751 			}
3752 
3753 			if (tp1->do_rtt) {
3754 				/*
3755 				 * this chunk had an RTO calculation pending on
3756 				 * it; cancel it
3757 				 */
3758 				if ((tp1->whoTo != NULL) &&
3759 				    (tp1->whoTo->rto_needed == 0)) {
3760 					tp1->whoTo->rto_needed = 1;
3761 				}
3762 				tp1->do_rtt = 0;
3763 			}
3764 			if (alt != tp1->whoTo) {
3765 				/* yes, there is an alternate. */
3766 				sctp_free_remote_addr(tp1->whoTo);
3767 				/*sa_ignore FREED_MEMORY*/
3768 				tp1->whoTo = alt;
3769 				atomic_add_int(&alt->ref_count, 1);
3770 			}
3771 		}
3772 	}
3773 }
3774 
3775 struct sctp_tmit_chunk *
3776 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3777     struct sctp_association *asoc)
3778 {
3779 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3780 	struct timeval now;
3781 	int now_filled = 0;
3782 
3783 	if (asoc->prsctp_supported == 0) {
3784 		return (NULL);
3785 	}
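	/*
	 * Walk the sent queue in TSN order; the advanced peer ack point
	 * can only move forward across chunks that are abandoned
	 * (FORWARD_TSN_SKIP) or NR-acked.
	 */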
3786 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3787 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3788 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3789 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3790 			/* no chance to advance, out of here */
3791 			break;
3792 		}
3793 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3794 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3795 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3796 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3797 					       asoc->advanced_peer_ack_point,
3798 					       tp1->rec.data.tsn, 0, 0);
3799 			}
3800 		}
3801 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3802 			/*
3803 			 * We can't fwd-tsn past any that are reliable, aka
3804 			 * retransmitted until the asoc fails.
3805 			 */
3806 			break;
3807 		}
3808 		if (!now_filled) {
3809 			(void)SCTP_GETTIME_TIMEVAL(&now);
3810 			now_filled = 1;
3811 		}
3812 		/*
3813 		 * Now we have a chunk which is marked for another
3814 		 * retransmission to a PR-stream but may have run out of its
3815 		 * chances already, OR has been marked to skip. Can we skip
3816 		 * it if it's a resend?
3817 		 */
3818 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3819 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3820 			/*
3821 			 * Now is this one marked for resend and its time is
3822 			 * now up?
3823 			 */
3824 #if !(defined(__FreeBSD__)  && !defined(__Userspace__))
3825 			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3826 #else
3827 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3828 #endif
3829 				/* Yes so drop it */
3830 				if (tp1->data) {
3831 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3832 					    1, SCTP_SO_NOT_LOCKED);
3833 				}
3834 			} else {
3835 				/*
3836 				 * No, we are done when we hit one marked for
3837 				 * resend whose time has not expired.
3838 				 */
3839 				break;
3840 			}
3841 		}
3842 		/*
3843 		 * OK, if this chunk is marked to be dropped we can clean up
3844 		 * the chunk, advance our peer ack point, and check
3845 		 * the next chunk.
3846 		 */
3847 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3848 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3849 			/* the advanced peer ack point moves forward */
3850 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3851 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3852 				a_adv = tp1;
3853 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3854 				/* No update but we do save the chk */
3855 				a_adv = tp1;
3856 			}
3857 		} else {
3858 			/*
3859 			 * If it is still in RESEND we can advance no
3860 			 * further
3861 			 */
3862 			break;
3863 		}
3864 	}
3865 	return (a_adv);
3866 }
3867 
3868 static int
3869 sctp_fs_audit(struct sctp_association *asoc)
3870 {
3871 	struct sctp_tmit_chunk *chk;
3872 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3873 	int ret;
3874 #ifndef INVARIANTS
3875 	int entry_flight, entry_cnt;
3876 #endif
3877 
3878 	ret = 0;
3879 #ifndef INVARIANTS
3880 	entry_flight = asoc->total_flight;
3881 	entry_cnt = asoc->total_flight_count;
3882 #endif
3883 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3884 		return (0);
3885 
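	/*
	 * Classify every chunk on the sent queue; anything still counted
	 * as in flight (or sitting between states) indicates a flight-size
	 * accounting error.
	 */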
3886 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3887 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3888 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3889 			            chk->rec.data.tsn,
3890 			            chk->send_size,
3891 			            chk->snd_count);
3892 			inflight++;
3893 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3894 			resend++;
3895 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3896 			inbetween++;
3897 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3898 			above++;
3899 		} else {
3900 			acked++;
3901 		}
3902 	}
3903 
3904 	if ((inflight > 0) || (inbetween > 0)) {
3905 #ifdef INVARIANTS
3906 		panic("Flight size-express incorrect?\n");
3907 #else
3908 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3909 		            entry_flight, entry_cnt);
3910 
3911 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3912 			    inflight, inbetween, resend, above, acked);
3913 		ret = 1;
3914 #endif
3915 	}
3916 	return (ret);
3917 }
3918 
3919 
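/*
 * Move a chunk that was used as a window probe back to the resend state
 * (unless it has already been acked or abandoned) so that it goes out
 * again as ordinary data once the window reopens.
 */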
3920 static void
3921 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3922 	                   struct sctp_association *asoc,
3923 			   struct sctp_tmit_chunk *tp1)
3924 {
3925 	tp1->window_probe = 0;
3926 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3927 		/* TSN was skipped; we do NOT move it back. */
3928 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3929 			       tp1->whoTo ? tp1->whoTo->flight_size : 0,
3930 			       tp1->book_size,
3931 			       (uint32_t)(uintptr_t)tp1->whoTo,
3932 			       tp1->rec.data.tsn);
3933 		return;
3934 	}
3935 	/* First setup this by shrinking flight */
3936 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3937 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3938 									     tp1);
3939 	}
3940 	sctp_flight_size_decrease(tp1);
3941 	sctp_total_flight_decrease(stcb, tp1);
3942 	/* Now mark for resend */
3943 	tp1->sent = SCTP_DATAGRAM_RESEND;
3944 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3945 
3946 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3947 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3948 			       tp1->whoTo->flight_size,
3949 			       tp1->book_size,
3950 			       (uint32_t)(uintptr_t)tp1->whoTo,
3951 			       tp1->rec.data.tsn);
3952 	}
3953 }
3954 
3955 void
3956 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3957                          uint32_t rwnd, int *abort_now, int ecne_seen)
3958 {
3959 	struct sctp_nets *net;
3960 	struct sctp_association *asoc;
3961 	struct sctp_tmit_chunk *tp1, *tp2;
3962 	uint32_t old_rwnd;
3963 	int win_probe_recovery = 0;
3964 	int win_probe_recovered = 0;
3965 	int j, done_once = 0;
3966 	int rto_ok = 1;
3967 	uint32_t send_s;
3968 
3969 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3970 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3971 		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3972 	}
3973 	SCTP_TCB_LOCK_ASSERT(stcb);
3974 #ifdef SCTP_ASOCLOG_OF_TSNS
3975 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3976 	stcb->asoc.cumack_log_at++;
3977 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3978 		stcb->asoc.cumack_log_at = 0;
3979 	}
3980 #endif
3981 	asoc = &stcb->asoc;
3982 	old_rwnd = asoc->peers_rwnd;
3983 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3984 		/* old ack */
3985 		return;
3986 	} else if (asoc->last_acked_seq == cumack) {
3987 		/* Window update sack */
3988 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3989 						    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3990 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3991 			/* SWS sender side engages */
3992 			asoc->peers_rwnd = 0;
3993 		}
3994 		if (asoc->peers_rwnd > old_rwnd) {
3995 			goto again;
3996 		}
3997 		return;
3998 	}
3999 
4000 	/* First setup for CC stuff */
4001 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4002 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
4003 			/* Drag along the window_tsn for cwr's */
4004 			net->cwr_window_tsn = cumack;
4005 		}
4006 		net->prev_cwnd = net->cwnd;
4007 		net->net_ack = 0;
4008 		net->net_ack2 = 0;
4009 
4010 		/*
4011 		 * CMT: Reset CUC and Fast recovery algo variables before
4012 		 * SACK processing
4013 		 */
4014 		net->new_pseudo_cumack = 0;
4015 		net->will_exit_fast_recovery = 0;
4016 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4017 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4018 		}
4019 	}
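	/*
	 * Sanity check: the cumulative ack cannot reach beyond the highest
	 * TSN actually handed out (send_s is one past the last TSN sent).
	 */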
4020 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4021 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4022 				 sctpchunk_listhead);
4023 		send_s = tp1->rec.data.tsn + 1;
4024 	} else {
4025 		send_s = asoc->sending_seq;
4026 	}
4027 	if (SCTP_TSN_GE(cumack, send_s)) {
4028 		struct mbuf *op_err;
4029 		char msg[SCTP_DIAG_INFO_LEN];
4030 
4031 		*abort_now = 1;
4032 		/* XXX */
4033 		SCTP_SNPRINTF(msg, sizeof(msg),
4034 		              "Cum ack %8.8x greater or equal than TSN %8.8x",
4035 		              cumack, send_s);
4036 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4037 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4038 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4039 		return;
4040 	}
4041 	asoc->this_sack_highest_gap = cumack;
4042 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4043 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4044 			       stcb->asoc.overall_error_count,
4045 			       0,
4046 			       SCTP_FROM_SCTP_INDATA,
4047 			       __LINE__);
4048 	}
4049 	stcb->asoc.overall_error_count = 0;
4050 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4051 		/* process the new consecutive TSN first */
4052 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4053 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4054 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4055 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4056 				}
4057 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4058 					/*
4059 					 * If it is less than ACKED, it is
4060 					 * now no longer in flight. Higher
4061 					 * values may occur during marking.
4062 					 */
4063 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4064 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4065 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4066 								       tp1->whoTo->flight_size,
4067 								       tp1->book_size,
4068 								       (uint32_t)(uintptr_t)tp1->whoTo,
4069 								       tp1->rec.data.tsn);
4070 						}
4071 						sctp_flight_size_decrease(tp1);
4072 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4073 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4074 														     tp1);
4075 						}
4076 						/* sa_ignore NO_NULL_CHK */
4077 						sctp_total_flight_decrease(stcb, tp1);
4078 					}
4079 					tp1->whoTo->net_ack += tp1->send_size;
4080 					if (tp1->snd_count < 2) {
4081 						/*
4082 						 * True non-retransmitted
4083 						 * chunk
4084 						 */
4085 						tp1->whoTo->net_ack2 +=
4086 							tp1->send_size;
4087 
4088 						/* update RTO too? */
4089 						if (tp1->do_rtt) {
4090 							if (rto_ok &&
4091 							    sctp_calculate_rto(stcb,
4092 									       &stcb->asoc,
4093 									       tp1->whoTo,
4094 									       &tp1->sent_rcv_time,
4095 									       SCTP_RTT_FROM_DATA)) {
4096 								rto_ok = 0;
4097 							}
4098 							if (tp1->whoTo->rto_needed == 0) {
4099 								tp1->whoTo->rto_needed = 1;
4100 							}
4101 							tp1->do_rtt = 0;
4102 						}
4103 					}
4104 					/*
4105 					 * CMT: CUCv2 algorithm. From the
4106 					 * cumack'd TSNs, for each TSN being
4107 					 * acked for the first time, set the
4108 					 * following variables for the
4109 					 * corresp destination.
4110 					 * new_pseudo_cumack will trigger a
4111 					 * cwnd update.
4112 					 * find_(rtx_)pseudo_cumack will
4113 					 * trigger search for the next
4114 					 * expected (rtx-)pseudo-cumack.
4115 					 */
4116 					tp1->whoTo->new_pseudo_cumack = 1;
4117 					tp1->whoTo->find_pseudo_cumack = 1;
4118 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4119 
4120 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4121 						/* sa_ignore NO_NULL_CHK */
4122 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4123 					}
4124 				}
4125 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4126 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4127 				}
4128 				if (tp1->rec.data.chunk_was_revoked) {
4129 					/* deflate the cwnd */
4130 					tp1->whoTo->cwnd -= tp1->book_size;
4131 					tp1->rec.data.chunk_was_revoked = 0;
4132 				}
4133 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4134 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4135 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4136 #ifdef INVARIANTS
4137 					} else {
4138 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4139 #endif
4140 					}
4141 				}
4142 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4143 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4144 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4145 					asoc->trigger_reset = 1;
4146 				}
4147 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4148 				if (tp1->data) {
4149 					/* sa_ignore NO_NULL_CHK */
4150 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4151 					sctp_m_freem(tp1->data);
4152 					tp1->data = NULL;
4153 				}
4154 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4155 					sctp_log_sack(asoc->last_acked_seq,
4156 						      cumack,
4157 						      tp1->rec.data.tsn,
4158 						      0,
4159 						      0,
4160 						      SCTP_LOG_FREE_SENT);
4161 				}
4162 				asoc->sent_queue_cnt--;
4163 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4164 			} else {
4165 				break;
4166 			}
4167 		}
4168 
4169 	}
4170 #if defined(__Userspace__)
4171 	if (stcb->sctp_ep->recv_callback) {
4172 		if (stcb->sctp_socket) {
4173 			uint32_t inqueue_bytes, sb_free_now;
4174 			struct sctp_inpcb *inp;
4175 
4176 			inp = stcb->sctp_ep;
4177 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4178 			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4179 
4180 			/* check if the amount free in the send socket buffer crossed the threshold */
4181 			if (inp->send_callback &&
4182 			    (((inp->send_sb_threshold > 0) &&
4183 			      (sb_free_now >= inp->send_sb_threshold) &&
4184 			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
4185 			     (inp->send_sb_threshold == 0))) {
4186 				atomic_add_int(&stcb->asoc.refcnt, 1);
4187 				SCTP_TCB_UNLOCK(stcb);
4188 				inp->send_callback(stcb->sctp_socket, sb_free_now);
4189 				SCTP_TCB_LOCK(stcb);
4190 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4191 			}
4192 		}
4193 	} else if (stcb->sctp_socket) {
4194 #else
4195 	/* sa_ignore NO_NULL_CHK */
4196 	if (stcb->sctp_socket) {
4197 #endif
4198 #if defined(__APPLE__) && !defined(__Userspace__)
4199 		struct socket *so;
4200 
4201 #endif
4202 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4203 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4204 			/* sa_ignore NO_NULL_CHK */
4205 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4206 		}
4207 #if defined(__APPLE__) && !defined(__Userspace__)
4208 		so = SCTP_INP_SO(stcb->sctp_ep);
4209 		atomic_add_int(&stcb->asoc.refcnt, 1);
4210 		SCTP_TCB_UNLOCK(stcb);
4211 		SCTP_SOCKET_LOCK(so, 1);
4212 		SCTP_TCB_LOCK(stcb);
4213 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4214 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4215 			/* assoc was freed while we were unlocked */
4216 			SCTP_SOCKET_UNLOCK(so, 1);
4217 			return;
4218 		}
4219 #endif
4220 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4221 #if defined(__APPLE__) && !defined(__Userspace__)
4222 		SCTP_SOCKET_UNLOCK(so, 1);
4223 #endif
4224 	} else {
4225 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4226 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4227 		}
4228 	}
4229 
4230 	/* JRS - Use the congestion control given in the CC module */
4231 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4232 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4233 			if (net->net_ack2 > 0) {
4234 				/*
4235 				 * Karn's rule applies to clearing error count, this
4236 				 * is optional.
4237 				 */
4238 				net->error_count = 0;
4239 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4240 					/* addr came good */
4241 					net->dest_state |= SCTP_ADDR_REACHABLE;
4242 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4243 					                0, (void *)net, SCTP_SO_NOT_LOCKED);
4244 				}
4245 				if (net == stcb->asoc.primary_destination) {
4246 					if (stcb->asoc.alternate) {
4247 						/* release the alternate, primary is good */
4248 						sctp_free_remote_addr(stcb->asoc.alternate);
4249 						stcb->asoc.alternate = NULL;
4250 					}
4251 				}
4252 				if (net->dest_state & SCTP_ADDR_PF) {
4253 					net->dest_state &= ~SCTP_ADDR_PF;
4254 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4255 					                stcb->sctp_ep, stcb, net,
4256 					                SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4257 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4258 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4259 					/* Done with this net */
4260 					net->net_ack = 0;
4261 				}
4262 				/* restore any doubled timers */
4263 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4264 				if (net->RTO < stcb->asoc.minrto) {
4265 					net->RTO = stcb->asoc.minrto;
4266 				}
4267 				if (net->RTO > stcb->asoc.maxrto) {
4268 					net->RTO = stcb->asoc.maxrto;
4269 				}
4270 			}
4271 		}
4272 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4273 	}
4274 	asoc->last_acked_seq = cumack;
4275 
4276 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4277 		/* nothing left in-flight */
4278 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4279 			net->flight_size = 0;
4280 			net->partial_bytes_acked = 0;
4281 		}
4282 		asoc->total_flight = 0;
4283 		asoc->total_flight_count = 0;
4284 	}
4285 
4286 	/* RWND update */
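	/*
	 * The usable window is the peer's advertised rwnd minus what is
	 * still in flight (plus a per-chunk overhead allowance).
	 */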
4287 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4288 					    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4289 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4290 		/* SWS sender side engages */
4291 		asoc->peers_rwnd = 0;
4292 	}
4293 	if (asoc->peers_rwnd > old_rwnd) {
4294 		win_probe_recovery = 1;
4295 	}
4296 	/* Now assure a timer where data is queued at */
4297 again:
4298 	j = 0;
4299 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4300 		if (win_probe_recovery && (net->window_probe)) {
4301 			win_probe_recovered = 1;
4302 			/*
4303 			 * Find first chunk that was used with window probe
4304 			 * and clear the sent
4305 			 */
4306 			/* sa_ignore FREED_MEMORY */
4307 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4308 				if (tp1->window_probe) {
4309 					/* move back to data send queue */
4310 					sctp_window_probe_recovery(stcb, asoc, tp1);
4311 					break;
4312 				}
4313 			}
4314 		}
4315 		if (net->flight_size) {
4316 			j++;
4317 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4318 			if (net->window_probe) {
4319 				net->window_probe = 0;
4320 			}
4321 		} else {
4322 			if (net->window_probe) {
4323 				/* In window probes we must assure a timer is still running there */
4324 				net->window_probe = 0;
4325 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4326 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4327 				}
4328 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4329 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4330 				                stcb, net,
4331 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4332 			}
4333 		}
4334 	}
4335 	if ((j == 0) &&
4336 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4337 	    (asoc->sent_queue_retran_cnt == 0) &&
4338 	    (win_probe_recovered == 0) &&
4339 	    (done_once == 0)) {
4340 		/* Huh, this should not happen unless all packets
4341 		 * are PR-SCTP and marked to skip, of course.
4342 		 */
4343 		if (sctp_fs_audit(asoc)) {
4344 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4345 				net->flight_size = 0;
4346 			}
4347 			asoc->total_flight = 0;
4348 			asoc->total_flight_count = 0;
4349 			asoc->sent_queue_retran_cnt = 0;
4350 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4351 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4352 					sctp_flight_size_increase(tp1);
4353 					sctp_total_flight_increase(stcb, tp1);
4354 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4355 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4356 				}
4357 			}
4358 		}
4359 		done_once = 1;
4360 		goto again;
4361 	}
4362 	/**********************************/
4363 	/* Now what about shutdown issues */
4364 	/**********************************/
4365 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4366 		/* nothing left on sendqueue.. consider done */
4367 		/* clean up */
4368 		if ((asoc->stream_queue_cnt == 1) &&
4369 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4370 		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4371 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
4372 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4373 		}
4374 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4375 		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4376 		    (asoc->stream_queue_cnt == 1) &&
4377 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4378 			struct mbuf *op_err;
4379 
4380 			*abort_now = 1;
4381 			/* XXX */
4382 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4383 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4384 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4385 			return;
4386 		}
4387 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4388 		    (asoc->stream_queue_cnt == 0)) {
4389 			struct sctp_nets *netp;
4390 
4391 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4392 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4393 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4394 			}
4395 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4396 			sctp_stop_timers_for_shutdown(stcb);
4397 			if (asoc->alternate) {
4398 				netp = asoc->alternate;
4399 			} else {
4400 				netp = asoc->primary_destination;
4401 			}
4402 			sctp_send_shutdown(stcb, netp);
4403 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4404 					 stcb->sctp_ep, stcb, netp);
4405 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4406 					 stcb->sctp_ep, stcb, NULL);
4407 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4408 			   (asoc->stream_queue_cnt == 0)) {
4409 			struct sctp_nets *netp;
4410 
4411 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4412 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4413 			sctp_stop_timers_for_shutdown(stcb);
4414 			if (asoc->alternate) {
4415 				netp = asoc->alternate;
4416 			} else {
4417 				netp = asoc->primary_destination;
4418 			}
4419 			sctp_send_shutdown_ack(stcb, netp);
4420 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4421 					 stcb->sctp_ep, stcb, netp);
4422 		}
4423 	}
4424 	/*********************************************/
4425 	/* Here we perform PR-SCTP procedures        */
4426 	/* (section 4.2)                             */
4427 	/*********************************************/
4428 	/* C1. update advancedPeerAckPoint */
4429 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4430 		asoc->advanced_peer_ack_point = cumack;
4431 	}
4432 	/* PR-SCTP issues need to be addressed too */
4433 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4434 		struct sctp_tmit_chunk *lchk;
4435 		uint32_t old_adv_peer_ack_point;
4436 
4437 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4438 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4439 		/* C3. See if we need to send a Fwd-TSN */
4440 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4441 			/*
4442 			 * ISSUE with ECN, see FWD-TSN processing.
4443 			 */
4444 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4445 				send_forward_tsn(stcb, asoc);
4446 			} else if (lchk) {
4447 				/* try to FR fwd-tsn's that get lost too */
4448 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4449 					send_forward_tsn(stcb, asoc);
4450 				}
4451 			}
4452 		}
4453 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4454 			if (lchk->whoTo != NULL) {
4455 				break;
4456 			}
4457 		}
4458 		if (lchk != NULL) {
4459 			/* Assure a timer is up */
4460 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4461 			                 stcb->sctp_ep, stcb, lchk->whoTo);
4462 		}
4463 	}
4464 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4465 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4466 			       rwnd,
4467 			       stcb->asoc.peers_rwnd,
4468 			       stcb->asoc.total_flight,
4469 			       stcb->asoc.total_output_queue_size);
4470 	}
4471 }
4472 
4473 void
4474 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4475                  struct sctp_tcb *stcb,
4476                  uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4477                  int *abort_now, uint8_t flags,
4478                  uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4479 {
4480 	struct sctp_association *asoc;
4481 	struct sctp_tmit_chunk *tp1, *tp2;
4482 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4483 	uint16_t wake_him = 0;
4484 	uint32_t send_s = 0;
4485 	long j;
4486 	int accum_moved = 0;
4487 	int will_exit_fast_recovery = 0;
4488 	uint32_t a_rwnd, old_rwnd;
4489 	int win_probe_recovery = 0;
4490 	int win_probe_recovered = 0;
4491 	struct sctp_nets *net = NULL;
4492 	int done_once;
4493 	int rto_ok = 1;
4494 	uint8_t reneged_all = 0;
4495 	uint8_t cmt_dac_flag;
4496 	/*
4497 	 * we take any chance we can to service our queues since we cannot
4498 	 * get awoken when the socket is read from :<
4499 	 */
4500 	/*
4501 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4502 	 * old sack, if so discard. 2) If there is nothing left in the send
4503 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4504 	 * too, update any rwnd change and verify no timers are running.
4505 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4506 	 * moved process these first and note that it moved. 4) Process any
4507 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4508 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4509 	 * sync up flightsizes and things, stop all timers and also check
4510 	 * for shutdown_pending state. If so then go ahead and send off the
4511 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4512 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4513 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4514 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4515 	 * if in shutdown_recv state.
4516 	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	/* CMT DAC algo */
	this_sack_lowest_newack = 0;
	SCTP_STAT_INCR(sctps_slowpath_sack);
	last_tsn = cum_ack;
	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
#ifdef SCTP_ASOCLOG_OF_TSNS
	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
	stcb->asoc.cumack_log_at++;
	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
		stcb->asoc.cumack_log_at = 0;
	}
#endif
	a_rwnd = rwnd;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
	}

	old_rwnd = stcb->asoc.peers_rwnd;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		               stcb->asoc.overall_error_count,
		               0,
		               SCTP_FROM_SCTP_INDATA,
		               __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	asoc = &stcb->asoc;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
		sctp_log_sack(asoc->last_acked_seq,
		              cum_ack,
		              0,
		              num_seg,
		              num_dup,
		              SCTP_LOG_NEW_SACK);
	}
	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
		uint16_t i;
		uint32_t *dupdata, dblock;

		for (i = 0; i < num_dup; i++) {
			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
			                                    sizeof(uint32_t), (uint8_t *)&dblock);
			if (dupdata == NULL) {
				break;
			}
			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
		}
	}
	/* reality check */
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		tp1 = TAILQ_LAST(&asoc->sent_queue,
				 sctpchunk_listhead);
		send_s = tp1->rec.data.tsn + 1;
	} else {
		tp1 = NULL;
		send_s = asoc->sending_seq;
	}
	if (SCTP_TSN_GE(cum_ack, send_s)) {
		struct mbuf *op_err;
		char msg[SCTP_DIAG_INFO_LEN];

		/*
		 * no way, we have not even sent this TSN out yet.
		 * Peer is hopelessly messed up with us.
		 */
		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
			    cum_ack, send_s);
		if (tp1) {
			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
				    tp1->rec.data.tsn, (void *)tp1);
		}
	hopeless_peer:
		*abort_now = 1;
		/* XXX */
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Cum ack %8.8x greater or equal than TSN %8.8x",
		              cum_ack, send_s);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		return;
	}
	/**********************/
	/* 1) check the range */
	/**********************/
	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
		/* acking something behind */
		return;
	}

	/* update the Rwnd of the peer */
	if (TAILQ_EMPTY(&asoc->sent_queue) &&
	    TAILQ_EMPTY(&asoc->send_queue) &&
	    (asoc->stream_queue_cnt == 0)) {
		/* nothing left on send/sent and strmq */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
			                  asoc->peers_rwnd, 0, 0, a_rwnd);
		}
		asoc->peers_rwnd = a_rwnd;
		if (asoc->sent_queue_retran_cnt) {
			asoc->sent_queue_retran_cnt = 0;
		}
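		/*
		 * Sender-side silly window syndrome (SWS) avoidance: if the
		 * peer advertises less space than the sctp_sws_sender
		 * threshold, treat the window as closed rather than
		 * dribbling out tiny packets; e.g. with a 1420-byte
		 * threshold, an advertised window of 500 bytes is recorded
		 * as 0.
		 */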
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		/* stop any timers */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
			net->partial_bytes_acked = 0;
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		return;
	}
	/*
	 * We initialize net_ack and net_ack2 to 0. These are used to track
	 * two things: net_ack accumulates the total byte count acked, and
	 * net_ack2 the total bytes acked that are unambiguous, i.e. were
	 * never retransmitted. We track both on a per destination address
	 * basis.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
			/* Drag along the window_tsn for cwr's */
			net->cwr_window_tsn = cum_ack;
		}
		net->prev_cwnd = net->cwnd;
		net->net_ack = 0;
		net->net_ack2 = 0;

		/*
		 * CMT: Reset CUC and Fast recovery algo variables before
		 * SACK processing
		 */
		net->new_pseudo_cumack = 0;
		net->will_exit_fast_recovery = 0;
		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
		}

		/*
		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
		 * to be greater than the cumack. Also reset saw_newack to 0
		 * for all dests.
		 */
		net->saw_newack = 0;
		net->this_sack_highest_newack = last_tsn;
	}
	/* process the new consecutive TSN first */
	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
				accum_moved = 1;
				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
					/*
					 * If it is less than ACKED, it is
					 * now no longer in flight. Higher
					 * values may occur during marking.
					 */
					if ((tp1->whoTo->dest_state &
					     SCTP_ADDR_UNCONFIRMED) &&
					    (tp1->snd_count < 2)) {
						/*
						 * There was no retransmission,
						 * the address is unconfirmed,
						 * and we sent there and are
						 * now SACKed: the address is
						 * confirmed, so mark it so.
						 */
						tp1->whoTo->dest_state &=
							~SCTP_ADDR_UNCONFIRMED;
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
							               tp1->whoTo->flight_size,
							               tp1->book_size,
							               (uint32_t)(uintptr_t)tp1->whoTo,
							               tp1->rec.data.tsn);
						}
						sctp_flight_size_decrease(tp1);
						sctp_total_flight_decrease(stcb, tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
														     tp1);
						}
					}
					tp1->whoTo->net_ack += tp1->send_size;

					/* CMT SFR and DAC algos */
					this_sack_lowest_newack = tp1->rec.data.tsn;
					tp1->whoTo->saw_newack = 1;

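					/*
					 * Karn's algorithm: only chunks that
					 * were transmitted exactly once give
					 * an unambiguous RTT sample; rto_ok
					 * further restricts the RTO update
					 * below to the first sample taken
					 * from this SACK.
					 */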
					if (tp1->snd_count < 2) {
						/*
						 * True non-retransmitted
						 * chunk
						 */
						tp1->whoTo->net_ack2 +=
							tp1->send_size;

						/* update RTO too? */
						if (tp1->do_rtt) {
							if (rto_ok &&
							    sctp_calculate_rto(stcb,
									       &stcb->asoc,
									       tp1->whoTo,
									       &tp1->sent_rcv_time,
									       SCTP_RTT_FROM_DATA)) {
								rto_ok = 0;
							}
							if (tp1->whoTo->rto_needed == 0) {
								tp1->whoTo->rto_needed = 1;
							}
							tp1->do_rtt = 0;
						}
					}
					/*
					 * CMT: CUCv2 algorithm. From the
					 * cumack'd TSNs, for each TSN being
					 * acked for the first time, set the
					 * following variables for the
					 * corresponding destination.
					 * new_pseudo_cumack will trigger a
					 * cwnd update.
					 * find_(rtx_)pseudo_cumack will
					 * trigger a search for the next
					 * expected (rtx-)pseudo-cumack.
					 */
					tp1->whoTo->new_pseudo_cumack = 1;
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;

					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
						sctp_log_sack(asoc->last_acked_seq,
						              cum_ack,
						              tp1->rec.data.tsn,
						              0,
						              0,
						              SCTP_LOG_TSN_ACKED);
					}
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
					}
				}
				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
					sctp_audit_log(0xB3,
					               (asoc->sent_queue_retran_cnt & 0x000000ff));
#endif
				}
				if (tp1->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					tp1->whoTo->cwnd -= tp1->book_size;
					tp1->rec.data.chunk_was_revoked = 0;
				}
				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
					tp1->sent = SCTP_DATAGRAM_ACKED;
				}
			}
		} else {
			break;
		}
	}
	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
	/* always set this up to cum-ack */
	asoc->this_sack_highest_gap = last_tsn;

	if ((num_seg > 0) || (num_nr_seg > 0)) {
		/*
		 * this_sack_highest_gap will increase while handling NEW
		 * segments; this_sack_highest_newack will increase while
		 * handling NEWLY ACKED chunks; this_sack_lowest_newack is
		 * used for the CMT DAC algo; saw_newack will also change.
		 */
		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
			&biggest_tsn_newly_acked, &this_sack_lowest_newack,
			num_seg, num_nr_seg, &rto_ok)) {
			wake_him++;
		}
		/*
		 * validate the biggest_tsn_acked in the gap acks if
		 * strict adherence is wanted.
		 */
		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
			/*
			 * peer is either confused or we are under
			 * attack. We must abort.
			 */
			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
				    biggest_tsn_acked, send_s);
			goto hopeless_peer;
		}
	}
	/********************************************/
	/* cancel ALL T3-send timers if accum moved */
	/********************************************/
	if (asoc->sctp_cmt_on_off > 0) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->new_pseudo_cumack)
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				                stcb, net,
				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
		}
	} else {
		if (accum_moved) {
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
			}
		}
	}
	/********************************************/
	/* drop the acked chunks from the sentqueue */
	/********************************************/
	asoc->last_acked_seq = cum_ack;

	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
			break;
		}
		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
			}
		}
		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
			asoc->trigger_reset = 1;
		}
		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
		if (PR_SCTP_ENABLED(tp1->flags)) {
			if (asoc->pr_sctp_cnt != 0)
				asoc->pr_sctp_cnt--;
		}
		asoc->sent_queue_cnt--;
		if (tp1->data) {
			/* sa_ignore NO_NULL_CHK */
			sctp_free_bufspace(stcb, asoc, tp1, 1);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
				asoc->sent_queue_cnt_removeable--;
			}
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
			sctp_log_sack(asoc->last_acked_seq,
			              cum_ack,
			              tp1->rec.data.tsn,
			              0,
			              0,
			              SCTP_LOG_FREE_SENT);
		}
		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
		wake_him++;
	}
	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
#ifdef INVARIANTS
		panic("Warning flight size is positive and should be 0");
#else
		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
		            asoc->total_flight);
#endif
		asoc->total_flight = 0;
	}

#if defined(__Userspace__)
	if (stcb->sctp_ep->recv_callback) {
		if (stcb->sctp_socket) {
			uint32_t inqueue_bytes, sb_free_now;
			struct sctp_inpcb *inp;

			inp = stcb->sctp_ep;
			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);

			/* check if the amount free in the send socket buffer crossed the threshold */
			if (inp->send_callback &&
			   (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
			    (inp->send_sb_threshold == 0))) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				inp->send_callback(stcb->sctp_socket, sb_free_now);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
	} else if ((wake_him) && (stcb->sctp_socket)) {
#else
	/* sa_ignore NO_NULL_CHK */
	if ((wake_him) && (stcb->sctp_socket)) {
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

#endif
		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
		}
	}

	if (asoc->fast_retran_loss_recovery && accum_moved) {
		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
			/* Set up so we will exit RFC2582 fast recovery */
			will_exit_fast_recovery = 1;
		}
	}
	/*
	 * Check for revoked fragments:
	 *
	 * If the previous SACK had no frags, then nothing can have been
	 * revoked. If the previous SACK had frags and we now have frags
	 * too (num_seg > 0), call sctp_check_for_revoked() to tell whether
	 * the peer revoked some of them; a chunk that was reported in an
	 * earlier gap block but is missing from this SACK has been revoked
	 * (reneged) and must re-enter the flight. Otherwise the peer
	 * revoked all ACKED fragments, since we had some before and now
	 * have NONE.
	 */

	if (num_seg) {
		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
		asoc->saw_sack_with_frags = 1;
	} else if (asoc->saw_sack_with_frags) {
		int cnt_revoked = 0;

		/* Peer revoked all datagrams marked or acked */
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
				tp1->sent = SCTP_DATAGRAM_SENT;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
					               tp1->whoTo->flight_size,
					               tp1->book_size,
					               (uint32_t)(uintptr_t)tp1->whoTo,
					               tp1->rec.data.tsn);
				}
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
				tp1->rec.data.chunk_was_revoked = 1;
				/*
				 * To ensure that this increase in
				 * flightsize, which is artificial,
				 * does not throttle the sender, we
				 * also increase the cwnd
				 * artificially.
				 */
				tp1->whoTo->cwnd += tp1->book_size;
				cnt_revoked++;
			}
		}
		if (cnt_revoked) {
			reneged_all = 1;
		}
		asoc->saw_sack_with_frags = 0;
	}
	if (num_nr_seg > 0)
		asoc->saw_sack_with_nr_frags = 1;
	else
		asoc->saw_sack_with_nr_frags = 0;

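	/*
	 * When an ECN-Echo was seen in this packet (ecne_seen != 0), the
	 * error-count clearing and per-SACK cwnd update below are skipped;
	 * the ECNE handling in the CC module adjusts the cwnd instead.
	 */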
	/* JRS - Use the congestion control given in the CC module */
	if (ecne_seen == 0) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->net_ack2 > 0) {
				/*
				 * Karn's rule applies to clearing the error
				 * count; this is optional.
				 */
				net->error_count = 0;
				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
					/* addr came good */
					net->dest_state |= SCTP_ADDR_REACHABLE;
					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
					                0, (void *)net, SCTP_SO_NOT_LOCKED);
				}

				if (net == stcb->asoc.primary_destination) {
					if (stcb->asoc.alternate) {
						/* release the alternate, primary is good */
						sctp_free_remote_addr(stcb->asoc.alternate);
						stcb->asoc.alternate = NULL;
					}
				}

				if (net->dest_state & SCTP_ADDR_PF) {
					net->dest_state &= ~SCTP_ADDR_PF;
					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
					                stcb->sctp_ep, stcb, net,
					                SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
					/* Done with this net */
					net->net_ack = 0;
				}
				/* restore any doubled timers */
				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
				if (net->RTO < stcb->asoc.minrto) {
					net->RTO = stcb->asoc.minrto;
				}
				if (net->RTO > stcb->asoc.maxrto) {
					net->RTO = stcb->asoc.maxrto;
				}
			}
		}
		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
	}

	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left in-flight */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			/* stop all timers */
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			                stcb, net,
			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
			net->flight_size = 0;
			net->partial_bytes_acked = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
	}

	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue... consider done */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
			                  asoc->peers_rwnd, 0, 0, a_rwnd);
		}
		asoc->peers_rwnd = a_rwnd;
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
		}
		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->stream_queue_cnt == 1) &&
		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
			struct mbuf *op_err;

			*abort_now = 1;
			/* XXX */
			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			struct sctp_nets *netp;

			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			}
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			if (asoc->alternate) {
				netp = asoc->alternate;
			} else {
				netp = asoc->primary_destination;
			}
			sctp_send_shutdown(stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					 stcb->sctp_ep, stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					 stcb->sctp_ep, stcb, NULL);
			return;
		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
			   (asoc->stream_queue_cnt == 0)) {
			struct sctp_nets *netp;

			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			if (asoc->alternate) {
				netp = asoc->alternate;
			} else {
				netp = asoc->primary_destination;
			}
			sctp_send_shutdown_ack(stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			                 stcb->sctp_ep, stcb, netp);
			return;
		}
	}
	/*
	 * Now here we are going to recycle net_ack for a different use...
	 * HEADS UP.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		net->net_ack = 0;
	}

	/*
	 * CMT DAC algorithm: If the SACK's DAC flag was 0, then no extra
	 * marking is to be done. Setting this_sack_lowest_newack to the
	 * cum_ack will automatically ensure that.
	 */
	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
	    (cmt_dac_flag == 0)) {
		this_sack_lowest_newack = cum_ack;
	}
	if ((num_seg > 0) || (num_nr_seg > 0)) {
		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
		                           biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
	}
	/* JRS - Use the congestion control given in the CC module */
	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);

	/* Now are we exiting loss recovery ? */
	if (will_exit_fast_recovery) {
		/* Ok, we must exit fast recovery */
		asoc->fast_retran_loss_recovery = 0;
	}
	if ((asoc->sat_t3_loss_recovery) &&
	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
		/* end satellite t3 loss recovery */
		asoc->sat_t3_loss_recovery = 0;
	}
	/*
	 * CMT Fast recovery
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (net->will_exit_fast_recovery) {
			/* Ok, we must exit fast recovery */
			net->fast_retran_loss_recovery = 0;
		}
	}

	/* Adjust and set the new rwnd value */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
		                  asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
	}
	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
	                                    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
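	/*
	 * peers_rwnd above is the advertised window minus what is still in
	 * flight, charging a fixed per-chunk overhead:
	 *   peers_rwnd = a_rwnd - (total_flight +
	 *                          total_flight_count * sctp_peer_chunk_oh)
	 * For example, an a_rwnd of 20000 with 3000 bytes in flight over
	 * 10 chunks and an overhead of 256 bytes each gives
	 * 20000 - (3000 + 2560) = 14440 bytes.
	 */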
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}

	/*
	 * Now we must set up a timer for anyone with outstanding data.
	 */
	done_once = 0;
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			win_probe_recovered = 1;
			/*-
			 * Find the first chunk that was used with a window
			 * probe and clear the event. Put it back into the
			 * send queue as if it had not been sent.
			 */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					sctp_window_probe_recovery(stcb, asoc, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			j++;
			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				                 stcb->sctp_ep, stcb, net);
			}
			if (net->window_probe) {
				net->window_probe = 0;
			}
		} else {
			if (net->window_probe) {
				/*
				 * For window probes we must assure that a
				 * timer is still running.
				 */
				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
					                 stcb->sctp_ep, stcb, net);
				}
			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				                stcb, net,
				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
			}
		}
	}
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/*
		 * huh, this should not happen unless all packets
		 * are PR-SCTP and marked to skip, of course.
		 */
		if (sctp_fs_audit(asoc)) {
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				net->flight_size = 0;
			}
			asoc->total_flight = 0;
			asoc->total_flight_count = 0;
			asoc->sent_queue_retran_cnt = 0;
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_increase(tp1);
					sctp_total_flight_increase(stcb, tp1);
				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
				}
			}
		}
		done_once = 1;
		goto again;
	}
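	/*
	 * Note: the audit above rebuilds the flight-size accounting from
	 * the sent queue and then retries the timer pass exactly once
	 * (done_once), so a bookkeeping mismatch cannot cause a loop.
	 */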
	/*********************************************/
	/* Here we perform PR-SCTP procedures        */
	/* (section 4.2)                             */
	/*********************************************/
	/* C1. update advancedPeerAckPoint */
	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
		asoc->advanced_peer_ack_point = cum_ack;
	}
	/* C2. try to further move advancedPeerAckPoint ahead */
	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;
		uint32_t old_adv_peer_ack_point;

		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				               0xee, cum_ack, asoc->advanced_peer_ack_point,
				               old_adv_peer_ack_point);
			}
			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
				send_forward_tsn(stcb, asoc);
			} else if (lchk) {
				/* try to fast retransmit FORWARD-TSNs that get lost too */
				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
					send_forward_tsn(stcb, asoc);
				}
			}
		}
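		/*
		 * Skip to the first chunk on the sent queue that still has
		 * a destination, so the T3-send timer can be (re)started on
		 * it.
		 */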
		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
			if (lchk->whoTo != NULL) {
				break;
			}
		}
		if (lchk != NULL) {
			/* Assure a timer is up */
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			                 stcb->sctp_ep, stcb, lchk->whoTo);
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
		               a_rwnd,
		               stcb->asoc.peers_rwnd,
		               stcb->asoc.total_flight,
		               stcb->asoc.total_output_queue_size);
	}
}

void
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
{
	/* Copy cum-ack */
	uint32_t cum_ack, a_rwnd;

	cum_ack = ntohl(cp->cumulative_tsn_ack);
	/* Arrange so a_rwnd does NOT change */
	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;

	/* Now call the express sack handling */
	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
}

static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
			       struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *control, *ncontrol;
	struct sctp_association *asoc;
	uint32_t mid;
	int need_reasm_check = 0;

	asoc = &stcb->asoc;
	mid = strmin->last_mid_delivered;
	/*
	 * First deliver anything prior to and including the message ID
	 * (MID) that came in.
	 */
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
			/* this is deliverable now */
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						      strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
							  control,
							  &stcb->sctp_socket->so_rcv,
							  1, SCTP_READ_LOCK_HELD,
							  SCTP_SO_NOT_LOCKED);
				}
			} else {
				/* It's a fragmented message */
				if (control->first_frag_seen) {
					/*
					 * Make it so this is next to
					 * deliver; we restore it later.
					 */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			/* no more delivery now. */
			break;
		}
	}
	if (need_reasm_check) {
		int ret;

		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
			/* Restore the next to deliver unless we are ahead */
			strmin->last_mid_delivered = mid;
		}
		if (ret == 0) {
			/* Left the partially delivered message at the front */
			return;
		}
		need_reasm_check = 0;
	}
	/*
	 * now we must deliver things in the queue the normal way, if any
	 * are now ready.
	 */
	mid = strmin->last_mid_delivered + 1;
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* this is deliverable now */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						      strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				strmin->last_mid_delivered = control->mid;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
							  control,
							  &stcb->sctp_socket->so_rcv, 1,
							  SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
				}
				mid = strmin->last_mid_delivered + 1;
			} else {
				/* It's a fragmented message */
				if (control->first_frag_seen) {
					/* Make it so this is next to deliver */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			break;
		}
	}
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	}
}

static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
	struct sctp_association *asoc, struct sctp_stream_in *strm,
	struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
{
	struct sctp_tmit_chunk *chk, *nchk;
	int cnt_removed = 0;

	/*
	 * For now, large messages held on the stream reassembly queue that
	 * are complete will be tossed as well. We could in theory do more
	 * work: spin through the queue and stop after dumping one message,
	 * i.e. on seeing the start of a new message at the head, and call
	 * the delivery function to see if it can be delivered. But for now
	 * we just dump everything on the queue.
	 */
	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && (ordered == 0)) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				break;
			}
		}
		cnt_removed++;
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
						chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
                        struct sctp_forward_tsn_chunk *fwd,
                        int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the PR-SCTP draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
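	/*
	 * For example, with a local cumTSN of 100 and FwdTSN(103)
	 * arriving: step 1 advances the cumTSN to 103; if TSNs 104 and
	 * 105 are already present in the mapping array, step 2 advances
	 * it further to 105, and the SACK sent in step 5 reports that.
	 */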
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *sv;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
			"Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * out of range (more than the rwnd we give out
			 * could cover, even in single-byte chunks). This
			 * must be an attacker.
			 */
			*abort_flag = 1;
			SCTP_SNPRINTF(msg, sizeof(msg),
			              "New cum ack %8.8x too high, highest TSN %8.8x",
			              new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
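	/*
	 * In the common case above (gap < m_size), every missing TSN from
	 * the mapping-array base up to the new cumulative TSN is marked
	 * received and non-renegable in the nr-mapping array; if the new
	 * cumulative TSN lies beyond the bitmap entirely, both maps are
	 * simply reset past it.
	 */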
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			strm = &asoc->strmin[sid];
			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
			}
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/*    delivery issues as needed.                       */
	/*******************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
									    sizeof(struct sctp_strseq_mid),
									    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
									    sizeof(struct sctp_strseq),
									    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				sid = ntohs(stseq->sid);
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
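			/*
			 * The two parse paths above reflect the two wire
			 * formats: classic FORWARD-TSN (RFC 3758) carries
			 * 16-bit stream sequence numbers, while I-FORWARD-TSN
			 * for I-DATA (RFC 8260) carries 32-bit message IDs
			 * plus a flags field with an unordered bit.
			 */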
			/* Convert */

			/* now process */

			/*
			 * OK, we now look for the stream/seq on the read
			 * queue where it is not all delivered. If we find
			 * it, we transmute the read entry into a
			 * PDI_ABORTED.
			 */
			if (sid >= asoc->streamincnt) {
				/* screwed up streams, stop!  */
				break;
			}
			if ((asoc->str_of_pdapi == sid) &&
			    (asoc->ssn_of_pdapi == mid)) {
				/*
				 * If this is the one we were partially
				 * delivering now, then we no longer are.
				 * Note that this will change with the
				 * reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
			strm = &asoc->strmin[sid];
			if (ordered) {
				TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
					}
				}
			} else {
				if (asoc->idata_supported) {
					TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
						}
					}
				} else {
					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
					}
				}
			}
			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
				if ((control->sinfo_stream == sid) &&
				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
					str_seq = (sid << 16) | (0x0000ffff & mid);
					control->pdapi_aborted = 1;
					sv = stcb->asoc.control_pdapi;
					control->end_added = 1;
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
						if (asoc->size_on_all_streams >= control->length) {
							asoc->size_on_all_streams -= control->length;
						} else {
#ifdef INVARIANTS
							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
							asoc->size_on_all_streams = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_all_streams);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else if (control->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						      strm, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
					stcb->asoc.control_pdapi = control;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					                stcb,
					                SCTP_PARTIAL_DELIVERY_ABORTED,
					                (void *)&str_seq,
							SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((control->sinfo_stream == sid) &&
					   SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
				/* Update the sequence number */
				strm->last_mid_delivered = mid;
			}
			/* now kick the stream the new way */
			/*sa_ignore NO_NULL_CHK*/
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide the mapping arrays forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}