/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef __FreeBSD__
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 361243 2020-05-19 07:23:35Z tuexen $");
#endif

#include <netinet/sctp_os.h>
#ifdef __FreeBSD__
#include <sys/proc.h>
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#ifdef __FreeBSD__
#include <netinet/sctp_lock_bsd.h>
#endif
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * take out what has NOT been put on the socket queue and
	 * that we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnds */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it
	 * to 1, even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
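
/*
 * Worked example (illustrative numbers only, not taken from a trace):
 * with sctp_sbspace() reporting 64000 bytes free, 8000 bytes held in 4
 * chunks on the reassembly queue and nothing on the stream queues, the
 * rwnd offered is 64000 - (8000 + 4 * MSIZE) less my_rwnd_control_len,
 * clamped to 1 when the control overhead would otherwise swallow the
 * remaining window.
 */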

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		read_queue_e->do_not_ref_stcb = 1;
	}
failed_build:
	return (read_queue_e);
}
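
/*
 * Note that sinfo_flags carries the DATA chunk flags (e.g.
 * SCTP_DATA_UNORDERED) in its upper byte; readers shift them back down
 * with (sinfo_flags >> 8), as sctp_place_control_in_stream() does below.
 */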

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
#if defined(__Userspace_os_Windows)
	WSACMSGHDR *cmh;
#else
	struct cmsghdr *cmh;
#endif
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
#if defined(__Userspace_os_Windows)
	cmh = mtod(ret, WSACMSGHDR *);
#else
	cmh = mtod(ret, struct cmsghdr *);
#endif
	/*
	 * Make sure that there is no un-initialized padding between
	 * the cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
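
/*
 * The mbuf built above carries a chain of cmsgs: an SCTP_RCVINFO (if
 * enabled), then an SCTP_NXTINFO (if enabled and a next message is
 * pending), then an SCTP_SNDRCV or SCTP_EXTRCV. A user-level receiver
 * would walk it with the standard CMSG macros, e.g. (sketch only,
 * assuming a struct msghdr msg filled in by recvmsg()):
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *		    cmsg->cmsg_type == SCTP_RCVINFO) {
 *			memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
 *		}
 *	}
 */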

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this TSN is behind the cum ack and thus we don't
		 * need to worry about it being moved from one map
		 * to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
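
/*
 * In short: once a TSN has been handed up it is moved from mapping_array
 * (renegable) to nr_mapping_array (non-renegable), so later SACKs report
 * it in an NR gap-ack block and the peer will not be asked to hold it
 * for retransmission. This only matters when reneging (sctp_do_drain)
 * is enabled, hence the early return above.
 */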

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/* Only one stream can be here in old style -- abort */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the
				 * new one, insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate msg
				 * id number?? Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert
					 * it after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
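
/*
 * The walk above keeps each stream queue sorted in ascending MID (SSN
 * for old DATA) order, so the delivery code can simply peel messages
 * off the head as last_mid_delivered advances.
 */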

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_remote_addr(control->whoFrom);
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order as
 * long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}

static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * counters; we assume the caller has taken any
			 * needed SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
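
/*
 * Keeping tail_mbuf current lets sctp_add_to_tail_pointer() below append
 * further fragments in O(1) instead of re-walking the whole mbuf chain
 * on every merge.
 */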

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		sctp_m_freem(control->data);
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * counters; we assume the caller has taken any
			 * needed SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->mid = control->mid;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
	nc->do_not_ref_stcb = control->do_not_ref_stcb;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there,
		 * hopefully this will work :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSNs go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If we return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Now let's add it to the queue after removing control */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
				} else {
					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/* Switch to the new guy and continue */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
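
/*
 * Return semantics above: 1 tells the caller to stop looking at the
 * un-ordered queue (either the message completed or nothing more can be
 * collapsed); 0 means a partial delivery was just started on this
 * control.
 */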

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one; even though this one has a
			 * first, it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/* Easy way: the start of a new guy beyond the lowest */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen; if it does,
				 * we started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case
				 * since I have no way to recover. This
				 * really will only happen if we can get
				 * more TSNs higher before the
				 * pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok we have two firsts and the one we just got
			 * is smaller than the one we previously placed...
			 * yuck! We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and
			 * it should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSNs on it that are
	 * fragmented are ready to deliver. If so go ahead and place them
	 * on the read queue. In so placing, if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged in, in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; must have gotten an un-ordered above
		 * being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* A singleton now slipping through - mark it non-revokable too */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/* Check if we can defer adding until it's all there */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/* Don't need it, or cannot add more (one being delivered that way) */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
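
/*
 * pd_point above is the partial-delivery threshold: the smaller of the
 * socket's receive buffer limit shifted down by
 * SCTP_PARTIAL_DELIVERY_SHIFT and the endpoint's configured
 * partial_delivery_point. An incomplete message is pushed to the read
 * queue early only once it has accumulated at least that many bytes.
 */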

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto
	 * the control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement size_on_all_streams,
				 * since control is on the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
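
/*
 * Note: the value returned above is the number of bytes appended to an
 * already-started mbuf chain (0 when the chunk became the control's
 * first data); sctp_queue_data_for_reasm() uses it to adjust
 * size_on_all_streams for controls not yet on the read queue.
 */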

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (unordered == 0) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			sctp_clean_up_control(stcb, control);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/*
			 * Ok we created this control and now let's
			 * validate that it's legal, i.e. there is a B bit
			 * set; if not, and we have up to the cum-ack, then
			 * it's invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reassembly portion:
	 * o if it's the first, it goes to the control mbuf.
	 * o if it's not first but the next in sequence, it goes to the
	 *   control, and each succeeding one in order also goes.
	 * o if it's not in order, we place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on sender's part: they either sent us two
			 * data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is not at top_fsn: %u -- abort\n",
					    chk->rec.data.fsn,
					    control->top_fsn);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so it's a dup */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so it's a dup */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
					return;
				}
			}
			/* validate not beyond top FSN if we have seen last one */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/* Gak, he sent me a duplicate str seq number */
				/*
				 * foo bar, I guess I will just free this
				 * new guy, should we abort too? FIX ME
				 * MAYBE? Or it COULD be that the SSNs have
				 * wrapped. Maybe I should compare to TSN
				 * somehow... sigh, for now just blow away
				 * the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok let's see if we can suck any up into the control structure
	 * that are in seq, if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to size-on-all-streams
					 * if it's not on the read q. The
					 * read q flag will cause a sballoc
					 * so it's accounted for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
1667
1668 static struct sctp_queued_to_read *
1669 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1670 {
1671 struct sctp_queued_to_read *control;
1672
1673 if (ordered) {
1674 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1675 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1676 break;
1677 }
1678 }
1679 } else {
1680 if (idata_supported) {
1681 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1682 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1683 break;
1684 }
1685 }
1686 } else {
1687 control = TAILQ_FIRST(&strm->uno_inqueue);
1688 }
1689 }
1690 return (control);
1691 }
1692
1693 static int
1694 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1695 struct mbuf **m, int offset, int chk_length,
1696 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1697 int *break_flag, int last_chunk, uint8_t chk_type)
1698 {
1699 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1700 uint32_t tsn, fsn, gap, mid;
1701 struct mbuf *dmbuf;
1702 int the_len;
1703 int need_reasm_check = 0;
1704 uint16_t sid;
1705 struct mbuf *op_err;
1706 char msg[SCTP_DIAG_INFO_LEN];
1707 struct sctp_queued_to_read *control, *ncontrol;
1708 uint32_t ppid;
1709 uint8_t chk_flags;
1710 struct sctp_stream_reset_list *liste;
1711 int ordered;
1712 size_t clen;
1713 int created_control = 0;
1714
1715 if (chk_type == SCTP_IDATA) {
1716 struct sctp_idata_chunk *chunk, chunk_buf;
1717
1718 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1719 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1720 chk_flags = chunk->ch.chunk_flags;
1721 clen = sizeof(struct sctp_idata_chunk);
1722 tsn = ntohl(chunk->dp.tsn);
1723 sid = ntohs(chunk->dp.sid);
1724 mid = ntohl(chunk->dp.mid);
1725 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1726 fsn = 0;
1727 ppid = chunk->dp.ppid_fsn.ppid;
1728 } else {
1729 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1730 ppid = 0xffffffff; /* Use as an invalid value. */
1731 }
1732 } else {
1733 struct sctp_data_chunk *chunk, chunk_buf;
1734
1735 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1736 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1737 chk_flags = chunk->ch.chunk_flags;
1738 clen = sizeof(struct sctp_data_chunk);
1739 tsn = ntohl(chunk->dp.tsn);
1740 sid = ntohs(chunk->dp.sid);
1741 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1742 fsn = tsn;
1743 ppid = chunk->dp.ppid;
1744 }
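/*
 * Recap of the field mapping above (cf. RFC 4960 and RFC 8260): a
 * DATA chunk carries tsn/sid/ssn/ppid, so the 16-bit SSN is widened
 * into the 32-bit MID and the TSN doubles as the FSN, since fragments
 * of a DATA message carry consecutive TSNs. An I-DATA chunk carries
 * tsn/sid/mid plus a ppid/fsn union: the first fragment carries the
 * PPID (its FSN is implicitly 0), and all later fragments carry an
 * explicit FSN.
 */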
1745 if ((size_t)chk_length == clen) {
1746 /*
1747 		 * Need to send an abort since we had an
1748 		 * empty data chunk.
1749 */
1750 op_err = sctp_generate_no_user_data_cause(tsn);
1751 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1752 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1753 *abort_flag = 1;
1754 return (0);
1755 }
1756 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1757 asoc->send_sack = 1;
1758 }
1759 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1761 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1762 }
1763 if (stcb == NULL) {
1764 return (0);
1765 }
1766 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1767 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1768 /* It is a duplicate */
1769 SCTP_STAT_INCR(sctps_recvdupdata);
1770 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1771 /* Record a dup for the next outbound sack */
1772 asoc->dup_tsns[asoc->numduptsns] = tsn;
1773 asoc->numduptsns++;
1774 }
1775 asoc->send_sack = 1;
1776 return (0);
1777 }
1778 /* Calculate the number of TSN's between the base and this TSN */
1779 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1780 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1781 /* Can't hold the bit in the mapping at max array, toss it */
1782 return (0);
1783 }
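/*
 * For example, with mapping_array_base_tsn = 1000 and tsn = 1010 the
 * gap is 10 (computed effectively modulo 2^32, so TSN wrap-around is
 * handled), which selects bit (gap & 7) = 2 of byte (gap >> 3) = 1 in
 * the mapping arrays.
 */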
1784 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1785 SCTP_TCB_LOCK_ASSERT(stcb);
1786 if (sctp_expand_mapping_array(asoc, gap)) {
1787 /* Can't expand, drop it */
1788 return (0);
1789 }
1790 }
1791 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1792 *high_tsn = tsn;
1793 }
1794 /* See if we have received this one already */
1795 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1796 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1797 SCTP_STAT_INCR(sctps_recvdupdata);
1798 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1799 /* Record a dup for the next outbound sack */
1800 asoc->dup_tsns[asoc->numduptsns] = tsn;
1801 asoc->numduptsns++;
1802 }
1803 asoc->send_sack = 1;
1804 return (0);
1805 }
1806 /*
1807 * Check to see about the GONE flag, duplicates would cause a sack
1808 * to be sent up above
1809 */
1810 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1811 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1812 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1813 /*
1814 	 * Wait a minute, this socket is gone; there is no longer a
1815 	 * receiver. Send the peer an ABORT!
1816 */
1817 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1818 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1819 *abort_flag = 1;
1820 return (0);
1821 }
1822 /*
1823 * Now before going further we see if there is room. If NOT then we
1824 * MAY let one through only IF this TSN is the one we are waiting
1825 * for on a partial delivery API.
1826 */
1827
1828 /* Is the stream valid? */
1829 if (sid >= asoc->streamincnt) {
1830 struct sctp_error_invalid_stream *cause;
1831
1832 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1833 0, M_NOWAIT, 1, MT_DATA);
1834 if (op_err != NULL) {
1835 /* add some space up front so prepend will work well */
1836 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1837 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1838 /*
1839 			 * Error causes are just parameters, and this one has
1840 			 * two back-to-back parameter headers: one with the error
1841 			 * type and size, the other with the stream id and a reserved field.
1842 */
1843 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1844 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1845 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1846 cause->stream_id = htons(sid);
1847 cause->reserved = htons(0);
1848 sctp_queue_op_err(stcb, op_err);
1849 }
1850 SCTP_STAT_INCR(sctps_badsid);
1851 SCTP_TCB_LOCK_ASSERT(stcb);
1852 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1853 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1854 asoc->highest_tsn_inside_nr_map = tsn;
1855 }
1856 if (tsn == (asoc->cumulative_tsn + 1)) {
1857 /* Update cum-ack */
1858 asoc->cumulative_tsn = tsn;
1859 }
1860 return (0);
1861 }
1862 /*
1863 	 * If it's a fragmented message, let's see if we can
1864 * find the control on the reassembly queues.
1865 */
1866 if ((chk_type == SCTP_IDATA) &&
1867 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1868 (fsn == 0)) {
1869 /*
1870 		 * The first fragment *must* have fsn 0, and other
1871 		 * (middle/end) pieces can *not* have fsn 0.
1872 		 * XXX: This can happen in case of a wrap around.
1873 		 * Ignore that case for now.
1874 */
1875 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1876 goto err_out;
1877 }
1878 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1879 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1880 chk_flags, control);
1881 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1882 /* See if we can find the re-assembly entity */
1883 if (control != NULL) {
1884 /* We found something, does it belong? */
1885 if (ordered && (mid != control->mid)) {
1886 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1887 err_out:
1888 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1889 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1890 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1891 *abort_flag = 1;
1892 return (0);
1893 }
1894 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1895 /* We can't have a switched order with an unordered chunk */
1896 SCTP_SNPRINTF(msg, sizeof(msg),
1897 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1898 tsn);
1899 goto err_out;
1900 }
1901 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1902 				/* We can't have a switched unordered with an ordered chunk */
1903 SCTP_SNPRINTF(msg, sizeof(msg),
1904 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1905 tsn);
1906 goto err_out;
1907 }
1908 }
1909 } else {
1910 		/* It's a complete segment. Let's validate that we
1911 * don't have a re-assembly going on with
1912 * the same Stream/Seq (for ordered) or in
1913 * the same Stream for unordered.
1914 */
1915 if (control != NULL) {
1916 if (ordered || asoc->idata_supported) {
1917 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1918 chk_flags, mid);
1919 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1920 goto err_out;
1921 } else {
1922 if ((tsn == control->fsn_included + 1) &&
1923 (control->end_added == 0)) {
1924 SCTP_SNPRINTF(msg, sizeof(msg),
1925 "Illegal message sequence, missing end for MID: %8.8x",
1926 control->fsn_included);
1927 goto err_out;
1928 } else {
1929 control = NULL;
1930 }
1931 }
1932 }
1933 }
1934 /* now do the tests */
1935 if (((asoc->cnt_on_all_streams +
1936 asoc->cnt_on_reasm_queue +
1937 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1938 (((int)asoc->my_rwnd) <= 0)) {
1939 /*
1940 * When we have NO room in the rwnd we check to make sure
1941 * the reader is doing its job...
1942 */
1943 if (stcb->sctp_socket->so_rcv.sb_cc) {
1944 /* some to read, wake-up */
1945 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1946 struct socket *so;
1947
1948 so = SCTP_INP_SO(stcb->sctp_ep);
1949 atomic_add_int(&stcb->asoc.refcnt, 1);
1950 SCTP_TCB_UNLOCK(stcb);
1951 SCTP_SOCKET_LOCK(so, 1);
1952 SCTP_TCB_LOCK(stcb);
1953 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1954 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1955 /* assoc was freed while we were unlocked */
1956 SCTP_SOCKET_UNLOCK(so, 1);
1957 return (0);
1958 }
1959 #endif
1960 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1961 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1962 SCTP_SOCKET_UNLOCK(so, 1);
1963 #endif
1964 }
1965 /* now is it in the mapping array of what we have accepted? */
1966 if (chk_type == SCTP_DATA) {
1967 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1968 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1969 /* Nope not in the valid range dump it */
1970 dump_packet:
1971 sctp_set_rwnd(stcb, asoc);
1972 if ((asoc->cnt_on_all_streams +
1973 asoc->cnt_on_reasm_queue +
1974 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1975 SCTP_STAT_INCR(sctps_datadropchklmt);
1976 } else {
1977 SCTP_STAT_INCR(sctps_datadroprwnd);
1978 }
1979 *break_flag = 1;
1980 return (0);
1981 }
1982 } else {
1983 if (control == NULL) {
1984 goto dump_packet;
1985 }
1986 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1987 goto dump_packet;
1988 }
1989 }
1990 }
1991 #ifdef SCTP_ASOCLOG_OF_TSNS
1992 SCTP_TCB_LOCK_ASSERT(stcb);
1993 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1994 asoc->tsn_in_at = 0;
1995 asoc->tsn_in_wrapped = 1;
1996 }
1997 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1998 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1999 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
2000 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2001 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
2002 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2003 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2004 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2005 asoc->tsn_in_at++;
2006 #endif
2007 /*
2008 	 * Before we continue, let's validate that we are not being fooled by
2009 	 * an evil attacker. We can only have Nk chunks based on our TSN
2010 	 * spread allowed by the mapping array (N * 8 bits), so there is no
2011 	 * way our stream sequence numbers could have wrapped. We of course
2012 	 * only validate the FIRST fragment, so the bit must be set.
2013 */
2014 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2015 (TAILQ_EMPTY(&asoc->resetHead)) &&
2016 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2017 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2018 /* The incoming sseq is behind where we last delivered? */
2019 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2020 mid, asoc->strmin[sid].last_mid_delivered);
2021
2022 if (asoc->idata_supported) {
2023 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2024 asoc->strmin[sid].last_mid_delivered,
2025 tsn,
2026 sid,
2027 mid);
2028 } else {
2029 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2030 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2031 tsn,
2032 sid,
2033 (uint16_t)mid);
2034 }
2035 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2036 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2037 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2038 *abort_flag = 1;
2039 return (0);
2040 }
2041 if (chk_type == SCTP_IDATA) {
2042 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2043 } else {
2044 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2045 }
2046 if (last_chunk == 0) {
2047 if (chk_type == SCTP_IDATA) {
2048 dmbuf = SCTP_M_COPYM(*m,
2049 (offset + sizeof(struct sctp_idata_chunk)),
2050 the_len, M_NOWAIT);
2051 } else {
2052 dmbuf = SCTP_M_COPYM(*m,
2053 (offset + sizeof(struct sctp_data_chunk)),
2054 the_len, M_NOWAIT);
2055 }
2056 #ifdef SCTP_MBUF_LOGGING
2057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2058 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2059 }
2060 #endif
2061 } else {
2062 /* We can steal the last chunk */
2063 int l_len;
2064 dmbuf = *m;
2065 /* lop off the top part */
2066 if (chk_type == SCTP_IDATA) {
2067 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2068 } else {
2069 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2070 }
2071 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2072 l_len = SCTP_BUF_LEN(dmbuf);
2073 } else {
2074 		/* need to count up the size; hopefully we
2075 		 * do not hit this too often :-0
2076 */
2077 struct mbuf *lat;
2078
2079 l_len = 0;
2080 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2081 l_len += SCTP_BUF_LEN(lat);
2082 }
2083 }
2084 if (l_len > the_len) {
2085 		/* Trim the excess bytes off the end too */
2086 m_adj(dmbuf, -(l_len - the_len));
2087 }
2088 }
2089 if (dmbuf == NULL) {
2090 SCTP_STAT_INCR(sctps_nomem);
2091 return (0);
2092 }
2093 /*
2094 	 * Now, no matter what, we need a control; get one
2095 	 * if we don't have one (we may have gotten it
2096 	 * above when we found the message was fragmented).
2097 */
2098 if (control == NULL) {
2099 sctp_alloc_a_readq(stcb, control);
2100 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2101 ppid,
2102 sid,
2103 chk_flags,
2104 NULL, fsn, mid);
2105 if (control == NULL) {
2106 SCTP_STAT_INCR(sctps_nomem);
2107 return (0);
2108 }
2109 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2110 struct mbuf *mm;
2111
2112 control->data = dmbuf;
2113 control->tail_mbuf = NULL;
2114 for (mm = control->data; mm; mm = mm->m_next) {
2115 control->length += SCTP_BUF_LEN(mm);
2116 if (SCTP_BUF_NEXT(mm) == NULL) {
2117 control->tail_mbuf = mm;
2118 }
2119 }
2120 control->end_added = 1;
2121 control->last_frag_seen = 1;
2122 control->first_frag_seen = 1;
2123 control->fsn_included = fsn;
2124 control->top_fsn = fsn;
2125 }
2126 created_control = 1;
2127 }
2128 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2129 chk_flags, ordered, mid, control);
2130 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2131 TAILQ_EMPTY(&asoc->resetHead) &&
2132 ((ordered == 0) ||
2133 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2134 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2135 /* Candidate for express delivery */
2136 /*
2137 		 * It's not fragmented, no PD-API is up, nothing is in the
2138 		 * delivery queue, it's unordered OR ordered and the next to
2139 		 * deliver AND nothing else is stuck on the stream queue,
2140 		 * and there is room for it in the socket buffer. Let's just
2141 		 * stuff it into the buffer....
2142 */
2143 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2144 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2145 asoc->highest_tsn_inside_nr_map = tsn;
2146 }
2147 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2148 control, mid);
2149
2150 sctp_add_to_readq(stcb->sctp_ep, stcb,
2151 control, &stcb->sctp_socket->so_rcv,
2152 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2153
2154 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2155 /* for ordered, bump what we delivered */
2156 asoc->strmin[sid].last_mid_delivered++;
2157 }
2158 SCTP_STAT_INCR(sctps_recvexpress);
2159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2160 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2161 SCTP_STR_LOG_FROM_EXPRS_DEL);
2162 }
2163 control = NULL;
2164 goto finish_express_del;
2165 }
2166
2167 /* Now will we need a chunk too? */
2168 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2169 sctp_alloc_a_chunk(stcb, chk);
2170 if (chk == NULL) {
2171 /* No memory so we drop the chunk */
2172 SCTP_STAT_INCR(sctps_nomem);
2173 if (last_chunk == 0) {
2174 /* we copied it, free the copy */
2175 sctp_m_freem(dmbuf);
2176 }
2177 return (0);
2178 }
2179 chk->rec.data.tsn = tsn;
2180 chk->no_fr_allowed = 0;
2181 chk->rec.data.fsn = fsn;
2182 chk->rec.data.mid = mid;
2183 chk->rec.data.sid = sid;
2184 chk->rec.data.ppid = ppid;
2185 chk->rec.data.context = stcb->asoc.context;
2186 chk->rec.data.doing_fast_retransmit = 0;
2187 chk->rec.data.rcv_flags = chk_flags;
2188 chk->asoc = asoc;
2189 chk->send_size = the_len;
2190 chk->whoTo = net;
2191 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2192 chk,
2193 control, mid);
2194 atomic_add_int(&net->ref_count, 1);
2195 chk->data = dmbuf;
2196 }
2197 /* Set the appropriate TSN mark */
2198 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2199 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2200 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2201 asoc->highest_tsn_inside_nr_map = tsn;
2202 }
2203 } else {
2204 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2205 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2206 asoc->highest_tsn_inside_map = tsn;
2207 }
2208 }
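/*
 * Note on the two maps: with sctp_do_drain disabled the chunk can
 * never be reneged, so its TSN goes into the non-renegable map and,
 * when NR-SACKs are in use, is reported as non-renegable; otherwise
 * it stays in the regular, renegable map.
 */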
2209 /* Now is it complete (i.e. not fragmented)? */
2210 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2211 /*
2212 		 * Special check for when streams are resetting. We
2213 		 * could be smarter about this and check the
2214 		 * actual stream to see if it is not being reset;
2215 		 * that way we would not create head-of-line blocking
2216 		 * (HOLB) between streams being reset and those not being reset.
2217 *
2218 */
2219 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2220 SCTP_TSN_GT(tsn, liste->tsn)) {
2221 /*
2222 			 * Yep, it's past where we need to reset... go
2223 * ahead and queue it.
2224 */
2225 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2226 /* first one on */
2227 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2228 } else {
2229 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2230 unsigned char inserted = 0;
2231 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2232 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2233
2234 continue;
2235 } else {
2236 /* found it */
2237 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2238 inserted = 1;
2239 break;
2240 }
2241 }
2242 if (inserted == 0) {
2243 /*
2244 					 * Nothing in the queue had a
2245 					 * larger TSN, so this control
2246 					 * must be put at the end.
2247 */
2248 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2249 }
2250 }
2251 goto finish_express_del;
2252 }
2253 if (chk_flags & SCTP_DATA_UNORDERED) {
2254 /* queue directly into socket buffer */
2255 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2256 control, mid);
2257 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2258 sctp_add_to_readq(stcb->sctp_ep, stcb,
2259 control,
2260 &stcb->sctp_socket->so_rcv, 1,
2261 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2262
2263 } else {
2264 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2265 mid);
2266 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2267 if (*abort_flag) {
2268 if (last_chunk) {
2269 *m = NULL;
2270 }
2271 return (0);
2272 }
2273 }
2274 goto finish_express_del;
2275 }
2276 /* If we reach here its a reassembly */
2277 need_reasm_check = 1;
2278 SCTPDBG(SCTP_DEBUG_XXX,
2279 "Queue data to stream for reasm control: %p MID: %u\n",
2280 control, mid);
2281 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2282 if (*abort_flag) {
2283 /*
2284 * the assoc is now gone and chk was put onto the
2285 * reasm queue, which has all been freed.
2286 */
2287 if (last_chunk) {
2288 *m = NULL;
2289 }
2290 return (0);
2291 }
2292 finish_express_del:
2293 /* Here we tidy up things */
2294 if (tsn == (asoc->cumulative_tsn + 1)) {
2295 /* Update cum-ack */
2296 asoc->cumulative_tsn = tsn;
2297 }
2298 if (last_chunk) {
2299 *m = NULL;
2300 }
2301 if (ordered) {
2302 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2303 } else {
2304 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2305 }
2306 SCTP_STAT_INCR(sctps_recvdata);
2307 /* Set it present please */
2308 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2309 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2310 }
2311 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2312 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2313 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2314 }
2315 if (need_reasm_check) {
2316 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2317 need_reasm_check = 0;
2318 }
2319 /* check the special flag for stream resets */
2320 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2321 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2322 /*
2323 		 * We have finished working through the backlogged TSNs, so now
2324 		 * it is time to reset streams: 1) call the reset function, 2) free
2325 		 * the pending_reply space, 3) distribute any chunks in the
2326 		 * pending_reply_queue.
2327 */
2328 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2329 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2330 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2331 SCTP_FREE(liste, SCTP_M_STRESET);
2332 /*sa_ignore FREED_MEMORY*/
2333 liste = TAILQ_FIRST(&asoc->resetHead);
2334 if (TAILQ_EMPTY(&asoc->resetHead)) {
2335 /* All can be removed */
2336 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2337 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2338 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2339 if (*abort_flag) {
2340 return (0);
2341 }
2342 if (need_reasm_check) {
2343 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2344 need_reasm_check = 0;
2345 }
2346 }
2347 } else {
2348 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2349 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2350 break;
2351 }
2352 /*
2353 				 * If control->sinfo_tsn is <= liste->tsn we can
2354 				 * process it, which is the negation of
2355 				 * control->sinfo_tsn > liste->tsn.
2356 */
2357 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2358 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2359 if (*abort_flag) {
2360 return (0);
2361 }
2362 if (need_reasm_check) {
2363 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2364 need_reasm_check = 0;
2365 }
2366 }
2367 }
2368 }
2369 return (1);
2370 }
2371
2372 static const int8_t sctp_map_lookup_tab[256] = {
2373 0, 1, 0, 2, 0, 1, 0, 3,
2374 0, 1, 0, 2, 0, 1, 0, 4,
2375 0, 1, 0, 2, 0, 1, 0, 3,
2376 0, 1, 0, 2, 0, 1, 0, 5,
2377 0, 1, 0, 2, 0, 1, 0, 3,
2378 0, 1, 0, 2, 0, 1, 0, 4,
2379 0, 1, 0, 2, 0, 1, 0, 3,
2380 0, 1, 0, 2, 0, 1, 0, 6,
2381 0, 1, 0, 2, 0, 1, 0, 3,
2382 0, 1, 0, 2, 0, 1, 0, 4,
2383 0, 1, 0, 2, 0, 1, 0, 3,
2384 0, 1, 0, 2, 0, 1, 0, 5,
2385 0, 1, 0, 2, 0, 1, 0, 3,
2386 0, 1, 0, 2, 0, 1, 0, 4,
2387 0, 1, 0, 2, 0, 1, 0, 3,
2388 0, 1, 0, 2, 0, 1, 0, 7,
2389 0, 1, 0, 2, 0, 1, 0, 3,
2390 0, 1, 0, 2, 0, 1, 0, 4,
2391 0, 1, 0, 2, 0, 1, 0, 3,
2392 0, 1, 0, 2, 0, 1, 0, 5,
2393 0, 1, 0, 2, 0, 1, 0, 3,
2394 0, 1, 0, 2, 0, 1, 0, 4,
2395 0, 1, 0, 2, 0, 1, 0, 3,
2396 0, 1, 0, 2, 0, 1, 0, 6,
2397 0, 1, 0, 2, 0, 1, 0, 3,
2398 0, 1, 0, 2, 0, 1, 0, 4,
2399 0, 1, 0, 2, 0, 1, 0, 3,
2400 0, 1, 0, 2, 0, 1, 0, 5,
2401 0, 1, 0, 2, 0, 1, 0, 3,
2402 0, 1, 0, 2, 0, 1, 0, 4,
2403 0, 1, 0, 2, 0, 1, 0, 3,
2404 0, 1, 0, 2, 0, 1, 0, 8
2405 };
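/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1-bits in val
 * counting up from bit 0. For example, val = 0x17 (binary 00010111)
 * yields 3, and val = 0xff yields 8, so a fully-acked byte advances
 * the scan by a whole byte's worth of TSNs.
 */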
2406
2407
2408 void
2409 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2410 {
2411 /*
2412 * Now we also need to check the mapping array in a couple of ways.
2413 * 1) Did we move the cum-ack point?
2414 *
2415 * When you first glance at this you might think
2416 * that all entries that make up the position
2417 * of the cum-ack would be in the nr-mapping array
2418 	 * only, i.e. things up to the cum-ack are always
2419 	 * deliverable. That's true with one exception: when
2420 	 * it's a fragmented message we may not deliver the data
2421 * until some threshold (or all of it) is in place. So
2422 * we must OR the nr_mapping_array and mapping_array to
2423 * get a true picture of the cum-ack.
2424 */
2425 struct sctp_association *asoc;
2426 int at;
2427 uint8_t val;
2428 int slide_from, slide_end, lgap, distance;
2429 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2430
2431 asoc = &stcb->asoc;
2432
2433 old_cumack = asoc->cumulative_tsn;
2434 old_base = asoc->mapping_array_base_tsn;
2435 old_highest = asoc->highest_tsn_inside_map;
2436 /*
2437 * We could probably improve this a small bit by calculating the
2438 * offset of the current cum-ack as the starting point.
2439 */
2440 at = 0;
2441 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2442 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2443 if (val == 0xff) {
2444 at += 8;
2445 } else {
2446 /* there is a 0 bit */
2447 at += sctp_map_lookup_tab[val];
2448 break;
2449 }
2450 }
2451 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);
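/*
 * Worked example: with mapping_array_base_tsn = 1000 and combined map
 * bytes { 0xff, 0x3f, ... }, the first byte adds 8 and the second
 * adds sctp_map_lookup_tab[0x3f] = 6, so at = 14 and the cum-ack
 * becomes 1000 + 13 = 1013, the last TSN of the contiguous run.
 */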
2452
2453 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2454 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2455 #ifdef INVARIANTS
2456 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2457 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2458 #else
2459 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2460 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2461 sctp_print_mapping_array(asoc);
2462 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2463 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2464 }
2465 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2466 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2467 #endif
2468 }
2469 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2470 highest_tsn = asoc->highest_tsn_inside_nr_map;
2471 } else {
2472 highest_tsn = asoc->highest_tsn_inside_map;
2473 }
2474 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2475 /* The complete array was completed by a single FR */
2476 /* highest becomes the cum-ack */
2477 int clr;
2478 #ifdef INVARIANTS
2479 unsigned int i;
2480 #endif
2481
2482 /* clear the array */
2483 clr = ((at+7) >> 3);
2484 if (clr > asoc->mapping_array_size) {
2485 clr = asoc->mapping_array_size;
2486 }
2487 memset(asoc->mapping_array, 0, clr);
2488 memset(asoc->nr_mapping_array, 0, clr);
2489 #ifdef INVARIANTS
2490 for (i = 0; i < asoc->mapping_array_size; i++) {
2491 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2492 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2493 sctp_print_mapping_array(asoc);
2494 }
2495 }
2496 #endif
2497 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2498 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2499 } else if (at >= 8) {
2500 /* we can slide the mapping array down */
2501 /* slide_from holds where we hit the first NON 0xff byte */
2502
2503 /*
2504 * now calculate the ceiling of the move using our highest
2505 * TSN value
2506 */
2507 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2508 slide_end = (lgap >> 3);
2509 if (slide_end < slide_from) {
2510 sctp_print_mapping_array(asoc);
2511 #ifdef INVARIANTS
2512 panic("impossible slide");
2513 #else
2514 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2515 lgap, slide_end, slide_from, at);
2516 return;
2517 #endif
2518 }
2519 if (slide_end > asoc->mapping_array_size) {
2520 #ifdef INVARIANTS
2521 panic("would overrun buffer");
2522 #else
2523 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2524 asoc->mapping_array_size, slide_end);
2525 slide_end = asoc->mapping_array_size;
2526 #endif
2527 }
2528 distance = (slide_end - slide_from) + 1;
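/*
 * Slide example: with slide_from = 3 and slide_end = 6, distance = 4,
 * so bytes 3..6 of both maps are copied down to bytes 0..3 below and
 * mapping_array_base_tsn advances by slide_from * 8 = 24 TSNs.
 */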
2529 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2530 sctp_log_map(old_base, old_cumack, old_highest,
2531 SCTP_MAP_PREPARE_SLIDE);
2532 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2533 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2534 }
2535 if (distance + slide_from > asoc->mapping_array_size ||
2536 distance < 0) {
2537 /*
2538 * Here we do NOT slide forward the array so that
2539 * hopefully when more data comes in to fill it up
2540 * we will be able to slide it forward. Really I
2541 * don't think this should happen :-0
2542 */
2543
2544 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2545 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2546 (uint32_t) asoc->mapping_array_size,
2547 SCTP_MAP_SLIDE_NONE);
2548 }
2549 } else {
2550 int ii;
2551
2552 for (ii = 0; ii < distance; ii++) {
2553 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2554 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2555
2556 }
2557 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2558 asoc->mapping_array[ii] = 0;
2559 asoc->nr_mapping_array[ii] = 0;
2560 }
2561 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2562 asoc->highest_tsn_inside_map += (slide_from << 3);
2563 }
2564 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2565 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2566 }
2567 asoc->mapping_array_base_tsn += (slide_from << 3);
2568 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2569 sctp_log_map(asoc->mapping_array_base_tsn,
2570 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2571 SCTP_MAP_SLIDE_RESULT);
2572 }
2573 }
2574 }
2575 }
2576
2577 void
2578 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2579 {
2580 struct sctp_association *asoc;
2581 uint32_t highest_tsn;
2582 int is_a_gap;
2583
2584 sctp_slide_mapping_arrays(stcb);
2585 asoc = &stcb->asoc;
2586 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2587 highest_tsn = asoc->highest_tsn_inside_nr_map;
2588 } else {
2589 highest_tsn = asoc->highest_tsn_inside_map;
2590 }
2591 /* Is there a gap now? */
2592 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
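/*
 * For example, with cumulative_tsn = 100 and a highest received TSN
 * of 105, some TSNs above the cum-ack point arrived out of order, so
 * is_a_gap is set and any SACK sent will carry gap-ack blocks.
 */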
2593
2594 /*
2595 * Now we need to see if we need to queue a sack or just start the
2596 * timer (if allowed).
2597 */
2598 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2599 /*
2600 		 * Ok, special case: in the SHUTDOWN-SENT state we
2601 		 * make sure the SACK timer is off and instead send a
2602 		 * SHUTDOWN and a SACK.
2603 */
2604 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2605 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2606 stcb->sctp_ep, stcb, NULL,
2607 SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2608 }
2609 sctp_send_shutdown(stcb,
2610 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2611 if (is_a_gap) {
2612 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2613 }
2614 } else {
2615 /*
2616 * CMT DAC algorithm: increase number of packets
2617 * received since last ack
2618 */
2619 stcb->asoc.cmt_dac_pkts_rcvd++;
2620
2621 if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */
2622 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2623 * longer is one */
2624 (stcb->asoc.numduptsns) || /* we have dup's */
2625 (is_a_gap) || /* is still a gap */
2626 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2627 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2628 ) {
2629
2630 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2631 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2632 (stcb->asoc.send_sack == 0) &&
2633 (stcb->asoc.numduptsns == 0) &&
2634 (stcb->asoc.delayed_ack) &&
2635 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2636
2637 				/*
2638 				 * CMT DAC algorithm: With CMT,
2639 				 * delay acks even in the face of
2641 				 * reordering. Therefore, acks
2642 				 * that do not have to be sent
2643 				 * because of the above reasons
2644 				 * will be delayed. That is, acks
2645 				 * that would have been sent due to
2646 				 * gap reports will be delayed with
2647 				 * DAC. Start the delayed ack timer.
2648 				 */
2649 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2650 stcb->sctp_ep, stcb, NULL);
2651 } else {
2652 /*
2653 				 * Ok, we must build a SACK since the
2654 				 * timer is pending, we got our
2655 				 * first packet, OR there are gaps or
2656 				 * duplicates.
2657 */
2658 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2659 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2660 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2661 }
2662 } else {
2663 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2664 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2665 stcb->sctp_ep, stcb, NULL);
2666 }
2667 }
2668 }
2669 }
2670
2671 int
2672 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2673 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2674 struct sctp_nets *net, uint32_t *high_tsn)
2675 {
2676 struct sctp_chunkhdr *ch, chunk_buf;
2677 struct sctp_association *asoc;
2678 int num_chunks = 0; /* number of control chunks processed */
2679 int stop_proc = 0;
2680 int break_flag, last_chunk;
2681 int abort_flag = 0, was_a_gap;
2682 struct mbuf *m;
2683 uint32_t highest_tsn;
2684 uint16_t chk_length;
2685
2686 /* set the rwnd */
2687 sctp_set_rwnd(stcb, &stcb->asoc);
2688
2689 m = *mm;
2690 SCTP_TCB_LOCK_ASSERT(stcb);
2691 asoc = &stcb->asoc;
2692 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2693 highest_tsn = asoc->highest_tsn_inside_nr_map;
2694 } else {
2695 highest_tsn = asoc->highest_tsn_inside_map;
2696 }
2697 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2698 /*
2699 * setup where we got the last DATA packet from for any SACK that
2700 * may need to go out. Don't bump the net. This is done ONLY when a
2701 * chunk is assigned.
2702 */
2703 asoc->last_data_chunk_from = net;
2704
2705 #ifndef __Panda__
2706 /*-
2707 * Now before we proceed we must figure out if this is a wasted
2708 * cluster... i.e. it is a small packet sent in and yet the driver
2709 * underneath allocated a full cluster for it. If so we must copy it
2710 * to a smaller mbuf and free up the cluster mbuf. This will help
2711 * with cluster starvation. Note for __Panda__ we don't do this
2712 * since it has clusters all the way down to 64 bytes.
2713 */
2714 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2715 /* we only handle mbufs that are singletons.. not chains */
2716 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2717 if (m) {
2718 			/* ok, let's see if we can copy the data up */
2719 caddr_t *from, *to;
2720 /* get the pointers and copy */
2721 to = mtod(m, caddr_t *);
2722 from = mtod((*mm), caddr_t *);
2723 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2724 /* copy the length and free up the old */
2725 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2726 sctp_m_freem(*mm);
2727 /* success, back copy */
2728 *mm = m;
2729 } else {
2730 /* We are in trouble in the mbuf world .. yikes */
2731 m = *mm;
2732 }
2733 }
2734 #endif
2735 /* get pointer to the first chunk header */
2736 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2737 sizeof(struct sctp_chunkhdr),
2738 (uint8_t *)&chunk_buf);
2739 if (ch == NULL) {
2740 return (1);
2741 }
2742 /*
2743 * process all DATA chunks...
2744 */
2745 *high_tsn = asoc->cumulative_tsn;
2746 break_flag = 0;
2747 asoc->data_pkts_seen++;
2748 while (stop_proc == 0) {
2749 /* validate chunk length */
2750 chk_length = ntohs(ch->chunk_length);
2751 if (length - *offset < chk_length) {
2752 			/* all done, mutilated chunk */
2753 stop_proc = 1;
2754 continue;
2755 }
2756 if ((asoc->idata_supported == 1) &&
2757 (ch->chunk_type == SCTP_DATA)) {
2758 struct mbuf *op_err;
2759 char msg[SCTP_DIAG_INFO_LEN];
2760
2761 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2762 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2763 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2764 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2765 return (2);
2766 }
2767 if ((asoc->idata_supported == 0) &&
2768 (ch->chunk_type == SCTP_IDATA)) {
2769 struct mbuf *op_err;
2770 char msg[SCTP_DIAG_INFO_LEN];
2771
2772 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2773 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2774 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2775 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2776 return (2);
2777 }
2778 if ((ch->chunk_type == SCTP_DATA) ||
2779 (ch->chunk_type == SCTP_IDATA)) {
2780 uint16_t clen;
2781
2782 if (ch->chunk_type == SCTP_DATA) {
2783 clen = sizeof(struct sctp_data_chunk);
2784 } else {
2785 clen = sizeof(struct sctp_idata_chunk);
2786 }
2787 if (chk_length < clen) {
2788 /*
2789 				 * Need to send an abort since we had an
2790 				 * invalid data chunk.
2791 */
2792 struct mbuf *op_err;
2793 char msg[SCTP_DIAG_INFO_LEN];
2794
2795 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2796 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2797 chk_length);
2798 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2799 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2800 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2801 return (2);
2802 }
2803 #ifdef SCTP_AUDITING_ENABLED
2804 sctp_audit_log(0xB1, 0);
2805 #endif
2806 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2807 last_chunk = 1;
2808 } else {
2809 last_chunk = 0;
2810 }
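/*
 * A chunk whose 32-bit padded length reaches exactly the end of the
 * packet is the last one; its mbuf chain can then be consumed by
 * sctp_process_a_data_chunk() instead of copied.
 */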
2811 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2812 chk_length, net, high_tsn, &abort_flag, &break_flag,
2813 last_chunk, ch->chunk_type)) {
2814 num_chunks++;
2815 }
2816 if (abort_flag)
2817 return (2);
2818
2819 if (break_flag) {
2820 /*
2821 				 * Set because we ran out of rwnd space and
2822 				 * have no drop-report space left.
2823 */
2824 stop_proc = 1;
2825 continue;
2826 }
2827 } else {
2828 /* not a data chunk in the data region */
2829 switch (ch->chunk_type) {
2830 case SCTP_INITIATION:
2831 case SCTP_INITIATION_ACK:
2832 case SCTP_SELECTIVE_ACK:
2833 case SCTP_NR_SELECTIVE_ACK:
2834 case SCTP_HEARTBEAT_REQUEST:
2835 case SCTP_HEARTBEAT_ACK:
2836 case SCTP_ABORT_ASSOCIATION:
2837 case SCTP_SHUTDOWN:
2838 case SCTP_SHUTDOWN_ACK:
2839 case SCTP_OPERATION_ERROR:
2840 case SCTP_COOKIE_ECHO:
2841 case SCTP_COOKIE_ACK:
2842 case SCTP_ECN_ECHO:
2843 case SCTP_ECN_CWR:
2844 case SCTP_SHUTDOWN_COMPLETE:
2845 case SCTP_AUTHENTICATION:
2846 case SCTP_ASCONF_ACK:
2847 case SCTP_PACKET_DROPPED:
2848 case SCTP_STREAM_RESET:
2849 case SCTP_FORWARD_CUM_TSN:
2850 case SCTP_ASCONF:
2851 {
2852 /*
2853 * Now, what do we do with KNOWN chunks that
2854 * are NOT in the right place?
2855 *
2856 				 * For now we treat this as a protocol
2857 				 * violation and abort. We may later want to
2858 				 * add sysctl machinery to switch between
2859 				 * aborting and actually processing them.
2860 */
2861 struct mbuf *op_err;
2862 char msg[SCTP_DIAG_INFO_LEN];
2863
2864 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2865 ch->chunk_type);
2866 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2867 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2868 return (2);
2869 }
2870 default:
2871 /*
2872 * Unknown chunk type: use bit rules after
2873 * checking length
2874 */
2875 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2876 /*
2877 					 * Need to send an abort since we had an
2878 					 * invalid chunk.
2879 */
2880 struct mbuf *op_err;
2881 char msg[SCTP_DIAG_INFO_LEN];
2882
2883 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2884 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2885 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2886 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2887 return (2);
2888 }
2889 if (ch->chunk_type & 0x40) {
2890 				/* Add an error report to the queue */
2891 struct mbuf *op_err;
2892 struct sctp_gen_error_cause *cause;
2893
2894 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2895 0, M_NOWAIT, 1, MT_DATA);
2896 if (op_err != NULL) {
2897 cause = mtod(op_err, struct sctp_gen_error_cause *);
2898 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2899 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2900 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2901 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2902 if (SCTP_BUF_NEXT(op_err) != NULL) {
2903 sctp_queue_op_err(stcb, op_err);
2904 } else {
2905 sctp_m_freem(op_err);
2906 }
2907 }
2908 }
2909 if ((ch->chunk_type & 0x80) == 0) {
2910 /* discard the rest of this packet */
2911 stop_proc = 1;
2912 } /* else skip this bad chunk and
2913 * continue... */
2914 break;
2915 } /* switch of chunk type */
2916 }
2917 *offset += SCTP_SIZE32(chk_length);
2918 if ((*offset >= length) || stop_proc) {
2919 /* no more data left in the mbuf chain */
2920 stop_proc = 1;
2921 continue;
2922 }
2923 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2924 sizeof(struct sctp_chunkhdr),
2925 (uint8_t *)&chunk_buf);
2926 if (ch == NULL) {
2927 *offset = length;
2928 stop_proc = 1;
2929 continue;
2930 }
2931 }
2932 if (break_flag) {
2933 /*
2934 * we need to report rwnd overrun drops.
2935 */
2936 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2937 }
2938 if (num_chunks) {
2939 /*
2940 		 * Did we get data? If so, update the time for auto-close and
2941 		 * give the peer credit for being alive.
2942 */
2943 SCTP_STAT_INCR(sctps_recvpktwithdata);
2944 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2945 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2946 stcb->asoc.overall_error_count,
2947 0,
2948 SCTP_FROM_SCTP_INDATA,
2949 __LINE__);
2950 }
2951 stcb->asoc.overall_error_count = 0;
2952 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2953 }
2954 /* now service all of the reassm queue if needed */
2955 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2956 /* Assure that we ack right away */
2957 stcb->asoc.send_sack = 1;
2958 }
2959 /* Start a sack timer or QUEUE a SACK for sending */
2960 sctp_sack_check(stcb, was_a_gap);
2961 return (0);
2962 }
2963
2964 static int
2965 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2966 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2967 int *num_frs,
2968 uint32_t *biggest_newly_acked_tsn,
2969 uint32_t *this_sack_lowest_newack,
2970 int *rto_ok)
2971 {
2972 struct sctp_tmit_chunk *tp1;
2973 unsigned int theTSN;
2974 int j, wake_him = 0, circled = 0;
2975
2976 /* Recover the tp1 we last saw */
2977 tp1 = *p_tp1;
2978 if (tp1 == NULL) {
2979 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2980 }
2981 for (j = frag_strt; j <= frag_end; j++) {
2982 theTSN = j + last_tsn;
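/*
 * Gap-ack block offsets are relative to the cumulative TSN of the
 * SACK, so with last_tsn = 1000 and a block of {start = 2, end = 4},
 * theTSN walks 1002..1004.
 */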
2983 while (tp1) {
2984 if (tp1->rec.data.doing_fast_retransmit)
2985 (*num_frs) += 1;
2986
2987 /*-
2988 * CMT: CUCv2 algorithm. For each TSN being
2989 * processed from the sent queue, track the
2990 * next expected pseudo-cumack, or
2991 * rtx_pseudo_cumack, if required. Separate
2992 * cumack trackers for first transmissions,
2993 * and retransmissions.
2994 */
2995 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2996 (tp1->whoTo->find_pseudo_cumack == 1) &&
2997 (tp1->snd_count == 1)) {
2998 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2999 tp1->whoTo->find_pseudo_cumack = 0;
3000 }
3001 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3002 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3003 (tp1->snd_count > 1)) {
3004 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3005 tp1->whoTo->find_rtx_pseudo_cumack = 0;
3006 }
3007 if (tp1->rec.data.tsn == theTSN) {
3008 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3009 /*-
3010 * must be held until
3011 * cum-ack passes
3012 */
3013 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3014 /*-
3015 * If it is less than RESEND, it is
3016 * now no-longer in flight.
3017 * Higher values may already be set
3018 * via previous Gap Ack Blocks...
3019 * i.e. ACKED or RESEND.
3020 */
3021 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3022 *biggest_newly_acked_tsn)) {
3023 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3024 }
3025 /*-
3026 * CMT: SFR algo (and HTNA) - set
3027 * saw_newack to 1 for dest being
3028 * newly acked. update
3029 * this_sack_highest_newack if
3030 * appropriate.
3031 */
3032 if (tp1->rec.data.chunk_was_revoked == 0)
3033 tp1->whoTo->saw_newack = 1;
3034
3035 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3036 tp1->whoTo->this_sack_highest_newack)) {
3037 tp1->whoTo->this_sack_highest_newack =
3038 tp1->rec.data.tsn;
3039 }
3040 /*-
3041 * CMT DAC algo: also update
3042 * this_sack_lowest_newack
3043 */
3044 if (*this_sack_lowest_newack == 0) {
3045 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3046 sctp_log_sack(*this_sack_lowest_newack,
3047 last_tsn,
3048 tp1->rec.data.tsn,
3049 0,
3050 0,
3051 SCTP_LOG_TSN_ACKED);
3052 }
3053 *this_sack_lowest_newack = tp1->rec.data.tsn;
3054 }
3055 /*-
3056 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3057 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3058 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3059 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3060 * Separate pseudo_cumack trackers for first transmissions and
3061 * retransmissions.
3062 */
3063 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3064 if (tp1->rec.data.chunk_was_revoked == 0) {
3065 tp1->whoTo->new_pseudo_cumack = 1;
3066 }
3067 tp1->whoTo->find_pseudo_cumack = 1;
3068 }
3069 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3070 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3071 }
3072 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3073 if (tp1->rec.data.chunk_was_revoked == 0) {
3074 tp1->whoTo->new_pseudo_cumack = 1;
3075 }
3076 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3077 }
3078 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3079 sctp_log_sack(*biggest_newly_acked_tsn,
3080 last_tsn,
3081 tp1->rec.data.tsn,
3082 frag_strt,
3083 frag_end,
3084 SCTP_LOG_TSN_ACKED);
3085 }
3086 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3087 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3088 tp1->whoTo->flight_size,
3089 tp1->book_size,
3090 (uint32_t)(uintptr_t)tp1->whoTo,
3091 tp1->rec.data.tsn);
3092 }
3093 sctp_flight_size_decrease(tp1);
3094 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3095 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3096 tp1);
3097 }
3098 sctp_total_flight_decrease(stcb, tp1);
3099
3100 tp1->whoTo->net_ack += tp1->send_size;
3101 if (tp1->snd_count < 2) {
3102 /*-
3103 * True non-retransmitted chunk
3104 */
3105 tp1->whoTo->net_ack2 += tp1->send_size;
3106
3107 /*-
3108 * update RTO too ?
3109 */
3110 if (tp1->do_rtt) {
3111 if (*rto_ok &&
3112 sctp_calculate_rto(stcb,
3113 &stcb->asoc,
3114 tp1->whoTo,
3115 &tp1->sent_rcv_time,
3116 SCTP_RTT_FROM_DATA)) {
3117 *rto_ok = 0;
3118 }
3119 if (tp1->whoTo->rto_needed == 0) {
3120 tp1->whoTo->rto_needed = 1;
3121 }
3122 tp1->do_rtt = 0;
3123 }
3124 }
3125
3126 }
3127 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3128 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3129 stcb->asoc.this_sack_highest_gap)) {
3130 stcb->asoc.this_sack_highest_gap =
3131 tp1->rec.data.tsn;
3132 }
3133 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3134 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3135 #ifdef SCTP_AUDITING_ENABLED
3136 sctp_audit_log(0xB2,
3137 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3138 #endif
3139 }
3140 }
3141 /*-
3142 * All chunks NOT UNSENT fall through here and are marked
3143 * (leave PR-SCTP ones that are to skip alone though)
3144 */
3145 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3146 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3147 tp1->sent = SCTP_DATAGRAM_MARKED;
3148 }
3149 if (tp1->rec.data.chunk_was_revoked) {
3150 /* deflate the cwnd */
3151 tp1->whoTo->cwnd -= tp1->book_size;
3152 tp1->rec.data.chunk_was_revoked = 0;
3153 }
3154 /* NR Sack code here */
3155 if (nr_sacking &&
3156 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3157 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3158 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3159 #ifdef INVARIANTS
3160 } else {
3161 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3162 #endif
3163 }
3164 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3165 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3166 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3167 stcb->asoc.trigger_reset = 1;
3168 }
3169 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3170 if (tp1->data) {
3171 /* sa_ignore NO_NULL_CHK */
3172 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3173 sctp_m_freem(tp1->data);
3174 tp1->data = NULL;
3175 }
3176 wake_him++;
3177 }
3178 }
3179 break;
3180 } /* if (tp1->tsn == theTSN) */
3181 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3182 break;
3183 }
3184 tp1 = TAILQ_NEXT(tp1, sctp_next);
3185 if ((tp1 == NULL) && (circled == 0)) {
3186 circled++;
3187 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3188 }
3189 } /* end while (tp1) */
3190 if (tp1 == NULL) {
3191 circled = 0;
3192 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3193 }
3194 /* In case the fragments were not in order we must reset */
3195 } /* end for (j = fragStart */
3196 *p_tp1 = tp1;
3197 return (wake_him); /* Return value only used for nr-sack */
3198 }
3199
3200
3201 static int
3202 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3203 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3204 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3205 int num_seg, int num_nr_seg, int *rto_ok)
3206 {
3207 struct sctp_gap_ack_block *frag, block;
3208 struct sctp_tmit_chunk *tp1;
3209 int i;
3210 int num_frs = 0;
3211 int chunk_freed;
3212 int non_revocable;
3213 uint16_t frag_strt, frag_end, prev_frag_end;
3214
3215 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3216 prev_frag_end = 0;
3217 chunk_freed = 0;
3218
3219 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3220 if (i == num_seg) {
3221 prev_frag_end = 0;
3222 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3223 }
3224 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3225 sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
3226 *offset += sizeof(block);
3227 if (frag == NULL) {
3228 return (chunk_freed);
3229 }
3230 frag_strt = ntohs(frag->start);
3231 frag_end = ntohs(frag->end);
3232
3233 if (frag_strt > frag_end) {
3234 /* This gap report is malformed, skip it. */
3235 continue;
3236 }
3237 if (frag_strt <= prev_frag_end) {
3238 /* This gap report is not in order, so restart. */
3239 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3240 }
3241 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3242 *biggest_tsn_acked = last_tsn + frag_end;
3243 }
3244 if (i < num_seg) {
3245 non_revocable = 0;
3246 } else {
3247 non_revocable = 1;
3248 }
3249 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3250 non_revocable, &num_frs, biggest_newly_acked_tsn,
3251 this_sack_lowest_newack, rto_ok)) {
3252 chunk_freed = 1;
3253 }
3254 prev_frag_end = frag_end;
3255 }
3256 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3257 if (num_frs)
3258 sctp_log_fr(*biggest_tsn_acked,
3259 *biggest_newly_acked_tsn,
3260 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3261 }
3262 return (chunk_freed);
3263 }
3264
3265 static void
3266 sctp_check_for_revoked(struct sctp_tcb *stcb,
3267 struct sctp_association *asoc, uint32_t cumack,
3268 uint32_t biggest_tsn_acked)
3269 {
3270 struct sctp_tmit_chunk *tp1;
3271
3272 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3273 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3274 /*
3275 			 * Ok, this guy is either ACKED or MARKED. If it is
3276 			 * ACKED it has been previously acked, but not this
3277 			 * time, i.e. revoked. If it is MARKED it was ACKed
3278 			 * again.
3279 */
3280 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3281 break;
3282 }
3283 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3284 /* it has been revoked */
3285 tp1->sent = SCTP_DATAGRAM_SENT;
3286 tp1->rec.data.chunk_was_revoked = 1;
3287 /* We must add this stuff back in to
3288 * assure timers and such get started.
3289 */
3290 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3291 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3292 tp1->whoTo->flight_size,
3293 tp1->book_size,
3294 (uint32_t)(uintptr_t)tp1->whoTo,
3295 tp1->rec.data.tsn);
3296 }
3297 sctp_flight_size_increase(tp1);
3298 sctp_total_flight_increase(stcb, tp1);
3299 /* We inflate the cwnd to compensate for our
3300 * artificial inflation of the flight_size.
3301 */
3302 tp1->whoTo->cwnd += tp1->book_size;
3303 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3304 sctp_log_sack(asoc->last_acked_seq,
3305 cumack,
3306 tp1->rec.data.tsn,
3307 0,
3308 0,
3309 SCTP_LOG_TSN_REVOKED);
3310 }
3311 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3312 /* it has been re-acked in this SACK */
3313 tp1->sent = SCTP_DATAGRAM_ACKED;
3314 }
3315 }
3316 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3317 break;
3318 }
3319 }
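
/*
 * Illustrative sketch (not built): the revocation rule implemented
 * above, reduced to its core. A chunk that an earlier SACK reported as
 * received (ACKED) but that this SACK no longer covers has been revoked
 * and must count as in flight again; a MARKED chunk that is covered
 * again simply returns to ACKED. The state names are simplified
 * stand-ins for the SCTP_DATAGRAM_* values.
 */
#if 0
enum example_state { EX_SENT, EX_ACKED, EX_MARKED };

static enum example_state
example_revoke_transition(enum example_state sent, int covered_by_this_sack)
{
	if (sent == EX_ACKED && !covered_by_this_sack)
		return (EX_SENT);	/* revoked: back in flight */
	if (sent == EX_MARKED && covered_by_this_sack)
		return (EX_ACKED);	/* re-acked in this SACK */
	return (sent);
}
#endif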
3320
3321
3322 static void
3323 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3324 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3325 {
3326 struct sctp_tmit_chunk *tp1;
3327 int strike_flag = 0;
3328 struct timeval now;
3329 int tot_retrans = 0;
3330 uint32_t sending_seq;
3331 struct sctp_nets *net;
3332 int num_dests_sacked = 0;
3333
3334 /*
3335 * select the sending_seq, this is either the next thing ready to be
3336 * sent but not transmitted, OR, the next seq we assign.
3337 */
3338 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3339 if (tp1 == NULL) {
3340 sending_seq = asoc->sending_seq;
3341 } else {
3342 sending_seq = tp1->rec.data.tsn;
3343 }
3344
3345 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3346 if ((asoc->sctp_cmt_on_off > 0) &&
3347 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3348 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3349 if (net->saw_newack)
3350 num_dests_sacked++;
3351 }
3352 }
3353 if (stcb->asoc.prsctp_supported) {
3354 (void)SCTP_GETTIME_TIMEVAL(&now);
3355 }
3356 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3357 strike_flag = 0;
3358 if (tp1->no_fr_allowed) {
3359 /* this one had a timeout or something */
3360 continue;
3361 }
3362 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3363 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3364 sctp_log_fr(biggest_tsn_newly_acked,
3365 tp1->rec.data.tsn,
3366 tp1->sent,
3367 SCTP_FR_LOG_CHECK_STRIKE);
3368 }
3369 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3370 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3371 /* done */
3372 break;
3373 }
3374 if (stcb->asoc.prsctp_supported) {
3375 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3376 /* Is it expired? */
3377 #ifndef __FreeBSD__
3378 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3379 #else
3380 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3381 #endif
3382 /* Yes so drop it */
3383 if (tp1->data != NULL) {
3384 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3385 SCTP_SO_NOT_LOCKED);
3386 }
3387 continue;
3388 }
3389 }
3390
3391 }
3392 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3393 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3394 /* we are beyond the tsn in the sack */
3395 break;
3396 }
3397 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3398 /* either a RESEND, ACKED, or MARKED */
3399 /* skip */
3400 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3401 /* Continue striking FWD-TSN chunks */
3402 tp1->rec.data.fwd_tsn_cnt++;
3403 }
3404 continue;
3405 }
3406 /*
3407 * CMT : SFR algo (covers part of DAC and HTNA as well)
3408 */
3409 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3410 /*
3411 * No new acks were received for data sent to this
3412 * dest. Therefore, according to the SFR algo for
3413 * CMT, no data sent to this dest can be marked for
3414 * FR using this SACK.
3415 */
3416 continue;
3417 } else if (tp1->whoTo &&
3418 SCTP_TSN_GT(tp1->rec.data.tsn,
3419 tp1->whoTo->this_sack_highest_newack) &&
3420 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3421 /*
3422 * CMT: New acks were received for data sent to
3423 * this dest. But no new acks were seen for data
3424 * sent after tp1. Therefore, according to the SFR
3425 * algo for CMT, tp1 cannot be marked for FR using
3426 * this SACK. This step covers part of the DAC algo
3427 * and the HTNA algo as well.
3428 */
3429 continue;
3430 }
3431 /*
3432 * Here we check to see if we have already done a FR
3433 * and if so we see if the biggest TSN we saw in the sack is
3434 * smaller than the recovery point. If so we don't strike
3435 * the tsn... otherwise we CAN strike the TSN.
3436 */
3437 /*
3438 * @@@ JRI: Check for CMT
3439 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3440 */
3441 if (accum_moved && asoc->fast_retran_loss_recovery) {
3442 /*
3443 * Strike the TSN if in fast-recovery and cum-ack
3444 * moved.
3445 */
3446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3447 sctp_log_fr(biggest_tsn_newly_acked,
3448 tp1->rec.data.tsn,
3449 tp1->sent,
3450 SCTP_FR_LOG_STRIKE_CHUNK);
3451 }
3452 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3453 tp1->sent++;
3454 }
3455 if ((asoc->sctp_cmt_on_off > 0) &&
3456 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3457 /*
3458 * CMT DAC algorithm: If SACK flag is set to
3459 * 0, then lowest_newack test will not pass
3460 * because it would have been set to the
3461 * cumack earlier. If tp1 is not already
3462 * marked for rtx, this is not a mixed SACK,
3463 * and tp1 is not between two sacked TSNs,
3464 * then mark it one more time.
3465 * NOTE that we mark one additional time since the SACK DAC flag indicates that
3466 * two packets have been received after this missing TSN.
3467 */
3468 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3469 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3470 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3471 sctp_log_fr(16 + num_dests_sacked,
3472 tp1->rec.data.tsn,
3473 tp1->sent,
3474 SCTP_FR_LOG_STRIKE_CHUNK);
3475 }
3476 tp1->sent++;
3477 }
3478 }
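			/*
			 * Illustrative sketch (not built): the DAC
			 * extra-strike test used in all three branches of
			 * this function, factored out for clarity. Names
			 * are hypothetical; the threshold stands in for
			 * SCTP_DATAGRAM_RESEND.
			 */
#if 0
#include <stdint.h>

static int
example_dac_extra_strike(int sent, int resend_threshold,
    int num_dests_sacked, uint32_t lowest_newack, uint32_t tsn)
{
	/*
	 * One extra strike only if the chunk is not already queued for
	 * retransmission, the SACK is not mixed (exactly one destination
	 * saw a new ack), and tsn lies below the lowest newly acked TSN.
	 */
	return (sent < resend_threshold &&
	    num_dests_sacked == 1 &&
	    ((int32_t)(lowest_newack - tsn)) > 0);
}
#endif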
3479 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3480 (asoc->sctp_cmt_on_off == 0)) {
3481 /*
3482 * For those that have done a FR we must take
3483 * special consideration if we strike. I.e the
3484 * biggest_newly_acked must be higher than the
3485 * sending_seq at the time we did the FR.
3486 */
3487 if (
3488 #ifdef SCTP_FR_TO_ALTERNATE
3489 /*
3490 * If FR's go to new networks, then we must only do
3491 * this for singly homed asoc's. However if the FR's
3492 * go to the same network (Armando's work) then it's
3493 * ok to FR multiple times.
3494 */
3495 (asoc->numnets < 2)
3496 #else
3497 (1)
3498 #endif
3499 ) {
3500
3501 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3502 tp1->rec.data.fast_retran_tsn)) {
3503 /*
3504 * Strike the TSN, since this ack is
3505 * beyond where things were when we
3506 * did a FR.
3507 */
3508 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3509 sctp_log_fr(biggest_tsn_newly_acked,
3510 tp1->rec.data.tsn,
3511 tp1->sent,
3512 SCTP_FR_LOG_STRIKE_CHUNK);
3513 }
3514 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3515 tp1->sent++;
3516 }
3517 strike_flag = 1;
3518 if ((asoc->sctp_cmt_on_off > 0) &&
3519 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3520 /*
3521 * CMT DAC algorithm: If
3522 * SACK flag is set to 0,
3523 * then lowest_newack test
3524 * will not pass because it
3525 * would have been set to
3526 * the cumack earlier. If
3527 * tp1 is not already marked
3528 * for rtx, this is not a
3529 * mixed SACK, and tp1 is not
3530 * between two sacked TSNs,
3531 * then mark it one more time.
3532 * NOTE that we mark one additional time since the SACK DAC flag indicates that
3533 * two packets have been received after this missing TSN.
3534 */
3535 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3536 (num_dests_sacked == 1) &&
3537 SCTP_TSN_GT(this_sack_lowest_newack,
3538 tp1->rec.data.tsn)) {
3539 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3540 sctp_log_fr(32 + num_dests_sacked,
3541 tp1->rec.data.tsn,
3542 tp1->sent,
3543 SCTP_FR_LOG_STRIKE_CHUNK);
3544 }
3545 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3546 tp1->sent++;
3547 }
3548 }
3549 }
3550 }
3551 }
3552 /*
3553 * JRI: TODO: remove code for HTNA algo. CMT's
3554 * SFR algo covers HTNA.
3555 */
3556 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3557 biggest_tsn_newly_acked)) {
3558 /*
3559 * We don't strike these: this is the HTNA
3560 * algorithm, i.e. we don't strike if our TSN is
3561 * larger than the highest TSN newly acked.
3562 */
3563 ;
3564 } else {
3565 /* Strike the TSN */
3566 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3567 sctp_log_fr(biggest_tsn_newly_acked,
3568 tp1->rec.data.tsn,
3569 tp1->sent,
3570 SCTP_FR_LOG_STRIKE_CHUNK);
3571 }
3572 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3573 tp1->sent++;
3574 }
3575 if ((asoc->sctp_cmt_on_off > 0) &&
3576 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3577 /*
3578 * CMT DAC algorithm: If SACK flag is set to
3579 * 0, then lowest_newack test will not pass
3580 * because it would have been set to the
3581 * cumack earlier. If tp1 is not already
3582 * marked for rtx, this is not a mixed SACK,
3583 * and tp1 is not between two sacked TSNs,
3584 * then mark it one more time.
3585 * NOTE that we mark one additional time since the SACK DAC flag indicates that
3586 * two packets have been received after this missing TSN.
3587 */
3588 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3589 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3590 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3591 sctp_log_fr(48 + num_dests_sacked,
3592 tp1->rec.data.tsn,
3593 tp1->sent,
3594 SCTP_FR_LOG_STRIKE_CHUNK);
3595 }
3596 tp1->sent++;
3597 }
3598 }
3599 }
3600 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3601 struct sctp_nets *alt;
3602
3603 /* fix counts and things */
3604 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3605 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3606 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3607 tp1->book_size,
3608 (uint32_t)(uintptr_t)tp1->whoTo,
3609 tp1->rec.data.tsn);
3610 }
3611 if (tp1->whoTo) {
3612 tp1->whoTo->net_ack++;
3613 sctp_flight_size_decrease(tp1);
3614 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3615 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3616 tp1);
3617 }
3618 }
3619
3620 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3621 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3622 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3623 }
3624 /* add back to the rwnd */
3625 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3626
3627 /* remove from the total flight */
3628 sctp_total_flight_decrease(stcb, tp1);
3629
3630 if ((stcb->asoc.prsctp_supported) &&
3631 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3632 /* Has it been retransmitted tv_sec times? - we store the retran count there. */
3633 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3634 /* Yes, so drop it */
3635 if (tp1->data != NULL) {
3636 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3637 SCTP_SO_NOT_LOCKED);
3638 }
3639 /* Make sure to flag we had a FR */
3640 if (tp1->whoTo != NULL) {
3641 tp1->whoTo->net_ack++;
3642 }
3643 continue;
3644 }
3645 }
3646 /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
3647 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3648 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3649 0, SCTP_FR_MARKED);
3650 }
3651 if (strike_flag) {
3652 /* This is a subsequent FR */
3653 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3654 }
3655 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3656 if (asoc->sctp_cmt_on_off > 0) {
3657 /*
3658 * CMT: Using RTX_SSTHRESH policy for CMT.
3659 * If CMT is being used, then pick dest with
3660 * largest ssthresh for any retransmission.
3661 */
3662 tp1->no_fr_allowed = 1;
3663 alt = tp1->whoTo;
3664 /*sa_ignore NO_NULL_CHK*/
3665 if (asoc->sctp_cmt_pf > 0) {
3666 /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
3667 alt = sctp_find_alternate_net(stcb, alt, 2);
3668 } else {
3669 /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
3670 /*sa_ignore NO_NULL_CHK*/
3671 alt = sctp_find_alternate_net(stcb, alt, 1);
3672 }
3673 if (alt == NULL) {
3674 alt = tp1->whoTo;
3675 }
3676 /*
3677 * CUCv2: If a different dest is picked for
3678 * the retransmission, then new
3679 * (rtx-)pseudo_cumack needs to be tracked
3680 * for orig dest. Let CUCv2 track new (rtx-)
3681 * pseudo-cumack always.
3682 */
3683 if (tp1->whoTo) {
3684 tp1->whoTo->find_pseudo_cumack = 1;
3685 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3686 }
3687
3688 } else {/* CMT is OFF */
3689
3690 #ifdef SCTP_FR_TO_ALTERNATE
3691 /* Can we find an alternate? */
3692 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3693 #else
3694 /*
3695 * default behavior is to NOT retransmit
3696 * FR's to an alternate. Armando Caro's
3697 * paper details why.
3698 */
3699 alt = tp1->whoTo;
3700 #endif
3701 }
3702
3703 tp1->rec.data.doing_fast_retransmit = 1;
3704 tot_retrans++;
3705 /* mark the sending seq for possible subsequent FR's */
3706 /*
3707 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3708 * (uint32_t)tp1->rec.data.tsn);
3709 */
3710 if (TAILQ_EMPTY(&asoc->send_queue)) {
3711 /*
3712 * If the send queue is empty then it's
3713 * the next sequence number that will be
3714 * assigned, so we subtract one from this to
3715 * get the one we last sent.
3716 */
3717 tp1->rec.data.fast_retran_tsn = sending_seq;
3718 } else {
3719 /*
3720 * If there are chunks on the send queue
3721 * (unsent data that has made it from the
3722 * stream queues but not out the door), we
3723 * take the first one (which will have the
3724 * lowest TSN) and subtract one to get the
3725 * one we last sent.
3726 */
3727 struct sctp_tmit_chunk *ttt;
3728
3729 ttt = TAILQ_FIRST(&asoc->send_queue);
3730 tp1->rec.data.fast_retran_tsn =
3731 ttt->rec.data.tsn;
3732 }
3733
3734 if (tp1->do_rtt) {
3735 /*
3736 * this guy had an RTO calculation pending on
3737 * it, cancel it
3738 */
3739 if ((tp1->whoTo != NULL) &&
3740 (tp1->whoTo->rto_needed == 0)) {
3741 tp1->whoTo->rto_needed = 1;
3742 }
3743 tp1->do_rtt = 0;
3744 }
3745 if (alt != tp1->whoTo) {
3746 /* yes, there is an alternate. */
3747 sctp_free_remote_addr(tp1->whoTo);
3748 /*sa_ignore FREED_MEMORY*/
3749 tp1->whoTo = alt;
3750 atomic_add_int(&alt->ref_count, 1);
3751 }
3752 }
3753 }
3754 }
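
/*
 * Illustrative sketch (not built): the marking scheme above is a strike
 * counter. tp1->sent is incremented once per qualifying SACK (twice
 * under CMT DAC) until it reaches SCTP_DATAGRAM_RESEND, at which point
 * the chunk enters fast retransmission. A simplified model with
 * hypothetical names:
 */
#if 0
#define EX_RESEND 4	/* stand-in for the SCTP_DATAGRAM_RESEND threshold */

static int
example_strike(int sent, int dac_extra)
{
	if (sent < EX_RESEND)
		sent++;		/* one strike per qualifying SACK */
	if (dac_extra && sent < EX_RESEND)
		sent++;		/* DAC: two packets seen past the hole */
	return (sent);		/* == EX_RESEND => fast retransmit */
}
#endif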
3755
3756 struct sctp_tmit_chunk *
3757 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3758 struct sctp_association *asoc)
3759 {
3760 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3761 struct timeval now;
3762 int now_filled = 0;
3763
3764 if (asoc->prsctp_supported == 0) {
3765 return (NULL);
3766 }
3767 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3768 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3769 tp1->sent != SCTP_DATAGRAM_RESEND &&
3770 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3771 /* no chance to advance, out of here */
3772 break;
3773 }
3774 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3775 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3776 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3777 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3778 asoc->advanced_peer_ack_point,
3779 tp1->rec.data.tsn, 0, 0);
3780 }
3781 }
3782 if (!PR_SCTP_ENABLED(tp1->flags)) {
3783 /*
3784 * We can't fwd-tsn past any that are reliable; they are
3785 * retransmitted until the asoc fails.
3786 */
3787 break;
3788 }
3789 if (!now_filled) {
3790 (void)SCTP_GETTIME_TIMEVAL(&now);
3791 now_filled = 1;
3792 }
3793 /*
3794 * Now we have a chunk which is marked for another
3795 * retransmission to a PR-stream but may have run out of chances
3796 * already, OR has been marked to skip now. Can we skip
3797 * it if it's a resend?
3798 */
3799 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3800 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3801 /*
3802 * Now is this one marked for resend and its time is
3803 * now up?
3804 */
3805 #ifndef __FreeBSD__
3806 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3807 #else
3808 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3809 #endif
3810 /* Yes so drop it */
3811 if (tp1->data) {
3812 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3813 1, SCTP_SO_NOT_LOCKED);
3814 }
3815 } else {
3816 /*
3817 * No, we are done when we hit one marked for resend
3818 * whose time has not expired.
3819 */
3820 break;
3821 }
3822 }
3823 /*
3824 * Ok, now if this chunk is marked to drop we can clean up
3825 * the chunk, advance our peer ack point, and check
3826 * the next chunk.
3827 */
3828 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3829 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3830 /* the advanced PeerAckPoint moves forward */
3831 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3832 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3833 a_adv = tp1;
3834 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3835 /* No update but we do save the chk */
3836 a_adv = tp1;
3837 }
3838 } else {
3839 /*
3840 * If it is still in RESEND we can advance no
3841 * further
3842 */
3843 break;
3844 }
3845 }
3846 return (a_adv);
3847 }
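
/*
 * Illustrative sketch (not built): the walk above advances the
 * advanced-peer-ack-point over abandoned chunks (FWD_TSN_SKIP or
 * NR_ACKED) and stops at the first chunk that must still be delivered
 * reliably. Simplified, hypothetical types:
 */
#if 0
#include <stdint.h>

struct example_chunk {
	uint32_t tsn;
	int abandoned;			/* skip or NR-acked */
	struct example_chunk *next;
};

static uint32_t
example_advance(struct example_chunk *head, uint32_t adv_point)
{
	struct example_chunk *c;

	for (c = head; c != NULL; c = c->next) {
		if (!c->abandoned)
			break;		/* reliable chunk: cannot skip past */
		if (((int32_t)(c->tsn - adv_point)) > 0)
			adv_point = c->tsn;
	}
	return (adv_point);
}
#endif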
3848
3849 static int
3850 sctp_fs_audit(struct sctp_association *asoc)
3851 {
3852 struct sctp_tmit_chunk *chk;
3853 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3854 int ret;
3855 #ifndef INVARIANTS
3856 int entry_flight, entry_cnt;
3857 #endif
3858
3859 ret = 0;
3860 #ifndef INVARIANTS
3861 entry_flight = asoc->total_flight;
3862 entry_cnt = asoc->total_flight_count;
3863 #endif
3864 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3865 return (0);
3866
3867 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3868 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3869 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3870 chk->rec.data.tsn,
3871 chk->send_size,
3872 chk->snd_count);
3873 inflight++;
3874 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3875 resend++;
3876 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3877 inbetween++;
3878 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3879 above++;
3880 } else {
3881 acked++;
3882 }
3883 }
3884
3885 if ((inflight > 0) || (inbetween > 0)) {
3886 #ifdef INVARIANTS
3887 panic("Flight size-express incorrect? \n");
3888 #else
3889 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3890 entry_flight, entry_cnt);
3891
3892 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3893 inflight, inbetween, resend, above, acked);
3894 ret = 1;
3895 #endif
3896 }
3897 return (ret);
3898 }
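
/*
 * Illustrative sketch (not built): the invariant audited above. The
 * association-wide flight size should equal the sum of the book sizes
 * of chunks whose state is below RESEND; any chunk sitting between
 * RESEND and ACKED ("inbetween") indicates broken accounting. A
 * minimal model with hypothetical names:
 */
#if 0
#include <stdint.h>

struct example_chunk {
	int sent;			/* state ladder value */
	uint32_t book_size;
	struct example_chunk *next;
};

static uint32_t
example_expected_flight(struct example_chunk *head, int resend_state)
{
	struct example_chunk *c;
	uint32_t fs = 0;

	for (c = head; c != NULL; c = c->next)
		if (c->sent < resend_state)
			fs += c->book_size;	/* still counted in flight */
	return (fs);
}
#endif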
3899
3900
3901 static void
3902 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3903 struct sctp_association *asoc,
3904 struct sctp_tmit_chunk *tp1)
3905 {
3906 tp1->window_probe = 0;
3907 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3908 /* TSNs skipped; we do NOT move back. */
3909 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3910 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3911 tp1->book_size,
3912 (uint32_t)(uintptr_t)tp1->whoTo,
3913 tp1->rec.data.tsn);
3914 return;
3915 }
3916 /* First setup this by shrinking flight */
3917 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3918 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3919 tp1);
3920 }
3921 sctp_flight_size_decrease(tp1);
3922 sctp_total_flight_decrease(stcb, tp1);
3923 /* Now mark for resend */
3924 tp1->sent = SCTP_DATAGRAM_RESEND;
3925 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3926
3927 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3928 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3929 tp1->whoTo->flight_size,
3930 tp1->book_size,
3931 (uint32_t)(uintptr_t)tp1->whoTo,
3932 tp1->rec.data.tsn);
3933 }
3934 }
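
/*
 * Illustrative sketch (not built): window probe recovery in a nutshell,
 * as performed above. Once the peer's window reopens, the probe chunk
 * is deflated out of the flight accounting and re-queued for normal
 * retransmission. Hypothetical names:
 */
#if 0
#include <stdint.h>

static void
example_window_probe_recover(int *sent, uint32_t *flight, uint32_t book,
    int resend_state)
{
	*flight -= book;	/* no longer counted as in flight */
	*sent = resend_state;	/* queue the chunk for retransmission */
}
#endif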
3935
3936 void
3937 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3938 uint32_t rwnd, int *abort_now, int ecne_seen)
3939 {
3940 struct sctp_nets *net;
3941 struct sctp_association *asoc;
3942 struct sctp_tmit_chunk *tp1, *tp2;
3943 uint32_t old_rwnd;
3944 int win_probe_recovery = 0;
3945 int win_probe_recovered = 0;
3946 int j, done_once = 0;
3947 int rto_ok = 1;
3948 uint32_t send_s;
3949
3950 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3951 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3952 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3953 }
3954 SCTP_TCB_LOCK_ASSERT(stcb);
3955 #ifdef SCTP_ASOCLOG_OF_TSNS
3956 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3957 stcb->asoc.cumack_log_at++;
3958 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3959 stcb->asoc.cumack_log_at = 0;
3960 }
3961 #endif
3962 asoc = &stcb->asoc;
3963 old_rwnd = asoc->peers_rwnd;
3964 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3965 /* old ack */
3966 return;
3967 } else if (asoc->last_acked_seq == cumack) {
3968 /* Window update sack */
3969 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3970 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3971 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3972 /* SWS sender side engages */
3973 asoc->peers_rwnd = 0;
3974 }
3975 if (asoc->peers_rwnd > old_rwnd) {
3976 goto again;
3977 }
3978 return;
3979 }
3980
3981 /* First setup for CC stuff */
3982 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3983 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3984 /* Drag along the window_tsn for cwr's */
3985 net->cwr_window_tsn = cumack;
3986 }
3987 net->prev_cwnd = net->cwnd;
3988 net->net_ack = 0;
3989 net->net_ack2 = 0;
3990
3991 /*
3992 * CMT: Reset CUC and Fast recovery algo variables before
3993 * SACK processing
3994 */
3995 net->new_pseudo_cumack = 0;
3996 net->will_exit_fast_recovery = 0;
3997 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3998 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
3999 }
4000 }
4001 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4002 tp1 = TAILQ_LAST(&asoc->sent_queue,
4003 sctpchunk_listhead);
4004 send_s = tp1->rec.data.tsn + 1;
4005 } else {
4006 send_s = asoc->sending_seq;
4007 }
4008 if (SCTP_TSN_GE(cumack, send_s)) {
4009 struct mbuf *op_err;
4010 char msg[SCTP_DIAG_INFO_LEN];
4011
4012 *abort_now = 1;
4013 /* XXX */
4014 SCTP_SNPRINTF(msg, sizeof(msg),
4015 "Cum ack %8.8x greater or equal than TSN %8.8x",
4016 cumack, send_s);
4017 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4018 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4019 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4020 return;
4021 }
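	/*
	 * Illustrative sketch (not built): the sanity check above.
	 * send_s is one past the highest TSN ever handed to the peer,
	 * so a cum-ack at or beyond it acknowledges data that was never
	 * sent and the association must be aborted. Hypothetical
	 * standalone form:
	 */
#if 0
#include <stdint.h>

static int
example_cumack_is_bogus(uint32_t cumack, uint32_t highest_outstanding_tsn)
{
	uint32_t send_s = highest_outstanding_tsn + 1;

	/* serial-number "greater or equal" comparison */
	return (((int32_t)(cumack - send_s)) >= 0);
}
#endif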
4022 asoc->this_sack_highest_gap = cumack;
4023 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4024 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4025 stcb->asoc.overall_error_count,
4026 0,
4027 SCTP_FROM_SCTP_INDATA,
4028 __LINE__);
4029 }
4030 stcb->asoc.overall_error_count = 0;
4031 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4032 /* process the new consecutive TSN first */
4033 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4034 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4035 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4036 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4037 }
4038 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4039 /*
4040 * If it is less than ACKED, it is
4041 * now no longer in flight. Higher
4042 * values may occur during marking
4043 */
4044 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4045 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4046 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4047 tp1->whoTo->flight_size,
4048 tp1->book_size,
4049 (uint32_t)(uintptr_t)tp1->whoTo,
4050 tp1->rec.data.tsn);
4051 }
4052 sctp_flight_size_decrease(tp1);
4053 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4054 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4055 tp1);
4056 }
4057 /* sa_ignore NO_NULL_CHK */
4058 sctp_total_flight_decrease(stcb, tp1);
4059 }
4060 tp1->whoTo->net_ack += tp1->send_size;
4061 if (tp1->snd_count < 2) {
4062 /*
4063 * True non-retransmitted
4064 * chunk
4065 */
4066 tp1->whoTo->net_ack2 +=
4067 tp1->send_size;
4068
4069 /* update RTO too? */
4070 if (tp1->do_rtt) {
4071 if (rto_ok &&
4072 sctp_calculate_rto(stcb,
4073 &stcb->asoc,
4074 tp1->whoTo,
4075 &tp1->sent_rcv_time,
4076 SCTP_RTT_FROM_DATA)) {
4077 rto_ok = 0;
4078 }
4079 if (tp1->whoTo->rto_needed == 0) {
4080 tp1->whoTo->rto_needed = 1;
4081 }
4082 tp1->do_rtt = 0;
4083 }
4084 }
4085 /*
4086 * CMT: CUCv2 algorithm. From the
4087 * cumack'd TSNs, for each TSN being
4088 * acked for the first time, set the
4089 * following variables for the
4090 * corresp destination.
4091 * new_pseudo_cumack will trigger a
4092 * cwnd update.
4093 * find_(rtx_)pseudo_cumack will
4094 * trigger search for the next
4095 * expected (rtx-)pseudo-cumack.
4096 */
4097 tp1->whoTo->new_pseudo_cumack = 1;
4098 tp1->whoTo->find_pseudo_cumack = 1;
4099 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4100
4101 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4102 /* sa_ignore NO_NULL_CHK */
4103 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4104 }
4105 }
4106 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4107 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4108 }
4109 if (tp1->rec.data.chunk_was_revoked) {
4110 /* deflate the cwnd */
4111 tp1->whoTo->cwnd -= tp1->book_size;
4112 tp1->rec.data.chunk_was_revoked = 0;
4113 }
4114 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4115 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4116 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4117 #ifdef INVARIANTS
4118 } else {
4119 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4120 #endif
4121 }
4122 }
4123 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4124 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4125 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4126 asoc->trigger_reset = 1;
4127 }
4128 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4129 if (tp1->data) {
4130 /* sa_ignore NO_NULL_CHK */
4131 sctp_free_bufspace(stcb, asoc, tp1, 1);
4132 sctp_m_freem(tp1->data);
4133 tp1->data = NULL;
4134 }
4135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4136 sctp_log_sack(asoc->last_acked_seq,
4137 cumack,
4138 tp1->rec.data.tsn,
4139 0,
4140 0,
4141 SCTP_LOG_FREE_SENT);
4142 }
4143 asoc->sent_queue_cnt--;
4144 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4145 } else {
4146 break;
4147 }
4148 }
4149
4150 }
4151 #if defined(__Userspace__)
4152 if (stcb->sctp_ep->recv_callback) {
4153 if (stcb->sctp_socket) {
4154 uint32_t inqueue_bytes, sb_free_now;
4155 struct sctp_inpcb *inp;
4156
4157 inp = stcb->sctp_ep;
4158 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4159 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4160
4161 /* check if the amount free in the send socket buffer crossed the threshold */
4162 if (inp->send_callback &&
4163 (((inp->send_sb_threshold > 0) &&
4164 (sb_free_now >= inp->send_sb_threshold) &&
4165 (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
4166 (inp->send_sb_threshold == 0))) {
4167 atomic_add_int(&stcb->asoc.refcnt, 1);
4168 SCTP_TCB_UNLOCK(stcb);
4169 inp->send_callback(stcb->sctp_socket, sb_free_now);
4170 SCTP_TCB_LOCK(stcb);
4171 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4172 }
4173 }
4174 } else if (stcb->sctp_socket) {
4175 #else
4176 /* sa_ignore NO_NULL_CHK */
4177 if (stcb->sctp_socket) {
4178 #endif
4179 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4180 struct socket *so;
4181
4182 #endif
4183 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4185 /* sa_ignore NO_NULL_CHK */
4186 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4187 }
4188 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4189 so = SCTP_INP_SO(stcb->sctp_ep);
4190 atomic_add_int(&stcb->asoc.refcnt, 1);
4191 SCTP_TCB_UNLOCK(stcb);
4192 SCTP_SOCKET_LOCK(so, 1);
4193 SCTP_TCB_LOCK(stcb);
4194 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4195 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4196 /* assoc was freed while we were unlocked */
4197 SCTP_SOCKET_UNLOCK(so, 1);
4198 return;
4199 }
4200 #endif
4201 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4202 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4203 SCTP_SOCKET_UNLOCK(so, 1);
4204 #endif
4205 } else {
4206 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4207 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4208 }
4209 }
4210
4211 /* JRS - Use the congestion control given in the CC module */
4212 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4213 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4214 if (net->net_ack2 > 0) {
4215 /*
4216 * Karn's rule applies to clearing error count, this
4217 * is optional.
4218 */
4219 net->error_count = 0;
4220 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4221 /* addr came good */
4222 net->dest_state |= SCTP_ADDR_REACHABLE;
4223 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4224 0, (void *)net, SCTP_SO_NOT_LOCKED);
4225 }
4226 if (net == stcb->asoc.primary_destination) {
4227 if (stcb->asoc.alternate) {
4228 /* release the alternate, primary is good */
4229 sctp_free_remote_addr(stcb->asoc.alternate);
4230 stcb->asoc.alternate = NULL;
4231 }
4232 }
4233 if (net->dest_state & SCTP_ADDR_PF) {
4234 net->dest_state &= ~SCTP_ADDR_PF;
4235 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4236 stcb->sctp_ep, stcb, net,
4237 SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4238 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4239 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4240 /* Done with this net */
4241 net->net_ack = 0;
4242 }
4243 /* restore any doubled timers */
4244 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4245 if (net->RTO < stcb->asoc.minrto) {
4246 net->RTO = stcb->asoc.minrto;
4247 }
4248 if (net->RTO > stcb->asoc.maxrto) {
4249 net->RTO = stcb->asoc.maxrto;
4250 }
4251 }
4252 }
4253 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4254 }
4255 asoc->last_acked_seq = cumack;
4256
4257 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4258 /* nothing left in-flight */
4259 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4260 net->flight_size = 0;
4261 net->partial_bytes_acked = 0;
4262 }
4263 asoc->total_flight = 0;
4264 asoc->total_flight_count = 0;
4265 }
4266
4267 /* RWND update */
4268 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4269 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4270 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4271 /* SWS sender side engages */
4272 asoc->peers_rwnd = 0;
4273 }
4274 if (asoc->peers_rwnd > old_rwnd) {
4275 win_probe_recovery = 1;
4276 }
4277 /* Now assure a timer where data is queued at */
4278 again:
4279 j = 0;
4280 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4281 if (win_probe_recovery && (net->window_probe)) {
4282 win_probe_recovered = 1;
4283 /*
4284 * Find the first chunk that was used for a window probe
4285 * and clear its window-probe state
4286 */
4287 /* sa_ignore FREED_MEMORY */
4288 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4289 if (tp1->window_probe) {
4290 /* move back to data send queue */
4291 sctp_window_probe_recovery(stcb, asoc, tp1);
4292 break;
4293 }
4294 }
4295 }
4296 if (net->flight_size) {
4297 j++;
4298 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4299 if (net->window_probe) {
4300 net->window_probe = 0;
4301 }
4302 } else {
4303 if (net->window_probe) {
4304 /* In window probes we must assure a timer is still running there */
4305 net->window_probe = 0;
4306 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4307 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4308 }
4309 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4310 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4311 stcb, net,
4312 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4313 }
4314 }
4315 }
4316 if ((j == 0) &&
4317 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4318 (asoc->sent_queue_retran_cnt == 0) &&
4319 (win_probe_recovered == 0) &&
4320 (done_once == 0)) {
4321 /* huh, this should not happen unless all packets
4322 * are PR-SCTP and marked to skip of course.
4323 */
4324 if (sctp_fs_audit(asoc)) {
4325 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4326 net->flight_size = 0;
4327 }
4328 asoc->total_flight = 0;
4329 asoc->total_flight_count = 0;
4330 asoc->sent_queue_retran_cnt = 0;
4331 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4332 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4333 sctp_flight_size_increase(tp1);
4334 sctp_total_flight_increase(stcb, tp1);
4335 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4336 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4337 }
4338 }
4339 }
4340 done_once = 1;
4341 goto again;
4342 }
4343 /**********************************/
4344 /* Now what about shutdown issues */
4345 /**********************************/
4346 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4347 /* nothing left on sendqueue.. consider done */
4348 /* clean up */
4349 if ((asoc->stream_queue_cnt == 1) &&
4350 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4351 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4352 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
4353 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4354 }
4355 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4356 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4357 (asoc->stream_queue_cnt == 1) &&
4358 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4359 struct mbuf *op_err;
4360
4361 *abort_now = 1;
4362 /* XXX */
4363 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4364 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4365 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4366 return;
4367 }
4368 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4369 (asoc->stream_queue_cnt == 0)) {
4370 struct sctp_nets *netp;
4371
4372 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4373 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4374 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4375 }
4376 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4377 sctp_stop_timers_for_shutdown(stcb);
4378 if (asoc->alternate) {
4379 netp = asoc->alternate;
4380 } else {
4381 netp = asoc->primary_destination;
4382 }
4383 sctp_send_shutdown(stcb, netp);
4384 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4385 stcb->sctp_ep, stcb, netp);
4386 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4387 stcb->sctp_ep, stcb, NULL);
4388 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4389 (asoc->stream_queue_cnt == 0)) {
4390 struct sctp_nets *netp;
4391
4392 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4393 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4394 sctp_stop_timers_for_shutdown(stcb);
4395 if (asoc->alternate) {
4396 netp = asoc->alternate;
4397 } else {
4398 netp = asoc->primary_destination;
4399 }
4400 sctp_send_shutdown_ack(stcb, netp);
4401 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4402 stcb->sctp_ep, stcb, netp);
4403 }
4404 }
4405 /*********************************************/
4406 /* Here we perform PR-SCTP procedures */
4407 /* (section 4.2) */
4408 /*********************************************/
4409 /* C1. update advancedPeerAckPoint */
4410 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4411 asoc->advanced_peer_ack_point = cumack;
4412 }
4413 /* PR-SCTP issues need to be addressed too */
4414 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4415 struct sctp_tmit_chunk *lchk;
4416 uint32_t old_adv_peer_ack_point;
4417
4418 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4419 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4420 /* C3. See if we need to send a Fwd-TSN */
4421 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4422 /*
4423 * ISSUE with ECN, see FWD-TSN processing.
4424 */
4425 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4426 send_forward_tsn(stcb, asoc);
4427 } else if (lchk) {
4428 /* try to FR fwd-tsn's that get lost too */
4429 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4430 send_forward_tsn(stcb, asoc);
4431 }
4432 }
4433 }
4434 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4435 if (lchk->whoTo != NULL) {
4436 break;
4437 }
4438 }
4439 if (lchk != NULL) {
4440 /* Assure a timer is up */
4441 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4442 stcb->sctp_ep, stcb, lchk->whoTo);
4443 }
4444 }
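	/*
	 * Illustrative sketch (not built): the FORWARD-TSN decision made
	 * just above. Send one when the advanced peer ack point has moved
	 * past the cum-ack, or re-send when an earlier FORWARD-TSN looks
	 * lost (the skipped chunk keeps being struck). Hypothetical names:
	 */
#if 0
#include <stdint.h>

static int
example_should_send_fwd_tsn(uint32_t adv_point, uint32_t old_adv_point,
    uint32_t cumack, uint32_t fwd_tsn_strikes)
{
	if (((int32_t)(adv_point - cumack)) <= 0)
		return (0);	/* nothing to skip past */
	if (((int32_t)(adv_point - old_adv_point)) > 0)
		return (1);	/* the point moved: send a FORWARD-TSN */
	return (fwd_tsn_strikes >= 3);	/* earlier FORWARD-TSN likely lost */
}
#endif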
4445 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4446 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4447 rwnd,
4448 stcb->asoc.peers_rwnd,
4449 stcb->asoc.total_flight,
4450 stcb->asoc.total_output_queue_size);
4451 }
4452 }
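
/*
 * Illustrative sketch (not built): the peer rwnd update with silly
 * window syndrome (SWS) avoidance used by both SACK paths. The
 * advertised window is reduced by the bytes in flight plus a per-chunk
 * overhead allowance, and windows below the SWS threshold are treated
 * as closed. Hypothetical names:
 */
#if 0
#include <stdint.h>

static uint32_t
example_peers_rwnd(uint32_t advertised, uint32_t total_flight,
    uint32_t flight_count, uint32_t per_chunk_oh, uint32_t sws_sender)
{
	uint32_t in_use = total_flight + flight_count * per_chunk_oh;
	uint32_t rwnd = (advertised > in_use) ? advertised - in_use : 0;

	return ((rwnd < sws_sender) ? 0 : rwnd);
}
#endif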
4453
4454 void
4455 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4456 struct sctp_tcb *stcb,
4457 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4458 int *abort_now, uint8_t flags,
4459 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4460 {
4461 struct sctp_association *asoc;
4462 struct sctp_tmit_chunk *tp1, *tp2;
4463 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4464 uint16_t wake_him = 0;
4465 uint32_t send_s = 0;
4466 long j;
4467 int accum_moved = 0;
4468 int will_exit_fast_recovery = 0;
4469 uint32_t a_rwnd, old_rwnd;
4470 int win_probe_recovery = 0;
4471 int win_probe_recovered = 0;
4472 struct sctp_nets *net = NULL;
4473 int done_once;
4474 int rto_ok = 1;
4475 uint8_t reneged_all = 0;
4476 uint8_t cmt_dac_flag;
4477 /*
4478 * we take any chance we can to service our queues since we cannot
4479 * get awoken when the socket is read from :<
4480 */
4481 /*
4482 * Now perform the actual SACK handling: 1) Verify that it is not an
4483 * old sack, if so discard. 2) If there is nothing left in the send
4484 * queue (cum-ack is equal to last acked) then you have a duplicate
4485 * too, update any rwnd change and verify no timers are running.
4486 * then return. 3) Process any new consecutive data i.e. cum-ack
4487 * moved process these first and note that it moved. 4) Process any
4488 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4489 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4490 * sync up flightsizes and things, stop all timers and also check
4491 * for shutdown_pending state. If so then go ahead and send off the
4492 * shutdown. If in shutdown recv, send off the shutdown-ack and
4493 * start that timer, Ret. 9) Strike any non-acked things and do FR
4494 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4495 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4496 * if in shutdown_recv state.
4497 */
4498 SCTP_TCB_LOCK_ASSERT(stcb);
4499 /* CMT DAC algo */
4500 this_sack_lowest_newack = 0;
4501 SCTP_STAT_INCR(sctps_slowpath_sack);
4502 last_tsn = cum_ack;
4503 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4504 #ifdef SCTP_ASOCLOG_OF_TSNS
4505 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4506 stcb->asoc.cumack_log_at++;
4507 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4508 stcb->asoc.cumack_log_at = 0;
4509 }
4510 #endif
4511 a_rwnd = rwnd;
4512
4513 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4514 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4515 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4516 }
4517
4518 old_rwnd = stcb->asoc.peers_rwnd;
4519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4520 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4521 stcb->asoc.overall_error_count,
4522 0,
4523 SCTP_FROM_SCTP_INDATA,
4524 __LINE__);
4525 }
4526 stcb->asoc.overall_error_count = 0;
4527 asoc = &stcb->asoc;
4528 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4529 sctp_log_sack(asoc->last_acked_seq,
4530 cum_ack,
4531 0,
4532 num_seg,
4533 num_dup,
4534 SCTP_LOG_NEW_SACK);
4535 }
4536 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4537 uint16_t i;
4538 uint32_t *dupdata, dblock;
4539
4540 for (i = 0; i < num_dup; i++) {
4541 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4542 sizeof(uint32_t), (uint8_t *)&dblock);
4543 if (dupdata == NULL) {
4544 break;
4545 }
4546 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4547 }
4548 }
4549 /* reality check */
4550 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4551 tp1 = TAILQ_LAST(&asoc->sent_queue,
4552 sctpchunk_listhead);
4553 send_s = tp1->rec.data.tsn + 1;
4554 } else {
4555 tp1 = NULL;
4556 send_s = asoc->sending_seq;
4557 }
4558 if (SCTP_TSN_GE(cum_ack, send_s)) {
4559 struct mbuf *op_err;
4560 char msg[SCTP_DIAG_INFO_LEN];
4561
4562 /*
4563 * no way, we have not even sent this TSN out yet.
4564 * Peer is hopelessly messed up with us.
4565 */
4566 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4567 cum_ack, send_s);
4568 if (tp1) {
4569 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4570 tp1->rec.data.tsn, (void *)tp1);
4571 }
4572 hopeless_peer:
4573 *abort_now = 1;
4574 /* XXX */
4575 SCTP_SNPRINTF(msg, sizeof(msg),
4576 "Cum ack %8.8x greater or equal than TSN %8.8x",
4577 cum_ack, send_s);
4578 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4579 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4580 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4581 return;
4582 }
4583 /**********************/
4584 /* 1) check the range */
4585 /**********************/
4586 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4587 /* acking something behind */
4588 return;
4589 }
4590
4591 /* update the Rwnd of the peer */
4592 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4593 TAILQ_EMPTY(&asoc->send_queue) &&
4594 (asoc->stream_queue_cnt == 0)) {
4595 /* nothing left on send/sent and strmq */
4596 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4597 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4598 asoc->peers_rwnd, 0, 0, a_rwnd);
4599 }
4600 asoc->peers_rwnd = a_rwnd;
4601 if (asoc->sent_queue_retran_cnt) {
4602 asoc->sent_queue_retran_cnt = 0;
4603 }
4604 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4605 /* SWS sender side engages */
4606 asoc->peers_rwnd = 0;
4607 }
4608 /* stop any timers */
4609 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4610 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4611 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4612 net->partial_bytes_acked = 0;
4613 net->flight_size = 0;
4614 }
4615 asoc->total_flight = 0;
4616 asoc->total_flight_count = 0;
4617 return;
4618 }
4619 /*
4620 * We init net_ack and net_ack2 to 0. These are used to track two
4621 * things. The total byte count acked is tracked in net_ack AND
4622 * net_ack2 is used to track the total bytes acked that are un-
4623 * ambiguous and were never retransmitted. We track these on a per
4624 * destination address basis.
4625 */
4626 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4627 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4628 /* Drag along the window_tsn for cwr's */
4629 net->cwr_window_tsn = cum_ack;
4630 }
4631 net->prev_cwnd = net->cwnd;
4632 net->net_ack = 0;
4633 net->net_ack2 = 0;
4634
4635 /*
4636 * CMT: Reset CUC and Fast recovery algo variables before
4637 * SACK processing
4638 */
4639 net->new_pseudo_cumack = 0;
4640 net->will_exit_fast_recovery = 0;
4641 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4642 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4643 }
4644
4645 /*
4646 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4647 * to be greater than the cumack. Also reset saw_newack to 0
4648 * for all dests.
4649 */
4650 net->saw_newack = 0;
4651 net->this_sack_highest_newack = last_tsn;
4652 }
4653 /* process the new consecutive TSN first */
4654 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4655 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4656 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4657 accum_moved = 1;
4658 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4659 /*
4660 * If it is less than ACKED, it is
4661 * now no longer in flight. Higher
4662 * values may occur during marking
4663 */
4664 if ((tp1->whoTo->dest_state &
4665 SCTP_ADDR_UNCONFIRMED) &&
4666 (tp1->snd_count < 2)) {
4667 /*
4668 * If there was no retran
4669 * and the address is
4670 * un-confirmed and we sent
4671 * there and it is now
4672 * sacked, it's confirmed;
4673 * mark it so.
4674 */
4675 tp1->whoTo->dest_state &=
4676 ~SCTP_ADDR_UNCONFIRMED;
4677 }
4678 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4679 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4680 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4681 tp1->whoTo->flight_size,
4682 tp1->book_size,
4683 (uint32_t)(uintptr_t)tp1->whoTo,
4684 tp1->rec.data.tsn);
4685 }
4686 sctp_flight_size_decrease(tp1);
4687 sctp_total_flight_decrease(stcb, tp1);
4688 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4689 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4690 tp1);
4691 }
4692 }
4693 tp1->whoTo->net_ack += tp1->send_size;
4694
4695 /* CMT SFR and DAC algos */
4696 this_sack_lowest_newack = tp1->rec.data.tsn;
4697 tp1->whoTo->saw_newack = 1;
4698
4699 if (tp1->snd_count < 2) {
4700 /*
4701 * True non-retransmitted
4702 * chunk
4703 */
4704 tp1->whoTo->net_ack2 +=
4705 tp1->send_size;
4706
4707 /* update RTO too? */
4708 if (tp1->do_rtt) {
4709 if (rto_ok &&
4710 sctp_calculate_rto(stcb,
4711 &stcb->asoc,
4712 tp1->whoTo,
4713 &tp1->sent_rcv_time,
4714 SCTP_RTT_FROM_DATA)) {
4715 rto_ok = 0;
4716 }
4717 if (tp1->whoTo->rto_needed == 0) {
4718 tp1->whoTo->rto_needed = 1;
4719 }
4720 tp1->do_rtt = 0;
4721 }
4722 }
4723 /*
4724 * CMT: CUCv2 algorithm. From the
4725 * cumack'd TSNs, for each TSN being
4726 * acked for the first time, set the
4727 * following variables for the
4728 * corresp destination.
4729 * new_pseudo_cumack will trigger a
4730 * cwnd update.
4731 * find_(rtx_)pseudo_cumack will
4732 * trigger search for the next
4733 * expected (rtx-)pseudo-cumack.
4734 */
4735 tp1->whoTo->new_pseudo_cumack = 1;
4736 tp1->whoTo->find_pseudo_cumack = 1;
4737 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4738
4739
4740 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4741 sctp_log_sack(asoc->last_acked_seq,
4742 cum_ack,
4743 tp1->rec.data.tsn,
4744 0,
4745 0,
4746 SCTP_LOG_TSN_ACKED);
4747 }
4748 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4749 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4750 }
4751 }
4752 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4753 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4754 #ifdef SCTP_AUDITING_ENABLED
4755 sctp_audit_log(0xB3,
4756 (asoc->sent_queue_retran_cnt & 0x000000ff));
4757 #endif
4758 }
4759 if (tp1->rec.data.chunk_was_revoked) {
4760 /* deflate the cwnd */
4761 tp1->whoTo->cwnd -= tp1->book_size;
4762 tp1->rec.data.chunk_was_revoked = 0;
4763 }
4764 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4765 tp1->sent = SCTP_DATAGRAM_ACKED;
4766 }
4767 }
4768 } else {
4769 break;
4770 }
4771 }
4772 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4773 /* always set this up to cum-ack */
4774 asoc->this_sack_highest_gap = last_tsn;
4775
4776 if ((num_seg > 0) || (num_nr_seg > 0)) {
4777
4778 /*
4779 * thisSackHighestGap will increase while handling NEW
4780 * segments this_sack_highest_newack will increase while
4781 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4782 * used for CMT DAC algo. saw_newack will also change.
4783 */
4784 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4785 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4786 num_seg, num_nr_seg, &rto_ok)) {
4787 wake_him++;
4788 }
4789 /*
4790 * validate the biggest_tsn_acked in the gap acks if
4791 * strict adherence is wanted.
4792 */
4793 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4794 /*
4795 * peer is either confused or we are under
4796 * attack. We must abort.
4797 */
4798 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4799 biggest_tsn_acked, send_s);
4800 goto hopeless_peer;
4801 }
4802 }
4803 /*******************************************/
4804 /* cancel ALL T3-send timer if accum moved */
4805 /*******************************************/
4806 if (asoc->sctp_cmt_on_off > 0) {
4807 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4808 if (net->new_pseudo_cumack)
4809 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4810 stcb, net,
4811 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4812
4813 }
4814 } else {
4815 if (accum_moved) {
4816 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4817 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4818 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4819 }
4820 }
4821 }
4822 /********************************************/
4823 /* drop the acked chunks from the sentqueue */
4824 /********************************************/
4825 asoc->last_acked_seq = cum_ack;
4826
4827 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4828 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4829 break;
4830 }
4831 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4832 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4833 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4834 #ifdef INVARIANTS
4835 } else {
4836 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4837 #endif
4838 }
4839 }
4840 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4841 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4842 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4843 asoc->trigger_reset = 1;
4844 }
4845 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4846 if (PR_SCTP_ENABLED(tp1->flags)) {
4847 if (asoc->pr_sctp_cnt != 0)
4848 asoc->pr_sctp_cnt--;
4849 }
4850 asoc->sent_queue_cnt--;
4851 if (tp1->data) {
4852 /* sa_ignore NO_NULL_CHK */
4853 sctp_free_bufspace(stcb, asoc, tp1, 1);
4854 sctp_m_freem(tp1->data);
4855 tp1->data = NULL;
4856 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4857 asoc->sent_queue_cnt_removeable--;
4858 }
4859 }
4860 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4861 sctp_log_sack(asoc->last_acked_seq,
4862 cum_ack,
4863 tp1->rec.data.tsn,
4864 0,
4865 0,
4866 SCTP_LOG_FREE_SENT);
4867 }
4868 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4869 wake_him++;
4870 }
4871 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4872 #ifdef INVARIANTS
4873 panic("Warning flight size is positive and should be 0");
4874 #else
4875 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4876 asoc->total_flight);
4877 #endif
4878 asoc->total_flight = 0;
4879 }
4880
4881 #if defined(__Userspace__)
4882 if (stcb->sctp_ep->recv_callback) {
4883 if (stcb->sctp_socket) {
4884 uint32_t inqueue_bytes, sb_free_now;
4885 struct sctp_inpcb *inp;
4886
4887 inp = stcb->sctp_ep;
4888 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4889 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4890
4891 /* check if the amount free in the send socket buffer crossed the threshold */
4892 if (inp->send_callback &&
4893 (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4894 (inp->send_sb_threshold == 0))) {
4895 atomic_add_int(&stcb->asoc.refcnt, 1);
4896 SCTP_TCB_UNLOCK(stcb);
4897 inp->send_callback(stcb->sctp_socket, sb_free_now);
4898 SCTP_TCB_LOCK(stcb);
4899 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4900 }
4901 }
4902 } else if ((wake_him) && (stcb->sctp_socket)) {
4903 #else
4904 /* sa_ignore NO_NULL_CHK */
4905 if ((wake_him) && (stcb->sctp_socket)) {
4906 #endif
4907 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4908 struct socket *so;
4909
4910 #endif
4911 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4912 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4913 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4914 }
4915 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4916 so = SCTP_INP_SO(stcb->sctp_ep);
4917 atomic_add_int(&stcb->asoc.refcnt, 1);
4918 SCTP_TCB_UNLOCK(stcb);
4919 SCTP_SOCKET_LOCK(so, 1);
4920 SCTP_TCB_LOCK(stcb);
4921 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4922 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4923 /* assoc was freed while we were unlocked */
4924 SCTP_SOCKET_UNLOCK(so, 1);
4925 return;
4926 }
4927 #endif
4928 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4929 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4930 SCTP_SOCKET_UNLOCK(so, 1);
4931 #endif
4932 } else {
4933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4934 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4935 }
4936 }
4937
4938 if (asoc->fast_retran_loss_recovery && accum_moved) {
4939 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4940 /* Setup so we will exit RFC2582 fast recovery */
4941 will_exit_fast_recovery = 1;
4942 }
4943 }
4944 /*
4945 * Check for revoked fragments:
4946 *
4947 * If the previous sack had no frags then we can't have any revoked.
4948 * If the previous sack had frags then: if we now have frags (i.e.
4949 * num_seg > 0), call sctp_check_for_revoked() to tell if the peer
4950 * revoked some of them; else the peer revoked all ACKED fragments,
4951 * since we had some before and now we have NONE.
4952 */
4953
4954 if (num_seg) {
4955 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4956 asoc->saw_sack_with_frags = 1;
4957 } else if (asoc->saw_sack_with_frags) {
4958 int cnt_revoked = 0;
4959
4960 /* Peer revoked all dg's marked or acked */
4961 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4962 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4963 tp1->sent = SCTP_DATAGRAM_SENT;
4964 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4965 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4966 tp1->whoTo->flight_size,
4967 tp1->book_size,
4968 (uint32_t)(uintptr_t)tp1->whoTo,
4969 tp1->rec.data.tsn);
4970 }
4971 sctp_flight_size_increase(tp1);
4972 sctp_total_flight_increase(stcb, tp1);
4973 tp1->rec.data.chunk_was_revoked = 1;
4974 /*
4975 * To ensure that this increase in
4976 * flightsize, which is artificial,
4977 * does not throttle the sender, we
4978 * also increase the cwnd
4979 * artificially.
4980 */
4981 tp1->whoTo->cwnd += tp1->book_size;
4982 cnt_revoked++;
4983 }
4984 }
4985 if (cnt_revoked) {
4986 reneged_all = 1;
4987 }
4988 asoc->saw_sack_with_frags = 0;
4989 }
4990 if (num_nr_seg > 0)
4991 asoc->saw_sack_with_nr_frags = 1;
4992 else
4993 asoc->saw_sack_with_nr_frags = 0;
4994
4995 /* JRS - Use the congestion control given in the CC module */
4996 if (ecne_seen == 0) {
4997 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4998 if (net->net_ack2 > 0) {
4999 /*
5000 * Karn's rule applies to clearing error count, this
5001 * is optional.
5002 */
5003 net->error_count = 0;
5004 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
5005 /* addr came good */
5006 net->dest_state |= SCTP_ADDR_REACHABLE;
5007 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
5008 0, (void *)net, SCTP_SO_NOT_LOCKED);
5009 }
5010
5011 if (net == stcb->asoc.primary_destination) {
5012 if (stcb->asoc.alternate) {
5013 /* release the alternate, primary is good */
5014 sctp_free_remote_addr(stcb->asoc.alternate);
5015 stcb->asoc.alternate = NULL;
5016 }
5017 }
5018
5019 if (net->dest_state & SCTP_ADDR_PF) {
5020 net->dest_state &= ~SCTP_ADDR_PF;
5021 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5022 stcb->sctp_ep, stcb, net,
5023 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5024 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5025 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5026 /* Done with this net */
5027 net->net_ack = 0;
5028 }
5029 /* restore any doubled timers */
5030 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5031 if (net->RTO < stcb->asoc.minrto) {
5032 net->RTO = stcb->asoc.minrto;
5033 }
5034 if (net->RTO > stcb->asoc.maxrto) {
5035 net->RTO = stcb->asoc.maxrto;
5036 }
5037 }
5038 }
5039 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5040 }
5041
5042 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5043 /* nothing left in-flight */
5044 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5045 /* stop all timers */
5046 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5047 stcb, net,
5048 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5049 net->flight_size = 0;
5050 net->partial_bytes_acked = 0;
5051 }
5052 asoc->total_flight = 0;
5053 asoc->total_flight_count = 0;
5054 }
5055
5056 /**********************************/
5057 /* Now what about shutdown issues */
5058 /**********************************/
5059 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5060 /* nothing left on the send queue; consider the association done */
5061 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5062 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5063 asoc->peers_rwnd, 0, 0, a_rwnd);
5064 }
5065 asoc->peers_rwnd = a_rwnd;
5066 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5067 /* SWS sender side engages */
5068 asoc->peers_rwnd = 0;
5069 }
5070 /* clean up */
5071 if ((asoc->stream_queue_cnt == 1) &&
5072 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5073 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5074 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
5075 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5076 }
5077 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5078 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5079 (asoc->stream_queue_cnt == 1) &&
5080 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5081 struct mbuf *op_err;
5082
5083 *abort_now = 1;
5084 /* XXX */
5085 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5086 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
5087 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5088 return;
5089 }
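/*
 * With the send and sent queues empty, the association may be ready
 * to progress the shutdown handshake (RFC 4960, section 9.2): move
 * to SHUTDOWN-SENT and send a SHUTDOWN, or, if the peer initiated
 * the shutdown, to SHUTDOWN-ACK-SENT and send a SHUTDOWN-ACK.
 */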
5090 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5091 (asoc->stream_queue_cnt == 0)) {
5092 struct sctp_nets *netp;
5093
5094 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5095 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5096 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5097 }
5098 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5099 sctp_stop_timers_for_shutdown(stcb);
5100 if (asoc->alternate) {
5101 netp = asoc->alternate;
5102 } else {
5103 netp = asoc->primary_destination;
5104 }
5105 sctp_send_shutdown(stcb, netp);
5106 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5107 stcb->sctp_ep, stcb, netp);
5108 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5109 stcb->sctp_ep, stcb, NULL);
5110 return;
5111 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5112 (asoc->stream_queue_cnt == 0)) {
5113 struct sctp_nets *netp;
5114
5115 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5116 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5117 sctp_stop_timers_for_shutdown(stcb);
5118 if (asoc->alternate) {
5119 netp = asoc->alternate;
5120 } else {
5121 netp = asoc->primary_destination;
5122 }
5123 sctp_send_shutdown_ack(stcb, netp);
5124 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5125 stcb->sctp_ep, stcb, netp);
5126 return;
5127 }
5128 }
5129 /*
5130 * HEADS UP: from here on we recycle net_ack for a
5131 * different use.
5132 */
5133 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5134 net->net_ack = 0;
5135 }
5136
5137 /*
5138 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5139 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5140 * automatically ensure that.
5141 */
5142 if ((asoc->sctp_cmt_on_off > 0) &&
5143 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5144 (cmt_dac_flag == 0)) {
5145 this_sack_lowest_newack = cum_ack;
5146 }
5147 if ((num_seg > 0) || (num_nr_seg > 0)) {
5148 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5149 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5150 }
5151 /* JRS - Use the congestion control given in the CC module */
5152 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5153
5154 /* Now, are we exiting loss recovery? */
5155 if (will_exit_fast_recovery) {
5156 /* Ok, we must exit fast recovery */
5157 asoc->fast_retran_loss_recovery = 0;
5158 }
5159 if ((asoc->sat_t3_loss_recovery) &&
5160 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5161 /* end satellite t3 loss recovery */
5162 asoc->sat_t3_loss_recovery = 0;
5163 }
5164 /*
5165 * CMT Fast recovery
5166 */
5167 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5168 if (net->will_exit_fast_recovery) {
5169 /* Ok, we must exit fast recovery */
5170 net->fast_retran_loss_recovery = 0;
5171 }
5172 }
5173
5174 /* Adjust and set the new rwnd value */
5175 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5176 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5177 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5178 }
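/*
 * Sketch of the update below: peers_rwnd = a_rwnd - (total_flight +
 * total_flight_count * sctp_peer_chunk_oh), charging a configurable
 * per-chunk overhead for every chunk in flight; sctp_sbspace_sub()
 * is assumed to floor the result at zero rather than underflow.
 */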
5179 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5180 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5181 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5182 /* SWS sender side engages */
5183 asoc->peers_rwnd = 0;
5184 }
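/*
 * Sender-side SWS avoidance: once the computed window drops below
 * the sctp_sws_sender threshold, treat it as closed rather than
 * dribbling tiny chunks into it; win_probe_recovery below notes
 * when the window has re-opened.
 */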
5185 if (asoc->peers_rwnd > old_rwnd) {
5186 win_probe_recovery = 1;
5187 }
5188
5189 /*
5190 * Now we must set up a timer for anyone with
5191 * outstanding data.
5192 */
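/*
 * The pass below may run twice: if the flight-size audit in the
 * (j == 0) branch rebuilds the accounting, done_once limits the
 * "goto again" retry to a single extra iteration.
 */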
5193 done_once = 0;
5194 again:
5195 j = 0;
5196 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5197 if (win_probe_recovery && (net->window_probe)) {
5198 win_probe_recovered = 1;
5199 /*-
5200 * Find the first chunk that was used for a
5201 * window probe and clear the event. Put it
5202 * back into the send queue as if it had
5203 * not been sent.
5204 */
5205 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5206 if (tp1->window_probe) {
5207 sctp_window_probe_recovery(stcb, asoc, tp1);
5208 break;
5209 }
5210 }
5211 }
5212 if (net->flight_size) {
5213 j++;
5214 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5215 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5216 stcb->sctp_ep, stcb, net);
5217 }
5218 if (net->window_probe) {
5219 net->window_probe = 0;
5220 }
5221 } else {
5222 if (net->window_probe) {
5223 /* For window probes we must ensure a timer is still running on this net */
5224 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5225 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5226 stcb->sctp_ep, stcb, net);
5228 }
5229 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5230 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5231 stcb, net,
5232 SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
5233 }
5234 }
5235 }
5236 if ((j == 0) &&
5237 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5238 (asoc->sent_queue_retran_cnt == 0) &&
5239 (win_probe_recovered == 0) &&
5240 (done_once == 0)) {
5241 /* This should not happen unless all packets
5242 * are PR-SCTP and marked to be skipped.
5243 */
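/*
 * sctp_fs_audit() is assumed to return non-zero when the flight
 * accounting is inconsistent; if so, the flight size and retransmit
 * count are rebuilt from scratch by walking the sent queue below.
 */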
5244 if (sctp_fs_audit(asoc)) {
5245 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5246 net->flight_size = 0;
5247 }
5248 asoc->total_flight = 0;
5249 asoc->total_flight_count = 0;
5250 asoc->sent_queue_retran_cnt = 0;
5251 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5252 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5253 sctp_flight_size_increase(tp1);
5254 sctp_total_flight_increase(stcb, tp1);
5255 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5256 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5257 }
5258 }
5259 }
5260 done_once = 1;
5261 goto again;
5262 }
5263 /*********************************************/
5264 /* Here we perform PR-SCTP procedures */
5265 /* (section 4.2) */
5266 /*********************************************/
5267 /* C1. update advancedPeerAckPoint */
5268 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5269 asoc->advanced_peer_ack_point = cum_ack;
5270 }
5271 /* C2. try to further move advancedPeerAckPoint ahead */
5272 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5273 struct sctp_tmit_chunk *lchk;
5274 uint32_t old_adv_peer_ack_point;
5275
5276 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5277 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5278 /* C3. See if we need to send a Fwd-TSN */
5279 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5280 /*
5281 * ISSUE with ECN, see FWD-TSN processing.
5282 */
5283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5284 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5285 0xee, cum_ack, asoc->advanced_peer_ack_point,
5286 old_adv_peer_ack_point);
5287 }
5288 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5289 send_forward_tsn(stcb, asoc);
5290 } else if (lchk) {
5291 /* also try to fast-retransmit FWD-TSNs that get lost */
5292 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5293 send_forward_tsn(stcb, asoc);
5294 }
5295 }
5296 }
5297 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5298 if (lchk->whoTo != NULL) {
5299 break;
5300 }
5301 }
5302 if (lchk != NULL) {
5303 /* Assure a timer is up */
5304 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5305 stcb->sctp_ep, stcb, lchk->whoTo);
5306 }
5307 }
5308 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5309 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5310 a_rwnd,
5311 stcb->asoc.peers_rwnd,
5312 stcb->asoc.total_flight,
5313 stcb->asoc.total_output_queue_size);
5314 }
5315 }
5316
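/*
 * A SHUTDOWN chunk carries only a cumulative TSN ack, so it is
 * processed as a gap-free SACK. The a_rwnd synthesized below is
 * peers_rwnd + total_flight: once outstanding data is subtracted
 * again, the peer's advertised window is effectively unchanged.
 */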
5317 void
5318 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5319 {
5320 /* Copy cum-ack */
5321 uint32_t cum_ack, a_rwnd;
5322
5323 cum_ack = ntohl(cp->cumulative_tsn_ack);
5324 /* Arrange so a_rwnd does NOT change */
5325 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5326
5327 /* Now call the express sack handling */
5328 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5329 }
5330
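/*
 * After a FWD-TSN has advanced last_mid_delivered for a stream, walk
 * that stream's ordered in-queue and hand every message that is now
 * deliverable to the read queue, kicking the reassembly check for
 * any partially seen fragmented message along the way.
 */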
5331 static void
5332 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5333 struct sctp_stream_in *strmin)
5334 {
5335 struct sctp_queued_to_read *control, *ncontrol;
5336 struct sctp_association *asoc;
5337 uint32_t mid;
5338 int need_reasm_check = 0;
5339
5340 asoc = &stcb->asoc;
5341 mid = strmin->last_mid_delivered;
5342 /*
5343 * First deliver anything with a message ID prior to and
5344 * including the one that came in.
5345 */
5346 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5347 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5348 /* this is deliverable now */
5349 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5350 if (control->on_strm_q) {
5351 if (control->on_strm_q == SCTP_ON_ORDERED) {
5352 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5353 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5354 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5355 #ifdef INVARIANTS
5356 } else {
5357 panic("strmin: %p ctl: %p unknown %d",
5358 strmin, control, control->on_strm_q);
5359 #endif
5360 }
5361 control->on_strm_q = 0;
5362 }
5363 /* subtract pending on streams */
5364 if (asoc->size_on_all_streams >= control->length) {
5365 asoc->size_on_all_streams -= control->length;
5366 } else {
5367 #ifdef INVARIANTS
5368 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5369 #else
5370 asoc->size_on_all_streams = 0;
5371 #endif
5372 }
5373 sctp_ucount_decr(asoc->cnt_on_all_streams);
5374 /* deliver it to at least the delivery-q */
5375 if (stcb->sctp_socket) {
5376 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5377 sctp_add_to_readq(stcb->sctp_ep, stcb,
5378 control,
5379 &stcb->sctp_socket->so_rcv,
5380 1, SCTP_READ_LOCK_HELD,
5381 SCTP_SO_NOT_LOCKED);
5382 }
5383 } else {
5384 /* It's a fragmented message */
5385 if (control->first_frag_seen) {
5386 /* Make this the next to deliver; we restore the value later */
5387 strmin->last_mid_delivered = control->mid - 1;
5388 need_reasm_check = 1;
5389 break;
5390 }
5391 }
5392 } else {
5393 /* no more delivery now. */
5394 break;
5395 }
5396 }
5397 if (need_reasm_check) {
5398 int ret;
5399 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5400 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5401 /* Restore the next to deliver unless we are ahead */
5402 strmin->last_mid_delivered = mid;
5403 }
5404 if (ret == 0) {
5405 /* We left the partially delivered message at the front */
5406 return;
5407 }
5408 need_reasm_check = 0;
5409 }
5410 /*
5411 * Now deliver messages from the queue the normal way, if any
5412 * are ready.
5413 */
5414 mid = strmin->last_mid_delivered + 1;
5415 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5416 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5417 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5418 /* this is deliverable now */
5419 if (control->on_strm_q) {
5420 if (control->on_strm_q == SCTP_ON_ORDERED) {
5421 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5422 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5423 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5424 #ifdef INVARIANTS
5425 } else {
5426 panic("strmin: %p ctl: %p unknown %d",
5427 strmin, control, control->on_strm_q);
5428 #endif
5429 }
5430 control->on_strm_q = 0;
5431 }
5432 /* subtract pending on streams */
5433 if (asoc->size_on_all_streams >= control->length) {
5434 asoc->size_on_all_streams -= control->length;
5435 } else {
5436 #ifdef INVARIANTS
5437 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5438 #else
5439 asoc->size_on_all_streams = 0;
5440 #endif
5441 }
5442 sctp_ucount_decr(asoc->cnt_on_all_streams);
5443 /* deliver it to at least the delivery-q */
5444 strmin->last_mid_delivered = control->mid;
5445 if (stcb->sctp_socket) {
5446 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5447 sctp_add_to_readq(stcb->sctp_ep, stcb,
5448 control,
5449 &stcb->sctp_socket->so_rcv, 1,
5450 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5452 }
5453 mid = strmin->last_mid_delivered + 1;
5454 } else {
5455 /* It's a fragmented message */
5456 if (control->first_frag_seen) {
5457 /* Make this the next to deliver */
5458 strmin->last_mid_delivered = control->mid - 1;
5459 need_reasm_check = 1;
5460 break;
5461 }
5462 }
5463 } else {
5464 break;
5465 }
5466 }
5467 if (need_reasm_check) {
5468 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5469 }
5470 }
5471
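/*
 * Drop from the given stream's reassembly state every fragment that
 * belongs to message `mid` (and, for pre-I-DATA unordered data, only
 * fragments at or below the new cumulative TSN), releasing the
 * control block unless it is already on the read queue.
 */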
5474 static void
5475 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5476 struct sctp_association *asoc,
5477 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5478 {
5479 struct sctp_queued_to_read *control;
5480 struct sctp_stream_in *strm;
5481 struct sctp_tmit_chunk *chk, *nchk;
5482 int cnt_removed = 0;
5483
5484 /*
5485 * For now, large messages held on the stream reassembly queue
5486 * that are complete are tossed as well. In theory we could do
5487 * more work: spin through, stop after dumping one message (i.e.,
5488 * on seeing the start of a new message at the head), and call
5489 * the delivery function to see whether it can be delivered. But
5490 * for now we just dump everything on the queue.
5491 */
5492 strm = &asoc->strmin[stream];
5493 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5494 if (control == NULL) {
5495 /* Not found */
5496 return;
5497 }
5498 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5499 return;
5500 }
5501 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5502 /* Purge hanging chunks */
5503 if (!asoc->idata_supported && (ordered == 0)) {
5504 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5505 break;
5506 }
5507 }
5508 cnt_removed++;
5509 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5510 if (asoc->size_on_reasm_queue >= chk->send_size) {
5511 asoc->size_on_reasm_queue -= chk->send_size;
5512 } else {
5513 #ifdef INVARIANTS
5514 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5515 #else
5516 asoc->size_on_reasm_queue = 0;
5517 #endif
5518 }
5519 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5520 if (chk->data) {
5521 sctp_m_freem(chk->data);
5522 chk->data = NULL;
5523 }
5524 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5525 }
5526 if (!TAILQ_EMPTY(&control->reasm)) {
5527 /* This has to be old data, unordered */
5528 if (control->data) {
5529 sctp_m_freem(control->data);
5530 control->data = NULL;
5531 }
5532 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5533 chk = TAILQ_FIRST(&control->reasm);
5534 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5535 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5536 sctp_add_chk_to_control(control, strm, stcb, asoc,
5537 chk, SCTP_READ_LOCK_HELD);
5538 }
5539 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5540 return;
5541 }
5542 if (control->on_strm_q == SCTP_ON_ORDERED) {
5543 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5544 if (asoc->size_on_all_streams >= control->length) {
5545 asoc->size_on_all_streams -= control->length;
5546 } else {
5547 #ifdef INVARIANTS
5548 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5549 #else
5550 asoc->size_on_all_streams = 0;
5551 #endif
5552 }
5553 sctp_ucount_decr(asoc->cnt_on_all_streams);
5554 control->on_strm_q = 0;
5555 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5556 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5557 control->on_strm_q = 0;
5558 #ifdef INVARIANTS
5559 } else if (control->on_strm_q) {
5560 panic("strm: %p ctl: %p unknown %d",
5561 strm, control, control->on_strm_q);
5562 #endif
5563 }
5564 control->on_strm_q = 0;
5565 if (control->on_read_q == 0) {
5566 sctp_free_remote_addr(control->whoFrom);
5567 if (control->data) {
5568 sctp_m_freem(control->data);
5569 control->data = NULL;
5570 }
5571 sctp_free_a_readq(stcb, control);
5572 }
5573 }
5574
5575 void
5576 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5577 struct sctp_forward_tsn_chunk *fwd,
5578 int *abort_flag, struct mbuf *m, int offset)
5579 {
5580 /* The pr-sctp fwd tsn */
5581 /*
5582 * Here we perform all the data-receiver-side steps for
5583 * processing a FwdTSN, as required by the PR-SCTP draft:
5584 *
5585 * Assume we get FwdTSN(x):
5586 *
5587 * 1) update local cumTSN to x
5588 * 2) try to further advance cumTSN to x + others we have
5589 * 3) examine and update re-ordering queue on pr-in-streams
5590 * 4) clean up re-assembly queue
5591 * 5) Send a sack to report where we are.
5592 */
5593 struct sctp_association *asoc;
5594 uint32_t new_cum_tsn, gap;
5595 unsigned int i, fwd_sz, m_size;
5596 uint32_t str_seq;
5597 struct sctp_stream_in *strm;
5598 struct sctp_queued_to_read *control, *sv;
5599
5600 asoc = &stcb->asoc;
5601 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5602 SCTPDBG(SCTP_DEBUG_INDATA1,
5603 "Bad size too small/big fwd-tsn\n");
5604 return;
5605 }
5606 m_size = (stcb->asoc.mapping_array_size << 3);
5607 /*************************************************************/
5608 /* 1. Here we update local cumTSN and shift the bitmap array */
5609 /*************************************************************/
5610 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5611
5612 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5613 /* Already got there ... */
5614 return;
5615 }
5616 /*
5617 * Now we know the new TSN is more advanced; find the
5618 * actual gap.
5619 */
5620 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5621 asoc->cumulative_tsn = new_cum_tsn;
5622 if (gap >= m_size) {
5623 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5624 struct mbuf *op_err;
5625 char msg[SCTP_DIAG_INFO_LEN];
5626
5627 /*
5628 * Out of range, even counting single-byte chunks against
5629 * the rwnd we give out. This must be an attacker.
5630 */
5631 *abort_flag = 1;
5632 SCTP_SNPRINTF(msg, sizeof(msg),
5633 "New cum ack %8.8x too high, highest TSN %8.8x",
5634 new_cum_tsn, asoc->highest_tsn_inside_map);
5635 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5636 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
5637 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5638 return;
5639 }
5640 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5641
5642 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5643 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5644 asoc->highest_tsn_inside_map = new_cum_tsn;
5645
5646 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5647 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5648
5649 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5650 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5651 }
5652 } else {
5653 SCTP_TCB_LOCK_ASSERT(stcb);
5654 for (i = 0; i <= gap; i++) {
5655 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5656 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5657 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5658 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5659 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5660 }
5661 }
5662 }
5663 }
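/*
 * SCTP_CALC_TSN_TO_GAP turned the new cumulative TSN into a bit
 * offset from mapping_array_base_tsn. If it fell beyond the map
 * (gap >= m_size), both maps were reset above; otherwise every TSN
 * up to the gap was just marked received in the nr-mapping array.
 */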
5664 /*************************************************************/
5665 /* 2. Clear up re-assembly queue */
5666 /*************************************************************/
5667
5668 /* This is now done as part of clearing up the stream/seq */
5669 if (asoc->idata_supported == 0) {
5670 uint16_t sid;
5671 /* Flush all the un-ordered data based on cum-tsn */
5672 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5673 for (sid = 0; sid < asoc->streamincnt; sid++) {
5674 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5675 }
5676 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5677 }
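/*
 * Without I-DATA, unordered chunks carry no message ID that could
 * drive the flush, so it is driven purely by TSN: passing mid 0 and
 * ordered 0 is intended to purge only unordered fragments at or
 * below the new cumulative TSN from each stream.
 */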
5678 /*******************************************************/
5679 /* 3. Update the PR-stream re-ordering queues and fix */
5680 /* delivery issues as needed. */
5681 /*******************************************************/
5682 fwd_sz -= sizeof(*fwd);
5683 if (m && fwd_sz) {
5684 /* New method. */
5685 unsigned int num_str;
5686 uint32_t mid, cur_mid;
5687 uint16_t sid;
5688 uint16_t ordered, flags;
5689 struct sctp_strseq *stseq, strseqbuf;
5690 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5691 offset += sizeof(*fwd);
5692
5693 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5694 if (asoc->idata_supported) {
5695 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5696 } else {
5697 num_str = fwd_sz / sizeof(struct sctp_strseq);
5698 }
5699 for (i = 0; i < num_str; i++) {
5700 if (asoc->idata_supported) {
5701 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5702 sizeof(struct sctp_strseq_mid),
5703 (uint8_t *)&strseqbuf_m);
5704 offset += sizeof(struct sctp_strseq_mid);
5705 if (stseq_m == NULL) {
5706 break;
5707 }
5708 sid = ntohs(stseq_m->sid);
5709 mid = ntohl(stseq_m->mid);
5710 flags = ntohs(stseq_m->flags);
5711 if (flags & PR_SCTP_UNORDERED_FLAG) {
5712 ordered = 0;
5713 } else {
5714 ordered = 1;
5715 }
5716 } else {
5717 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5718 sizeof(struct sctp_strseq),
5719 (uint8_t *)&strseqbuf);
5720 offset += sizeof(struct sctp_strseq);
5721 if (stseq == NULL) {
5722 break;
5723 }
5724 sid = ntohs(stseq->sid);
5725 mid = (uint32_t)ntohs(stseq->ssn);
5726 ordered = 1;
5727 }
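/*
 * Conversion done. With I-DATA an entry is a sctp_strseq_mid
 * (16-bit sid, flags carrying the unordered bit, 32-bit mid);
 * without it, an entry is a sctp_strseq (16-bit sid and a 16-bit
 * ssn that is widened into mid, always ordered). Now process the
 * converted entry.
 */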
5732 /*
5733 * OK, we now look for the stream/seq entry on the read queue
5734 * that is not fully delivered. If we find it, we transmute the
5735 * read entry into a PDI_ABORTED.
5736 */
5737 if (sid >= asoc->streamincnt) {
5738 /* malformed: stream id out of range, stop */
5739 break;
5740 }
5741 if ((asoc->str_of_pdapi == sid) &&
5742 (asoc->ssn_of_pdapi == mid)) {
5743 /* If this is the one we are currently partially
5744 * delivering, then we no longer are. Note this will change
5745 * with the reassembly re-write.
5746 */
5747 asoc->fragmented_delivery_inprogress = 0;
5748 }
5749 strm = &asoc->strmin[sid];
5750 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5751 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5752 }
5753 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5754 if ((control->sinfo_stream == sid) &&
5755 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5756 str_seq = (sid << 16) | (0x0000ffff & mid);
5757 control->pdapi_aborted = 1;
5758 sv = stcb->asoc.control_pdapi;
5759 control->end_added = 1;
5760 if (control->on_strm_q == SCTP_ON_ORDERED) {
5761 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5762 if (asoc->size_on_all_streams >= control->length) {
5763 asoc->size_on_all_streams -= control->length;
5764 } else {
5765 #ifdef INVARIANTS
5766 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5767 #else
5768 asoc->size_on_all_streams = 0;
5769 #endif
5770 }
5771 sctp_ucount_decr(asoc->cnt_on_all_streams);
5772 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5773 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5774 #ifdef INVARIANTS
5775 } else if (control->on_strm_q) {
5776 panic("strm: %p ctl: %p unknown %d",
5777 strm, control, control->on_strm_q);
5778 #endif
5779 }
5780 control->on_strm_q = 0;
5781 stcb->asoc.control_pdapi = control;
5782 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5783 stcb,
5784 SCTP_PARTIAL_DELIVERY_ABORTED,
5785 (void *)&str_seq,
5786 SCTP_SO_NOT_LOCKED);
5787 stcb->asoc.control_pdapi = sv;
5788 break;
5789 } else if ((control->sinfo_stream == sid) &&
5790 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5791 /* We are past our victim SSN */
5792 break;
5793 }
5794 }
5795 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5796 /* Update the sequence number */
5797 strm->last_mid_delivered = mid;
5798 }
5799 /* now kick the stream the new way */
5800 /*sa_ignore NO_NULL_CHK*/
5801 sctp_kick_prsctp_reorder_queue(stcb, strm);
5802 }
5803 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5804 }
5805 /*
5806 * Now slide the mapping arrays forward.
5807 */
5808 sctp_slide_mapping_arrays(stcb);
5809 }
5810