/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef __FreeBSD__
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 264838 2014-04-23 21:20:55Z tuexen $");
#endif

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * the sb_cc is the count that everyone has put up. When we re-write
	 * sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT yet been put on the socket queue but that
	 * we still hold for putting up.
	 */
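	/*
	 * Note: each queued chunk also consumes roughly one mbuf header
	 * (MSIZE) of kernel memory beyond its payload, so both are charged
	 * against the advertised window below.
	 */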
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
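	/*
	 * The DATA chunk's receive flags (e.g. SCTP_DATA_UNORDERED) are
	 * stored shifted into the upper byte of sinfo_flags; readers
	 * recover them with a >> 8.
	 */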
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
#if defined(__Userspace_os_Windows)
	WSACMSGHDR *cmh;
#else
	struct cmsghdr *cmh;
#endif
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}
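	/*
	 * At this point len is the CMSG_SPACE() total for every cmsg we
	 * intend to emit, so a single mbuf can carry the whole ancillary
	 * chain laid out back to back.
	 */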

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
#if defined(__Userspace_os_Windows)
	cmh = mtod(ret, WSACMSGHDR *);
#else
	cmh = mtod(ret, struct cmsghdr *);
#endif
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
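
/*
 * For reference, a minimal user-space sketch (an illustrative assumption,
 * not part of this file) of consuming the ancillary data built above, on a
 * socket "fd" that has the SCTP_RECVRCVINFO socket option enabled:
 *
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *	struct sctp_rcvinfo rcv;
 *	struct iovec iov;
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_rcvinfo))];
 *	char data[2048];
 *
 *	iov.iov_base = data;
 *	iov.iov_len = sizeof(data);
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	if (recvmsg(fd, &msg, 0) > 0) {
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *		    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *			    cmsg->cmsg_type == SCTP_RCVINFO) {
 *				memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
 *				printf("sid=%u ssn=%u tsn=%u\n",
 *				    rcv.rcv_sid, rcv.rcv_ssn, rcv.rcv_tsn);
 *			}
 *		}
 *	}
 */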


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
		return;
	}
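	/*
	 * The gap is the bit index of this TSN in the mapping arrays:
	 * tsn - mapping_array_base_tsn, computed modulo 2^32 so serial
	 * number wraps are handled.
	 */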
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
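	/*
	 * Move the TSN from the revokable map to the non-revokable map:
	 * once it has been handed toward the socket it can no longer be
	 * reneged on, so it must not be dropped by a drain.
	 */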
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space. 2) run out of sequential
 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
	abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/*sa_ignore FREED_MEMORY*/
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * An ordered chunk that is not the next sequence
			 * to deliver in its stream; stop here.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off, we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN.
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t)control->sinfo_stream,
	    (uint32_t)strm->last_sequence_delivered,
	    (uint32_t)nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
	protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    strm->last_sequence_delivered, control->sinfo_tsn,
		    control->sinfo_stream, control->sinfo_ssn);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */
					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
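	/*
	 * Fragments of a single message carry consecutive TSNs, so walk
	 * the queue and stop at the first hole; a LAST_FRAG seen before
	 * any hole means the whole message is here.
	 */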
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and its ok to deliver
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
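			/*
			 * pd_point is the partial delivery threshold:
			 * delivery starts once the whole message is queued
			 * or at least pd_point bytes are. It is capped at a
			 * fraction of the receive buffer so we never wait
			 * for more data than the socket could hold.
			 */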
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our Fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->str_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->ssn_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					snprintf(msg, sizeof(msg),
					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					snprintf(msg, sizeof(msg),
					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					snprintf(msg, sizeof(msg),
					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					snprintf(msg, sizeof(msg),
					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sid:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it.
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
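	/*
	 * The mapping array covers SCTP_MAPPING_ARRAY bytes, i.e.
	 * SCTP_MAPPING_ARRAY * 8 TSNs beyond the base; a gap at or past
	 * that limit cannot be recorded, so the chunk is dropped below and
	 * left for the peer to retransmit.
	 */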
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			/* Nope not in the valid range dump it */
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_NOWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr, one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
	/*
	 * Before we continue lets validate that we are not being fooled by
	 * an evil attacker. We can only have 4k chunks based on our TSN
	 * spread allowed by the mapping array 512 * 8 bits, so there is no
	 * way our stream sequence numbers could have wrapped. We of course
	 * only validate the FIRST fragment so the bit must be set.
	 */
	strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_in_at = 0;
		asoc->tsn_in_wrapped = 1;
	}
	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
	asoc->tsn_in_at++;
#endif
	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (TAILQ_EMPTY(&asoc->resetHead)) &&
	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
		    strmseq, asoc->strmin[strmno].last_sequence_delivered);

		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    asoc->strmin[strmno].last_sequence_delivered,
		    tsn, strmno, strmseq);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/************************************
	 * From here down we may find ch-> invalid
	 * so its a good idea NOT to use it.
	 *************************************/

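	/*
	 * the_len is the payload length: the chunk length reported in the
	 * header minus the fixed DATA chunk header itself.
	 */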
	the_len = (chk_length - sizeof(struct sctp_data_chunk));
	if (last_chunk == 0) {
		dmbuf = SCTP_M_COPYM(*m,
		    (offset + sizeof(struct sctp_data_chunk)),
		    the_len, M_NOWAIT);
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
			}
		}
#endif
	} else {
		/* We can steal the last chunk */
		int l_len;

		dmbuf = *m;
		/* lop off the top part */
		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
			l_len = SCTP_BUF_LEN(dmbuf);
		} else {
			/*
			 * need to count up the size; hopefully we do not
			 * hit this too often :-0
			 */
			struct mbuf *lat;

			l_len = 0;
			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
				l_len += SCTP_BUF_LEN(lat);
			}
		}
		if (l_len > the_len) {
			/* Trim the end round bytes off too */
			m_adj(dmbuf, -(l_len - the_len));
		}
	}
	if (dmbuf == NULL) {
		SCTP_STAT_INCR(sctps_nomem);
		return (0);
	}
	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
	    asoc->fragmented_delivery_inprogress == 0 &&
	    TAILQ_EMPTY(&asoc->resetHead) &&
	    ((ordered == 0) ||
	    ((uint16_t)(asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
		/* Candidate for express delivery */
		/*
		 * It's not fragmented, no PD-API is up, nothing is in the
		 * delivery queue, it's un-ordered OR ordered and the next
		 * to deliver AND nothing else is stuck on the stream
		 * queue, and there is room for it in the socket buffer.
		 * Lets just stuff it up the buffer....
		 */

		/* It would be nice to avoid this copy if we could :< */
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    protocol_id,
		    strmno, strmseq,
		    chunk_flags,
		    dmbuf);
		if (control == NULL) {
			goto failed_express_del;
		}
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control, &stcb->sctp_socket->so_rcv,
		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);

		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
			/* for ordered, bump what we delivered */
			asoc->strmin[strmno].last_sequence_delivered++;
		}
		SCTP_STAT_INCR(sctps_recvexpress);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
			    SCTP_STR_LOG_FROM_EXPRS_DEL);
		}
		control = NULL;

		goto finish_express_del;
	}
1590 failed_express_del:
1591 /* If we reach here this is a new chunk */
1592 chk = NULL;
1593 control = NULL;
1594 /* Express for fragmented delivery? */
1595 if ((asoc->fragmented_delivery_inprogress) &&
1596 (stcb->asoc.control_pdapi) &&
1597 (asoc->str_of_pdapi == strmno) &&
1598 (asoc->ssn_of_pdapi == strmseq)
1599 ) {
1600 control = stcb->asoc.control_pdapi;
1601 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1602 /* Can't be another first? */
1603 goto failed_pdapi_express_del;
1604 }
1605 if (tsn == (control->sinfo_tsn + 1)) {
1606 /* Yep, we can add it on */
1607 int end = 0;
1608
1609 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1610 end = 1;
1611 }
1612 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1613 tsn,
1614 &stcb->sctp_socket->so_rcv)) {
1615 SCTP_PRINTF("Append fails end:%d\n", end);
1616 goto failed_pdapi_express_del;
1617 }
1618
1619 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1620 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1621 asoc->highest_tsn_inside_nr_map = tsn;
1622 }
1623 SCTP_STAT_INCR(sctps_recvexpressm);
1624 asoc->tsn_last_delivered = tsn;
1625 asoc->fragment_flags = chunk_flags;
1626 asoc->tsn_of_pdapi_last_delivered = tsn;
1627 asoc->last_flags_delivered = chunk_flags;
1628 asoc->last_strm_seq_delivered = strmseq;
1629 asoc->last_strm_no_delivered = strmno;
1630 if (end) {
1631 /* clean up the flags and such */
1632 asoc->fragmented_delivery_inprogress = 0;
1633 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1634 asoc->strmin[strmno].last_sequence_delivered++;
1635 }
1636 stcb->asoc.control_pdapi = NULL;
1637 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1638 /* There could be another message ready */
1639 need_reasm_check = 1;
1640 }
1641 }
1642 control = NULL;
1643 goto finish_express_del;
1644 }
1645 }
1646 failed_pdapi_express_del:
1647 control = NULL;
1648 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1649 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1650 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1651 asoc->highest_tsn_inside_nr_map = tsn;
1652 }
1653 } else {
1654 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1655 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1656 asoc->highest_tsn_inside_map = tsn;
1657 }
1658 }
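	/*
	 * Two maps record arrived TSNs: nr_mapping_array holds TSNs the
	 * receiver will never renege on, while mapping_array holds TSNs
	 * that may still be dropped (reneged) when the sctp_do_drain
	 * sysctl allows freeing queued data under memory pressure. With
	 * draining disabled, everything is marked non-renegable up front.
	 */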
1659 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1660 sctp_alloc_a_chunk(stcb, chk);
1661 if (chk == NULL) {
1662 /* No memory so we drop the chunk */
1663 SCTP_STAT_INCR(sctps_nomem);
1664 if (last_chunk == 0) {
1665 /* we copied it, free the copy */
1666 sctp_m_freem(dmbuf);
1667 }
1668 return (0);
1669 }
1670 chk->rec.data.TSN_seq = tsn;
1671 chk->no_fr_allowed = 0;
1672 chk->rec.data.stream_seq = strmseq;
1673 chk->rec.data.stream_number = strmno;
1674 chk->rec.data.payloadtype = protocol_id;
1675 chk->rec.data.context = stcb->asoc.context;
1676 chk->rec.data.doing_fast_retransmit = 0;
1677 chk->rec.data.rcv_flags = chunk_flags;
1678 chk->asoc = asoc;
1679 chk->send_size = the_len;
1680 chk->whoTo = net;
1681 atomic_add_int(&net->ref_count, 1);
1682 chk->data = dmbuf;
1683 } else {
1684 sctp_alloc_a_readq(stcb, control);
1685 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1686 protocol_id,
1687 strmno, strmseq,
1688 chunk_flags,
1689 dmbuf);
1690 if (control == NULL) {
1691 /* No memory so we drop the chunk */
1692 SCTP_STAT_INCR(sctps_nomem);
1693 if (last_chunk == 0) {
1694 /* we copied it, free the copy */
1695 sctp_m_freem(dmbuf);
1696 }
1697 return (0);
1698 }
1699 control->length = the_len;
1700 }
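	/*
	 * Exactly one of chk/control is set at this point: fragments
	 * travel as sctp_tmit_chunk entries bound for the reassembly
	 * queue, while complete messages become sctp_queued_to_read
	 * entries headed for the socket buffer or a stream queue.
	 */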
1701
1702 /* Mark it as received */
1703 /* Now queue it where it belongs */
1704 if (control != NULL) {
1705 /* First a sanity check */
1706 if (asoc->fragmented_delivery_inprogress) {
1707 /*
1708 * Ok, we have a fragmented delivery in progress if
1709 * this chunk is next to deliver OR belongs in our
1710 * view to the reassembly, the peer is evil or
1711 * broken.
1712 */
1713 uint32_t estimate_tsn;
1714
1715 estimate_tsn = asoc->tsn_last_delivered + 1;
1716 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1717 (estimate_tsn == control->sinfo_tsn)) {
1718 				/* Evil/Broken peer */
1719 sctp_m_freem(control->data);
1720 control->data = NULL;
1721 if (control->whoFrom) {
1722 sctp_free_remote_addr(control->whoFrom);
1723 control->whoFrom = NULL;
1724 }
1725 sctp_free_a_readq(stcb, control);
1726 				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1727 tsn, strmno, strmseq);
1728 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1729 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_15;
1730 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1731 *abort_flag = 1;
1732 if (last_chunk) {
1733 *m = NULL;
1734 }
1735 return (0);
1736 } else {
1737 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1738 sctp_m_freem(control->data);
1739 control->data = NULL;
1740 if (control->whoFrom) {
1741 sctp_free_remote_addr(control->whoFrom);
1742 control->whoFrom = NULL;
1743 }
1744 sctp_free_a_readq(stcb, control);
1745 snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1746 tsn, strmno, strmseq);
1747 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1748 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_16;
1749 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1750 *abort_flag = 1;
1751 if (last_chunk) {
1752 *m = NULL;
1753 }
1754 return (0);
1755 }
1756 }
1757 } else {
1758 /* No PDAPI running */
1759 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1760 /*
1761 				 * Reassembly queue is NOT empty; validate
1762 				 * that this tsn does not need to be in the
1763 				 * reassembly queue. If it does then our peer
1764 * is broken or evil.
1765 */
1766 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1767 sctp_m_freem(control->data);
1768 control->data = NULL;
1769 if (control->whoFrom) {
1770 sctp_free_remote_addr(control->whoFrom);
1771 control->whoFrom = NULL;
1772 }
1773 sctp_free_a_readq(stcb, control);
1774 snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1775 tsn, strmno, strmseq);
1776 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1777 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_17;
1778 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1779 *abort_flag = 1;
1780 if (last_chunk) {
1781 *m = NULL;
1782 }
1783 return (0);
1784 }
1785 }
1786 }
1787 /* ok, if we reach here we have passed the sanity checks */
1788 if (chunk_flags & SCTP_DATA_UNORDERED) {
1789 /* queue directly into socket buffer */
1790 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1791 sctp_add_to_readq(stcb->sctp_ep, stcb,
1792 control,
1793 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1794 } else {
1795 /*
1796 * Special check for when streams are resetting. We
1797 			 * could be smarter about this and check the
1798 * actual stream to see if it is not being reset..
1799 * that way we would not create a HOLB when amongst
1800 * streams being reset and those not being reset.
1801 *
1802 * We take complete messages that have a stream reset
1803 * intervening (aka the TSN is after where our
1804 * cum-ack needs to be) off and put them on a
1805 * pending_reply_queue. The reassembly ones we do
1806 * not have to worry about since they are all sorted
1807 			 * and processed by TSN order. It is only the
1808 * singletons I must worry about.
1809 */
1810 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1811 SCTP_TSN_GT(tsn, liste->tsn)) {
1812 /*
1813 				 * yep, it's past where we need to reset... go
1814 * ahead and queue it.
1815 */
1816 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1817 /* first one on */
1818 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1819 } else {
1820 struct sctp_queued_to_read *ctlOn, *nctlOn;
1821 unsigned char inserted = 0;
1822
1823 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1824 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1825 continue;
1826 } else {
1827 /* found it */
1828 TAILQ_INSERT_BEFORE(ctlOn, control, next);
1829 inserted = 1;
1830 break;
1831 }
1832 }
1833 if (inserted == 0) {
1834 /*
1835 					 * no queued entry has a
1836 					 * larger TSN, so this one
1837 					 * belongs at the tail.
1838 */
1839 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1840 }
1841 }
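				/*
				 * Example of the ordered insert above: with
				 * queued TSNs {100, 104} and a new control at
				 * TSN 102, the walk stops at 104 and inserts
				 * before it, keeping pending_reply_queue
				 * sorted by TSN.
				 */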
1842 } else {
1843 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1844 if (*abort_flag) {
1845 if (last_chunk) {
1846 *m = NULL;
1847 }
1848 return (0);
1849 }
1850 }
1851 }
1852 } else {
1853 /* Into the re-assembly queue */
1854 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1855 if (*abort_flag) {
1856 /*
1857 * the assoc is now gone and chk was put onto the
1858 * reasm queue, which has all been freed.
1859 */
1860 if (last_chunk) {
1861 *m = NULL;
1862 }
1863 return (0);
1864 }
1865 }
1866 finish_express_del:
1867 if (tsn == (asoc->cumulative_tsn + 1)) {
1868 /* Update cum-ack */
1869 asoc->cumulative_tsn = tsn;
1870 }
1871 if (last_chunk) {
1872 *m = NULL;
1873 }
1874 if (ordered) {
1875 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1876 } else {
1877 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1878 }
1879 SCTP_STAT_INCR(sctps_recvdata);
1880 /* Set it present please */
1881 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1882 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1883 }
1884 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1885 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1886 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1887 }
1888 /* check the special flag for stream resets */
1889 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1890 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1891 /*
1892 		 * we have finished working through the backlogged TSNs; now it
1893 		 * is time to reset streams. 1: call reset function. 2: free
1894 		 * pending_reply space. 3: distribute any chunks in the
1895 		 * pending_reply_queue.
1896 */
1897 struct sctp_queued_to_read *ctl, *nctl;
1898
1899 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1900 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1901 SCTP_FREE(liste, SCTP_M_STRESET);
1902 /*sa_ignore FREED_MEMORY*/
1903 liste = TAILQ_FIRST(&asoc->resetHead);
1904 if (TAILQ_EMPTY(&asoc->resetHead)) {
1905 /* All can be removed */
1906 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1907 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1908 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1909 if (*abort_flag) {
1910 return (0);
1911 }
1912 }
1913 } else {
1914 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1915 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1916 break;
1917 }
1918 /*
1919 * if ctl->sinfo_tsn is <= liste->tsn we can
1920 * process it which is the NOT of
1921 * ctl->sinfo_tsn > liste->tsn
1922 */
1923 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1924 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1925 if (*abort_flag) {
1926 return (0);
1927 }
1928 }
1929 }
1930 /*
1931 * Now service re-assembly to pick up anything that has been
1932 	 * held on the reassembly queue.
1933 */
1934 sctp_deliver_reasm_check(stcb, asoc);
1935 need_reasm_check = 0;
1936 }
1937
1938 if (need_reasm_check) {
1939 /* Another one waits ? */
1940 sctp_deliver_reasm_check(stcb, asoc);
1941 }
1942 return (1);
1943 }
1944
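/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits in v,
 * counting up from the least significant bit. For example, 0x7f
 * (binary 0111 1111) yields 7 and any even value yields 0. The slide
 * code uses it to locate the first missing TSN inside a partially
 * filled mapping-array byte.
 */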
1945 int8_t sctp_map_lookup_tab[256] = {
1946 0, 1, 0, 2, 0, 1, 0, 3,
1947 0, 1, 0, 2, 0, 1, 0, 4,
1948 0, 1, 0, 2, 0, 1, 0, 3,
1949 0, 1, 0, 2, 0, 1, 0, 5,
1950 0, 1, 0, 2, 0, 1, 0, 3,
1951 0, 1, 0, 2, 0, 1, 0, 4,
1952 0, 1, 0, 2, 0, 1, 0, 3,
1953 0, 1, 0, 2, 0, 1, 0, 6,
1954 0, 1, 0, 2, 0, 1, 0, 3,
1955 0, 1, 0, 2, 0, 1, 0, 4,
1956 0, 1, 0, 2, 0, 1, 0, 3,
1957 0, 1, 0, 2, 0, 1, 0, 5,
1958 0, 1, 0, 2, 0, 1, 0, 3,
1959 0, 1, 0, 2, 0, 1, 0, 4,
1960 0, 1, 0, 2, 0, 1, 0, 3,
1961 0, 1, 0, 2, 0, 1, 0, 7,
1962 0, 1, 0, 2, 0, 1, 0, 3,
1963 0, 1, 0, 2, 0, 1, 0, 4,
1964 0, 1, 0, 2, 0, 1, 0, 3,
1965 0, 1, 0, 2, 0, 1, 0, 5,
1966 0, 1, 0, 2, 0, 1, 0, 3,
1967 0, 1, 0, 2, 0, 1, 0, 4,
1968 0, 1, 0, 2, 0, 1, 0, 3,
1969 0, 1, 0, 2, 0, 1, 0, 6,
1970 0, 1, 0, 2, 0, 1, 0, 3,
1971 0, 1, 0, 2, 0, 1, 0, 4,
1972 0, 1, 0, 2, 0, 1, 0, 3,
1973 0, 1, 0, 2, 0, 1, 0, 5,
1974 0, 1, 0, 2, 0, 1, 0, 3,
1975 0, 1, 0, 2, 0, 1, 0, 4,
1976 0, 1, 0, 2, 0, 1, 0, 3,
1977 0, 1, 0, 2, 0, 1, 0, 8
1978 };
1979
1980
1981 void
1982 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1983 {
1984 /*
1985 * Now we also need to check the mapping array in a couple of ways.
1986 * 1) Did we move the cum-ack point?
1987 *
1988 * When you first glance at this you might think
1989 	 * that all entries that make up the position
1990 	 * of the cum-ack would be in the nr-mapping array
1991 	 * only.. i.e. things up to the cum-ack are always
1992 	 * deliverable. That's true with one exception: when
1993 	 * it's a fragmented message we may not deliver the data
1994 * until some threshold (or all of it) is in place. So
1995 * we must OR the nr_mapping_array and mapping_array to
1996 * get a true picture of the cum-ack.
1997 */
1998 struct sctp_association *asoc;
1999 int at;
2000 uint8_t val;
2001 int slide_from, slide_end, lgap, distance;
2002 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2003
2004 asoc = &stcb->asoc;
2005
2006 old_cumack = asoc->cumulative_tsn;
2007 old_base = asoc->mapping_array_base_tsn;
2008 old_highest = asoc->highest_tsn_inside_map;
2009 /*
2010 * We could probably improve this a small bit by calculating the
2011 * offset of the current cum-ack as the starting point.
2012 */
2013 at = 0;
2014 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2015 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2016 if (val == 0xff) {
2017 at += 8;
2018 } else {
2019 /* there is a 0 bit */
2020 at += sctp_map_lookup_tab[val];
2021 break;
2022 }
2023 }
2024 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);
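	/*
	 * Worked example: with mapping_array_base_tsn = 1000 and combined
	 * bytes 0xff, 0xff, 0x1f, the scan advances at by 8 + 8 + 5 = 21,
	 * so cumulative_tsn becomes 1000 + 20 = 1020, i.e. TSNs
	 * 1000..1020 have all arrived.
	 */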
2025
2026 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2027 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2028 #ifdef INVARIANTS
2029 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2030 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2031 #else
2032 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2033 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2034 sctp_print_mapping_array(asoc);
2035 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2036 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2037 }
2038 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2039 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2040 #endif
2041 }
2042 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2043 highest_tsn = asoc->highest_tsn_inside_nr_map;
2044 } else {
2045 highest_tsn = asoc->highest_tsn_inside_map;
2046 }
2047 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2048 /* The complete array was completed by a single FR */
2049 /* highest becomes the cum-ack */
2050 int clr;
2051 #ifdef INVARIANTS
2052 unsigned int i;
2053 #endif
2054
2055 /* clear the array */
2056 clr = ((at+7) >> 3);
2057 if (clr > asoc->mapping_array_size) {
2058 clr = asoc->mapping_array_size;
2059 }
2060 memset(asoc->mapping_array, 0, clr);
2061 memset(asoc->nr_mapping_array, 0, clr);
2062 #ifdef INVARIANTS
2063 for (i = 0; i < asoc->mapping_array_size; i++) {
2064 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2065 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2066 sctp_print_mapping_array(asoc);
2067 }
2068 }
2069 #endif
2070 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2071 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2072 } else if (at >= 8) {
2073 /* we can slide the mapping array down */
2074 /* slide_from holds where we hit the first NON 0xff byte */
2075
2076 /*
2077 * now calculate the ceiling of the move using our highest
2078 * TSN value
2079 */
2080 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2081 slide_end = (lgap >> 3);
2082 if (slide_end < slide_from) {
2083 sctp_print_mapping_array(asoc);
2084 #ifdef INVARIANTS
2085 panic("impossible slide");
2086 #else
2087 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2088 lgap, slide_end, slide_from, at);
2089 return;
2090 #endif
2091 }
2092 if (slide_end > asoc->mapping_array_size) {
2093 #ifdef INVARIANTS
2094 panic("would overrun buffer");
2095 #else
2096 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2097 asoc->mapping_array_size, slide_end);
2098 slide_end = asoc->mapping_array_size;
2099 #endif
2100 }
2101 distance = (slide_end - slide_from) + 1;
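		/*
		 * Illustration: with slide_from = 2 and slide_end = 5, the
		 * four live bytes are copied to the front of both arrays,
		 * the tail is zeroed, and mapping_array_base_tsn advances
		 * by slide_from * 8 = 16 TSNs.
		 */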
2102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2103 sctp_log_map(old_base, old_cumack, old_highest,
2104 SCTP_MAP_PREPARE_SLIDE);
2105 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2106 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2107 }
2108 if (distance + slide_from > asoc->mapping_array_size ||
2109 distance < 0) {
2110 /*
2111 * Here we do NOT slide forward the array so that
2112 * hopefully when more data comes in to fill it up
2113 * we will be able to slide it forward. Really I
2114 * don't think this should happen :-0
2115 */
2116
2117 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2118 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2119 (uint32_t) asoc->mapping_array_size,
2120 SCTP_MAP_SLIDE_NONE);
2121 }
2122 } else {
2123 int ii;
2124
2125 for (ii = 0; ii < distance; ii++) {
2126 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2127 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2128
2129 }
2130 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2131 asoc->mapping_array[ii] = 0;
2132 asoc->nr_mapping_array[ii] = 0;
2133 }
2134 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2135 asoc->highest_tsn_inside_map += (slide_from << 3);
2136 }
2137 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2138 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2139 }
2140 asoc->mapping_array_base_tsn += (slide_from << 3);
2141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2142 sctp_log_map(asoc->mapping_array_base_tsn,
2143 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2144 SCTP_MAP_SLIDE_RESULT);
2145 }
2146 }
2147 }
2148 }
2149
2150 void
2151 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2152 {
2153 struct sctp_association *asoc;
2154 uint32_t highest_tsn;
2155
2156 asoc = &stcb->asoc;
2157 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2158 highest_tsn = asoc->highest_tsn_inside_nr_map;
2159 } else {
2160 highest_tsn = asoc->highest_tsn_inside_map;
2161 }
2162
2163 /*
2164 * Now we need to see if we need to queue a sack or just start the
2165 * timer (if allowed).
2166 */
2167 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2168 /*
2169 		 * Ok, special case: in the SHUTDOWN-SENT state, here we
2170 		 * make sure the SACK timer is off and instead send a
2171 * SHUTDOWN and a SACK
2172 */
2173 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2174 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2175 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA+SCTP_LOC_18);
2176 }
2177 sctp_send_shutdown(stcb,
2178 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2179 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2180 } else {
2181 int is_a_gap;
2182
2183 /* is there a gap now ? */
2184 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
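		/*
		 * Example: cumulative_tsn = 1005 with highest_tsn = 1010
		 * means TSNs are missing in between, so is_a_gap is set and
		 * the test below sends a SACK immediately rather than
		 * waiting out the delayed-ack timer.
		 */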
2185
2186 /*
2187 * CMT DAC algorithm: increase number of packets
2188 * received since last ack
2189 */
2190 stcb->asoc.cmt_dac_pkts_rcvd++;
2191
2192 if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */
2193 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2194 * longer is one */
2195 (stcb->asoc.numduptsns) || /* we have dup's */
2196 (is_a_gap) || /* is still a gap */
2197 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2198 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2199 ) {
2200
2201 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2202 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2203 (stcb->asoc.send_sack == 0) &&
2204 (stcb->asoc.numduptsns == 0) &&
2205 (stcb->asoc.delayed_ack) &&
2206 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2207
2208 				/*
2209 				 * CMT DAC algorithm: With CMT,
2210 				 * delay acks even in the face of
2211 				 * reordering. Therefore, acks
2212 				 * that do not have to be sent
2213 				 * for any of the above reasons
2214 				 * will be delayed. That is, acks
2215 				 * that would have been sent due
2216 				 * to gap reports will be delayed
2217 				 * with DAC. Start the delayed
2218 				 * ack timer.
2219 				 */
2220 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2221 stcb->sctp_ep, stcb, NULL);
2222 } else {
2223 /*
2224 * Ok we must build a SACK since the
2225 * timer is pending, we got our
2226 * first packet OR there are gaps or
2227 * duplicates.
2228 */
2229 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2230 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2231 }
2232 } else {
2233 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2234 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2235 stcb->sctp_ep, stcb, NULL);
2236 }
2237 }
2238 }
2239 }
2240
2241 void
2242 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2243 {
2244 struct sctp_tmit_chunk *chk;
2245 uint32_t tsize, pd_point;
2246 uint16_t nxt_todel;
2247
2248 if (asoc->fragmented_delivery_inprogress) {
2249 sctp_service_reassembly(stcb, asoc);
2250 }
2251 /* Can we proceed further, i.e. the PD-API is complete */
2252 if (asoc->fragmented_delivery_inprogress) {
2253 /* no */
2254 return;
2255 }
2256 /*
2257 	 * Now, is there some other chunk I can deliver from the reassembly
2258 	 * queue?
2259 */
2260 doit_again:
2261 chk = TAILQ_FIRST(&asoc->reasmqueue);
2262 if (chk == NULL) {
2263 asoc->size_on_reasm_queue = 0;
2264 asoc->cnt_on_reasm_queue = 0;
2265 return;
2266 }
2267 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2268 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2269 ((nxt_todel == chk->rec.data.stream_seq) ||
2270 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2271 /*
2272 * Yep the first one is here. We setup to start reception,
2273 * by backing down the TSN just in case we can't deliver.
2274 */
2275
2276 /*
2277 		 * Before we start, though, either all of the message should
2278 		 * be here, or enough to reach the socket-buffer threshold, or
2279 		 * nothing is on the delivery queue and something can be delivered.
2280 */
2281 if (stcb->sctp_socket) {
2282 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2283 stcb->sctp_ep->partial_delivery_point);
2284 } else {
2285 pd_point = stcb->sctp_ep->partial_delivery_point;
2286 }
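		/*
		 * pd_point is the threshold for starting partial delivery:
		 * the smaller of a fixed fraction of the socket receive
		 * buffer and the endpoint's configured
		 * partial_delivery_point. Delivery starts only once the
		 * message is complete or tsize reaches this point.
		 */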
2287 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2288 asoc->fragmented_delivery_inprogress = 1;
2289 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2290 asoc->str_of_pdapi = chk->rec.data.stream_number;
2291 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2292 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2293 asoc->fragment_flags = chk->rec.data.rcv_flags;
2294 sctp_service_reassembly(stcb, asoc);
2295 if (asoc->fragmented_delivery_inprogress == 0) {
2296 goto doit_again;
2297 }
2298 }
2299 }
2300 }
2301
2302 int
2303 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2304 struct sockaddr *src, struct sockaddr *dst,
2305 struct sctphdr *sh, struct sctp_inpcb *inp,
2306 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t *high_tsn,
2307 #if defined(__FreeBSD__)
2308 uint8_t use_mflowid, uint32_t mflowid,
2309 #endif
2310 uint32_t vrf_id, uint16_t port)
2311 {
2312 struct sctp_data_chunk *ch, chunk_buf;
2313 struct sctp_association *asoc;
2314 int num_chunks = 0; /* number of control chunks processed */
2315 int stop_proc = 0;
2316 int chk_length, break_flag, last_chunk;
2317 int abort_flag = 0, was_a_gap;
2318 struct mbuf *m;
2319 uint32_t highest_tsn;
2320
2321 /* set the rwnd */
2322 sctp_set_rwnd(stcb, &stcb->asoc);
2323
2324 m = *mm;
2325 SCTP_TCB_LOCK_ASSERT(stcb);
2326 asoc = &stcb->asoc;
2327 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2328 highest_tsn = asoc->highest_tsn_inside_nr_map;
2329 } else {
2330 highest_tsn = asoc->highest_tsn_inside_map;
2331 }
2332 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2333 /*
2334 * setup where we got the last DATA packet from for any SACK that
2335 * may need to go out. Don't bump the net. This is done ONLY when a
2336 * chunk is assigned.
2337 */
2338 asoc->last_data_chunk_from = net;
2339
2340 #ifndef __Panda__
2341 /*-
2342 * Now before we proceed we must figure out if this is a wasted
2343 * cluster... i.e. it is a small packet sent in and yet the driver
2344 * underneath allocated a full cluster for it. If so we must copy it
2345 * to a smaller mbuf and free up the cluster mbuf. This will help
2346 * with cluster starvation. Note for __Panda__ we don't do this
2347 * since it has clusters all the way down to 64 bytes.
2348 */
2349 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2350 /* we only handle mbufs that are singletons.. not chains */
2351 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2352 if (m) {
2353 			/* ok, let's see if we can copy the data up */
2354 caddr_t *from, *to;
2355 /* get the pointers and copy */
2356 to = mtod(m, caddr_t *);
2357 from = mtod((*mm), caddr_t *);
2358 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2359 /* copy the length and free up the old */
2360 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2361 sctp_m_freem(*mm);
2362 			/* success, back copy */
2363 *mm = m;
2364 } else {
2365 /* We are in trouble in the mbuf world .. yikes */
2366 m = *mm;
2367 }
2368 }
2369 #endif
2370 /* get pointer to the first chunk header */
2371 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2372 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2373 if (ch == NULL) {
2374 return (1);
2375 }
2376 /*
2377 * process all DATA chunks...
2378 */
2379 *high_tsn = asoc->cumulative_tsn;
2380 break_flag = 0;
2381 asoc->data_pkts_seen++;
2382 while (stop_proc == 0) {
2383 /* validate chunk length */
2384 chk_length = ntohs(ch->ch.chunk_length);
2385 if (length - *offset < chk_length) {
2386 			/* all done, mutilated chunk */
2387 stop_proc = 1;
2388 continue;
2389 }
2390 if (ch->ch.chunk_type == SCTP_DATA) {
2391 if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2392 /*
2393 				 * Need to send an abort since we had an
2394 				 * invalid data chunk.
2395 */
2396 struct mbuf *op_err;
2397 char msg[SCTP_DIAG_INFO_LEN];
2398
2399 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2400 chk_length);
2401 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2402 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
2403 sctp_abort_association(inp, stcb, m, iphlen,
2404 src, dst, sh, op_err,
2405 #if defined(__FreeBSD__)
2406 use_mflowid, mflowid,
2407 #endif
2408 vrf_id, port);
2409 return (2);
2410 }
2411 if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2412 /*
2413 * Need to send an abort since we had an
2414 * empty data chunk.
2415 */
2416 struct mbuf *op_err;
2417
2418 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2419 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
2420 sctp_abort_association(inp, stcb, m, iphlen,
2421 src, dst, sh, op_err,
2422 #if defined(__FreeBSD__)
2423 use_mflowid, mflowid,
2424 #endif
2425 vrf_id, port);
2426 return (2);
2427 }
2428 #ifdef SCTP_AUDITING_ENABLED
2429 sctp_audit_log(0xB1, 0);
2430 #endif
2431 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2432 last_chunk = 1;
2433 } else {
2434 last_chunk = 0;
2435 }
2436 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2437 chk_length, net, high_tsn, &abort_flag, &break_flag,
2438 last_chunk)) {
2439 num_chunks++;
2440 }
2441 if (abort_flag)
2442 return (2);
2443
2444 if (break_flag) {
2445 /*
2446 				 * Set because we ran out of rwnd space and have
2447 				 * no drop-report space left.
2448 */
2449 stop_proc = 1;
2450 continue;
2451 }
2452 } else {
2453 /* not a data chunk in the data region */
2454 switch (ch->ch.chunk_type) {
2455 case SCTP_INITIATION:
2456 case SCTP_INITIATION_ACK:
2457 case SCTP_SELECTIVE_ACK:
2458 case SCTP_NR_SELECTIVE_ACK:
2459 case SCTP_HEARTBEAT_REQUEST:
2460 case SCTP_HEARTBEAT_ACK:
2461 case SCTP_ABORT_ASSOCIATION:
2462 case SCTP_SHUTDOWN:
2463 case SCTP_SHUTDOWN_ACK:
2464 case SCTP_OPERATION_ERROR:
2465 case SCTP_COOKIE_ECHO:
2466 case SCTP_COOKIE_ACK:
2467 case SCTP_ECN_ECHO:
2468 case SCTP_ECN_CWR:
2469 case SCTP_SHUTDOWN_COMPLETE:
2470 case SCTP_AUTHENTICATION:
2471 case SCTP_ASCONF_ACK:
2472 case SCTP_PACKET_DROPPED:
2473 case SCTP_STREAM_RESET:
2474 case SCTP_FORWARD_CUM_TSN:
2475 case SCTP_ASCONF:
2476 /*
2477 * Now, what do we do with KNOWN chunks that
2478 * are NOT in the right place?
2479 *
2480 * For now, I do nothing but ignore them. We
2481 * may later want to add sysctl stuff to
2482 * switch out and do either an ABORT() or
2483 * possibly process them.
2484 */
2485 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2486 struct mbuf *op_err;
2487
2488 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2489 sctp_abort_association(inp, stcb,
2490 m, iphlen,
2491 src, dst,
2492 sh, op_err,
2493 #if defined(__FreeBSD__)
2494 use_mflowid, mflowid,
2495 #endif
2496 vrf_id, port);
2497 return (2);
2498 }
2499 break;
2500 default:
2501 /* unknown chunk type, use bit rules */
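				/*
				 * Per RFC 4960, the two high bits of an
				 * unrecognized chunk type pick the action:
				 * 0x40 set means report it in an ERROR chunk,
				 * 0x80 set means skip it and keep processing;
				 * with 0x80 clear the rest of the packet is
				 * discarded.
				 */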
2502 if (ch->ch.chunk_type & 0x40) {
2503 /* Add a error report to the queue */
2504 struct mbuf *merr;
2505 struct sctp_paramhdr *phd;
2506
2507 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2508 if (merr) {
2509 phd = mtod(merr, struct sctp_paramhdr *);
2510 /*
2511 * We cheat and use param
2512 * type since we did not
2513 					 * bother to define an error
2514 * cause struct. They are
2515 * the same basic format
2516 * with different names.
2517 */
2518 phd->param_type =
2519 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2520 phd->param_length =
2521 htons(chk_length + sizeof(*phd));
2522 SCTP_BUF_LEN(merr) = sizeof(*phd);
2523 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2524 if (SCTP_BUF_NEXT(merr)) {
2525 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2526 sctp_m_freem(merr);
2527 } else {
2528 sctp_queue_op_err(stcb, merr);
2529 }
2530 } else {
2531 sctp_m_freem(merr);
2532 }
2533 }
2534 }
2535 if ((ch->ch.chunk_type & 0x80) == 0) {
2536 /* discard the rest of this packet */
2537 stop_proc = 1;
2538 } /* else skip this bad chunk and
2539 * continue... */
2540 break;
2541 } /* switch of chunk type */
2542 }
2543 *offset += SCTP_SIZE32(chk_length);
2544 if ((*offset >= length) || stop_proc) {
2545 /* no more data left in the mbuf chain */
2546 stop_proc = 1;
2547 continue;
2548 }
2549 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2550 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2551 if (ch == NULL) {
2552 *offset = length;
2553 stop_proc = 1;
2554 continue;
2555 }
2556 }
2557 if (break_flag) {
2558 /*
2559 * we need to report rwnd overrun drops.
2560 */
2561 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2562 }
2563 if (num_chunks) {
2564 /*
2565 		 * Did we get data? If so, update the time for auto-close and
2566 		 * give the peer credit for being alive.
2567 */
2568 SCTP_STAT_INCR(sctps_recvpktwithdata);
2569 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2570 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2571 stcb->asoc.overall_error_count,
2572 0,
2573 SCTP_FROM_SCTP_INDATA,
2574 __LINE__);
2575 }
2576 stcb->asoc.overall_error_count = 0;
2577 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2578 }
2579 /* now service all of the reassm queue if needed */
2580 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2581 sctp_service_queues(stcb, asoc);
2582
2583 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2584 /* Assure that we ack right away */
2585 stcb->asoc.send_sack = 1;
2586 }
2587 /* Start a sack timer or QUEUE a SACK for sending */
2588 sctp_sack_check(stcb, was_a_gap);
2589 return (0);
2590 }
2591
2592 static int
2593 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2594 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2595 int *num_frs,
2596 uint32_t *biggest_newly_acked_tsn,
2597 uint32_t *this_sack_lowest_newack,
2598 int *rto_ok)
2599 {
2600 struct sctp_tmit_chunk *tp1;
2601 unsigned int theTSN;
2602 int j, wake_him = 0, circled = 0;
2603
2604 /* Recover the tp1 we last saw */
2605 tp1 = *p_tp1;
2606 if (tp1 == NULL) {
2607 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2608 }
2609 for (j = frag_strt; j <= frag_end; j++) {
2610 theTSN = j + last_tsn;
2611 while (tp1) {
2612 if (tp1->rec.data.doing_fast_retransmit)
2613 (*num_frs) += 1;
2614
2615 /*-
2616 * CMT: CUCv2 algorithm. For each TSN being
2617 * processed from the sent queue, track the
2618 * next expected pseudo-cumack, or
2619 * rtx_pseudo_cumack, if required. Separate
2620 * cumack trackers for first transmissions,
2621 * and retransmissions.
2622 */
2623 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2624 (tp1->snd_count == 1)) {
2625 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2626 tp1->whoTo->find_pseudo_cumack = 0;
2627 }
2628 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2629 (tp1->snd_count > 1)) {
2630 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2631 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2632 }
2633 if (tp1->rec.data.TSN_seq == theTSN) {
2634 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2635 /*-
2636 * must be held until
2637 * cum-ack passes
2638 */
2639 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2640 /*-
2641 * If it is less than RESEND, it is
2642 * now no-longer in flight.
2643 * Higher values may already be set
2644 * via previous Gap Ack Blocks...
2645 * i.e. ACKED or RESEND.
2646 */
2647 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2648 *biggest_newly_acked_tsn)) {
2649 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2650 }
2651 /*-
2652 * CMT: SFR algo (and HTNA) - set
2653 * saw_newack to 1 for dest being
2654 * newly acked. update
2655 * this_sack_highest_newack if
2656 * appropriate.
2657 */
2658 if (tp1->rec.data.chunk_was_revoked == 0)
2659 tp1->whoTo->saw_newack = 1;
2660
2661 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2662 tp1->whoTo->this_sack_highest_newack)) {
2663 tp1->whoTo->this_sack_highest_newack =
2664 tp1->rec.data.TSN_seq;
2665 }
2666 /*-
2667 * CMT DAC algo: also update
2668 * this_sack_lowest_newack
2669 */
2670 if (*this_sack_lowest_newack == 0) {
2671 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2672 sctp_log_sack(*this_sack_lowest_newack,
2673 last_tsn,
2674 tp1->rec.data.TSN_seq,
2675 0,
2676 0,
2677 SCTP_LOG_TSN_ACKED);
2678 }
2679 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2680 }
2681 /*-
2682 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2683 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2684 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2685 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2686 * Separate pseudo_cumack trackers for first transmissions and
2687 * retransmissions.
2688 */
2689 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2690 if (tp1->rec.data.chunk_was_revoked == 0) {
2691 tp1->whoTo->new_pseudo_cumack = 1;
2692 }
2693 tp1->whoTo->find_pseudo_cumack = 1;
2694 }
2695 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2696 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2697 }
2698 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2699 if (tp1->rec.data.chunk_was_revoked == 0) {
2700 tp1->whoTo->new_pseudo_cumack = 1;
2701 }
2702 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2703 }
2704 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2705 sctp_log_sack(*biggest_newly_acked_tsn,
2706 last_tsn,
2707 tp1->rec.data.TSN_seq,
2708 frag_strt,
2709 frag_end,
2710 SCTP_LOG_TSN_ACKED);
2711 }
2712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2713 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2714 tp1->whoTo->flight_size,
2715 tp1->book_size,
2716 (uintptr_t)tp1->whoTo,
2717 tp1->rec.data.TSN_seq);
2718 }
2719 sctp_flight_size_decrease(tp1);
2720 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2721 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
2722 tp1);
2723 }
2724 sctp_total_flight_decrease(stcb, tp1);
2725
2726 tp1->whoTo->net_ack += tp1->send_size;
2727 if (tp1->snd_count < 2) {
2728 /*-
2729 							 * True non-retransmitted chunk
2730 */
2731 tp1->whoTo->net_ack2 += tp1->send_size;
2732
2733 /*-
2734 							 * update RTO too?
2735 */
2736 if (tp1->do_rtt) {
2737 if (*rto_ok) {
2738 tp1->whoTo->RTO =
2739 sctp_calculate_rto(stcb,
2740 &stcb->asoc,
2741 tp1->whoTo,
2742 &tp1->sent_rcv_time,
2743 sctp_align_safe_nocopy,
2744 SCTP_RTT_FROM_DATA);
2745 *rto_ok = 0;
2746 }
2747 if (tp1->whoTo->rto_needed == 0) {
2748 tp1->whoTo->rto_needed = 1;
2749 }
2750 tp1->do_rtt = 0;
2751 }
2752 }
2753
2754 }
2755 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2756 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2757 stcb->asoc.this_sack_highest_gap)) {
2758 stcb->asoc.this_sack_highest_gap =
2759 tp1->rec.data.TSN_seq;
2760 }
2761 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2762 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2763 #ifdef SCTP_AUDITING_ENABLED
2764 sctp_audit_log(0xB2,
2765 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2766 #endif
2767 }
2768 }
2769 /*-
2770 * All chunks NOT UNSENT fall through here and are marked
2771 * (leave PR-SCTP ones that are to skip alone though)
2772 */
2773 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2774 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2775 tp1->sent = SCTP_DATAGRAM_MARKED;
2776 }
2777 if (tp1->rec.data.chunk_was_revoked) {
2778 /* deflate the cwnd */
2779 tp1->whoTo->cwnd -= tp1->book_size;
2780 tp1->rec.data.chunk_was_revoked = 0;
2781 }
2782 /* NR Sack code here */
2783 if (nr_sacking &&
2784 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2785 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2786 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2787 #ifdef INVARIANTS
2788 } else {
2789 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2790 #endif
2791 }
2792 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2793 if (tp1->data) {
2794 /* sa_ignore NO_NULL_CHK */
2795 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2796 sctp_m_freem(tp1->data);
2797 tp1->data = NULL;
2798 }
2799 wake_him++;
2800 }
2801 }
2802 break;
2803 } /* if (tp1->TSN_seq == theTSN) */
2804 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2805 break;
2806 }
2807 tp1 = TAILQ_NEXT(tp1, sctp_next);
2808 if ((tp1 == NULL) && (circled == 0)) {
2809 circled++;
2810 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2811 }
2812 } /* end while (tp1) */
2813 if (tp1 == NULL) {
2814 circled = 0;
2815 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2816 }
2817 /* In case the fragments were not in order we must reset */
2818 } /* end for (j = fragStart */
2819 *p_tp1 = tp1;
2820 return (wake_him); /* Return value only used for nr-sack */
2821 }
2822
2823
2824 static int
2825 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2826 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
2827 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
2828 int num_seg, int num_nr_seg, int *rto_ok)
2829 {
2830 struct sctp_gap_ack_block *frag, block;
2831 struct sctp_tmit_chunk *tp1;
2832 int i;
2833 int num_frs = 0;
2834 int chunk_freed;
2835 int non_revocable;
2836 uint16_t frag_strt, frag_end, prev_frag_end;
2837
2838 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2839 prev_frag_end = 0;
2840 chunk_freed = 0;
2841
2842 for (i = 0; i < (num_seg + num_nr_seg); i++) {
2843 if (i == num_seg) {
2844 prev_frag_end = 0;
2845 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2846 }
2847 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2848 sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
2849 *offset += sizeof(block);
2850 if (frag == NULL) {
2851 return (chunk_freed);
2852 }
2853 frag_strt = ntohs(frag->start);
2854 frag_end = ntohs(frag->end);
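		/*
		 * Gap ack block bounds are offsets from the SACK's
		 * cumulative TSN: e.g. with last_tsn = 1000, a block with
		 * start = 2 and end = 3 acknowledges TSNs 1002 and 1003.
		 */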
2855
2856 if (frag_strt > frag_end) {
2857 /* This gap report is malformed, skip it. */
2858 continue;
2859 }
2860 if (frag_strt <= prev_frag_end) {
2861 /* This gap report is not in order, so restart. */
2862 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2863 }
2864 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2865 *biggest_tsn_acked = last_tsn + frag_end;
2866 }
2867 if (i < num_seg) {
2868 non_revocable = 0;
2869 } else {
2870 non_revocable = 1;
2871 }
2872 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2873 non_revocable, &num_frs, biggest_newly_acked_tsn,
2874 this_sack_lowest_newack, rto_ok)) {
2875 chunk_freed = 1;
2876 }
2877 prev_frag_end = frag_end;
2878 }
2879 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2880 if (num_frs)
2881 sctp_log_fr(*biggest_tsn_acked,
2882 *biggest_newly_acked_tsn,
2883 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2884 }
2885 return (chunk_freed);
2886 }
2887
2888 static void
2889 sctp_check_for_revoked(struct sctp_tcb *stcb,
2890 struct sctp_association *asoc, uint32_t cumack,
2891 uint32_t biggest_tsn_acked)
2892 {
2893 struct sctp_tmit_chunk *tp1;
2894
2895 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2896 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2897 /*
2898 			 * ok, this guy is either ACKED or MARKED. If it is
2899 			 * ACKED it has been previously acked but not this
2900 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
2901 * again.
2902 */
2903 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2904 break;
2905 }
2906 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2907 /* it has been revoked */
2908 tp1->sent = SCTP_DATAGRAM_SENT;
2909 tp1->rec.data.chunk_was_revoked = 1;
2910 /* We must add this stuff back in to
2911 * assure timers and such get started.
2912 */
2913 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2914 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2915 tp1->whoTo->flight_size,
2916 tp1->book_size,
2917 (uintptr_t)tp1->whoTo,
2918 tp1->rec.data.TSN_seq);
2919 }
2920 sctp_flight_size_increase(tp1);
2921 sctp_total_flight_increase(stcb, tp1);
2922 /* We inflate the cwnd to compensate for our
2923 * artificial inflation of the flight_size.
2924 */
2925 tp1->whoTo->cwnd += tp1->book_size;
2926 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2927 sctp_log_sack(asoc->last_acked_seq,
2928 cumack,
2929 tp1->rec.data.TSN_seq,
2930 0,
2931 0,
2932 SCTP_LOG_TSN_REVOKED);
2933 }
2934 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2935 /* it has been re-acked in this SACK */
2936 tp1->sent = SCTP_DATAGRAM_ACKED;
2937 }
2938 }
2939 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2940 break;
2941 }
2942 }
2943
2944
2945 static void
2946 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2947 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2948 {
2949 struct sctp_tmit_chunk *tp1;
2950 int strike_flag = 0;
2951 struct timeval now;
2952 int tot_retrans = 0;
2953 uint32_t sending_seq;
2954 struct sctp_nets *net;
2955 int num_dests_sacked = 0;
2956
2957 /*
2958 	 * select the sending_seq: this is either the next thing ready to be
2959 	 * sent but not transmitted, OR the next seq we will assign.
2960 */
2961 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2962 if (tp1 == NULL) {
2963 sending_seq = asoc->sending_seq;
2964 } else {
2965 sending_seq = tp1->rec.data.TSN_seq;
2966 }
2967
2968 /* CMT DAC algo: finding out if SACK is a mixed SACK */
2969 if ((asoc->sctp_cmt_on_off > 0) &&
2970 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2971 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2972 if (net->saw_newack)
2973 num_dests_sacked++;
2974 }
2975 }
2976 if (stcb->asoc.peer_supports_prsctp) {
2977 (void)SCTP_GETTIME_TIMEVAL(&now);
2978 }
2979 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2980 strike_flag = 0;
2981 if (tp1->no_fr_allowed) {
2982 /* this one had a timeout or something */
2983 continue;
2984 }
2985 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2986 if (tp1->sent < SCTP_DATAGRAM_RESEND)
2987 sctp_log_fr(biggest_tsn_newly_acked,
2988 tp1->rec.data.TSN_seq,
2989 tp1->sent,
2990 SCTP_FR_LOG_CHECK_STRIKE);
2991 }
2992 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2993 tp1->sent == SCTP_DATAGRAM_UNSENT) {
2994 /* done */
2995 break;
2996 }
2997 if (stcb->asoc.peer_supports_prsctp) {
2998 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2999 /* Is it expired? */
3000 #ifndef __FreeBSD__
3001 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3002 #else
3003 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3004 #endif
3005 /* Yes so drop it */
3006 if (tp1->data != NULL) {
3007 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3008 SCTP_SO_NOT_LOCKED);
3009 }
3010 continue;
3011 }
3012 }
3013
3014 }
3015 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3016 /* we are beyond the tsn in the sack */
3017 break;
3018 }
3019 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3020 /* either a RESEND, ACKED, or MARKED */
3021 /* skip */
3022 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3023 				/* Continue striking FWD-TSN chunks */
3024 tp1->rec.data.fwd_tsn_cnt++;
3025 }
3026 continue;
3027 }
3028 /*
3029 * CMT : SFR algo (covers part of DAC and HTNA as well)
3030 */
3031 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3032 /*
3033 			 * No new acks were received for data sent to this
3034 * dest. Therefore, according to the SFR algo for
3035 * CMT, no data sent to this dest can be marked for
3036 * FR using this SACK.
3037 */
3038 continue;
3039 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3040 tp1->whoTo->this_sack_highest_newack)) {
3041 /*
3042 			 * CMT: New acks were received for data sent to
3043 * this dest. But no new acks were seen for data
3044 * sent after tp1. Therefore, according to the SFR
3045 * algo for CMT, tp1 cannot be marked for FR using
3046 * this SACK. This step covers part of the DAC algo
3047 * and the HTNA algo as well.
3048 */
3049 continue;
3050 }
3051 /*
3052 		 * Here we check to see if we have already done a FR,
3053 * and if so we see if the biggest TSN we saw in the sack is
3054 * smaller than the recovery point. If so we don't strike
3055 * the tsn... otherwise we CAN strike the TSN.
3056 */
3057 /*
3058 * @@@ JRI: Check for CMT
3059 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3060 */
3061 if (accum_moved && asoc->fast_retran_loss_recovery) {
3062 /*
3063 * Strike the TSN if in fast-recovery and cum-ack
3064 * moved.
3065 */
3066 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3067 sctp_log_fr(biggest_tsn_newly_acked,
3068 tp1->rec.data.TSN_seq,
3069 tp1->sent,
3070 SCTP_FR_LOG_STRIKE_CHUNK);
3071 }
3072 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3073 tp1->sent++;
3074 }
3075 if ((asoc->sctp_cmt_on_off > 0) &&
3076 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3077 /*
3078 * CMT DAC algorithm: If SACK flag is set to
3079 * 0, then lowest_newack test will not pass
3080 * because it would have been set to the
3081 * cumack earlier. If not already to be
3082 * rtx'd, If not a mixed sack and if tp1 is
3083 * not between two sacked TSNs, then mark by
3084 * one more.
3085 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3086 * two packets have been received after this missing TSN.
3087 */
3088 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3089 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3090 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3091 sctp_log_fr(16 + num_dests_sacked,
3092 tp1->rec.data.TSN_seq,
3093 tp1->sent,
3094 SCTP_FR_LOG_STRIKE_CHUNK);
3095 }
3096 tp1->sent++;
3097 }
3098 }
3099 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3100 (asoc->sctp_cmt_on_off == 0)) {
3101 /*
3102 * For those that have done a FR we must take
3103 * special consideration if we strike. I.e the
3104 * biggest_newly_acked must be higher than the
3105 * sending_seq at the time we did the FR.
3106 */
3107 if (
3108 #ifdef SCTP_FR_TO_ALTERNATE
3109 /*
3110 * If FR's go to new networks, then we must only do
3111 * this for singly homed asoc's. However if the FR's
3112 * go to the same network (Armando's work) then its
3113 * ok to FR multiple times.
3114 */
3115 (asoc->numnets < 2)
3116 #else
3117 (1)
3118 #endif
3119 ) {
3120
3121 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3122 tp1->rec.data.fast_retran_tsn)) {
3123 /*
3124 * Strike the TSN, since this ack is
3125 * beyond where things were when we
3126 * did a FR.
3127 */
3128 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3129 sctp_log_fr(biggest_tsn_newly_acked,
3130 tp1->rec.data.TSN_seq,
3131 tp1->sent,
3132 SCTP_FR_LOG_STRIKE_CHUNK);
3133 }
3134 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3135 tp1->sent++;
3136 }
3137 strike_flag = 1;
3138 if ((asoc->sctp_cmt_on_off > 0) &&
3139 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3140 /*
3141 * CMT DAC algorithm: If
3142 * SACK flag is set to 0,
3143 * then lowest_newack test
3144 * will not pass because it
3145 * would have been set to
3146 * the cumack earlier. If
3147 * not already to be rtx'd,
3148 * If not a mixed sack and
3149 * if tp1 is not between two
3150 * sacked TSNs, then mark by
3151 * one more.
3152 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3153 * two packets have been received after this missing TSN.
3154 */
3155 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3156 (num_dests_sacked == 1) &&
3157 SCTP_TSN_GT(this_sack_lowest_newack,
3158 tp1->rec.data.TSN_seq)) {
3159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3160 sctp_log_fr(32 + num_dests_sacked,
3161 tp1->rec.data.TSN_seq,
3162 tp1->sent,
3163 SCTP_FR_LOG_STRIKE_CHUNK);
3164 }
3165 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3166 tp1->sent++;
3167 }
3168 }
3169 }
3170 }
3171 }
3172 /*
3173 * JRI: TODO: remove code for HTNA algo. CMT's
3174 * SFR algo covers HTNA.
3175 */
3176 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3177 biggest_tsn_newly_acked)) {
3178 /*
3179 * We don't strike these: This is the HTNA
3180 * algorithm i.e. we don't strike If our TSN is
3181 * larger than the Highest TSN Newly Acked.
3182 */
3183 ;
3184 } else {
3185 /* Strike the TSN */
3186 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3187 sctp_log_fr(biggest_tsn_newly_acked,
3188 tp1->rec.data.TSN_seq,
3189 tp1->sent,
3190 SCTP_FR_LOG_STRIKE_CHUNK);
3191 }
3192 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3193 tp1->sent++;
3194 }
3195 if ((asoc->sctp_cmt_on_off > 0) &&
3196 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3197 /*
3198 * CMT DAC algorithm: If SACK flag is set to
3199 * 0, then lowest_newack test will not pass
3200 * because it would have been set to the
3201 * cumack earlier. If not already to be
3202 * rtx'd, If not a mixed sack and if tp1 is
3203 * not between two sacked TSNs, then mark by
3204 * one more.
3205 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
3206 * two packets have been received after this missing TSN.
3207 */
3208 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3209 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3210 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3211 sctp_log_fr(48 + num_dests_sacked,
3212 tp1->rec.data.TSN_seq,
3213 tp1->sent,
3214 SCTP_FR_LOG_STRIKE_CHUNK);
3215 }
3216 tp1->sent++;
3217 }
3218 }
3219 }
3220 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3221 struct sctp_nets *alt;
3222
3223 /* fix counts and things */
3224 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3225 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3226 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3227 tp1->book_size,
3228 (uintptr_t)tp1->whoTo,
3229 tp1->rec.data.TSN_seq);
3230 }
3231 if (tp1->whoTo) {
3232 tp1->whoTo->net_ack++;
3233 sctp_flight_size_decrease(tp1);
3234 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3235 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3236 tp1);
3237 }
3238 }
3239
3240 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3241 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3242 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3243 }
3244 /* add back to the rwnd */
3245 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3246
3247 /* remove from the total flight */
3248 sctp_total_flight_decrease(stcb, tp1);
3249
3250 if ((stcb->asoc.peer_supports_prsctp) &&
3251 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3252 /* Has it been retransmitted tv_sec times? - we store the retran count there. */
3253 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3254 /* Yes, so drop it */
3255 if (tp1->data != NULL) {
3256 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3257 SCTP_SO_NOT_LOCKED);
3258 }
3259 /* Make sure to flag we had a FR */
3260 tp1->whoTo->net_ack++;
3261 continue;
3262 }
3263 }
3264 /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
3265 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3266 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3267 0, SCTP_FR_MARKED);
3268 }
3269 if (strike_flag) {
3270 /* This is a subsequent FR */
3271 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3272 }
3273 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3274 if (asoc->sctp_cmt_on_off > 0) {
3275 /*
3276 * CMT: Using RTX_SSTHRESH policy for CMT.
3277 * If CMT is being used, then pick dest with
3278 * largest ssthresh for any retransmission.
3279 */
3280 tp1->no_fr_allowed = 1;
3281 alt = tp1->whoTo;
3282 /*sa_ignore NO_NULL_CHK*/
3283 if (asoc->sctp_cmt_pf > 0) {
3284 /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
3285 alt = sctp_find_alternate_net(stcb, alt, 2);
3286 } else {
3287 /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
3288 /*sa_ignore NO_NULL_CHK*/
3289 alt = sctp_find_alternate_net(stcb, alt, 1);
3290 }
3291 if (alt == NULL) {
3292 alt = tp1->whoTo;
3293 }
3294 /*
3295 * CUCv2: If a different dest is picked for
3296 * the retransmission, then new
3297 * (rtx-)pseudo_cumack needs to be tracked
3298 * for orig dest. Let CUCv2 track new (rtx-)
3299 * pseudo-cumack always.
3300 */
3301 if (tp1->whoTo) {
3302 tp1->whoTo->find_pseudo_cumack = 1;
3303 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3304 }
3305
			} else {	/* CMT is OFF */
3307
3308 #ifdef SCTP_FR_TO_ALTERNATE
3309 /* Can we find an alternate? */
3310 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3311 #else
3312 /*
3313 * default behavior is to NOT retransmit
3314 * FR's to an alternate. Armando Caro's
3315 * paper details why.
3316 */
3317 alt = tp1->whoTo;
3318 #endif
3319 }
3320
3321 tp1->rec.data.doing_fast_retransmit = 1;
3322 tot_retrans++;
3323 /* mark the sending seq for possible subsequent FR's */
			/*
			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
			 * (uint32_t)tp1->rec.data.TSN_seq);
			 */
3328 if (TAILQ_EMPTY(&asoc->send_queue)) {
				/*
				 * If the send queue is empty, then the next
				 * sequence number to be assigned is
				 * sending_seq, so we subtract one from it to
				 * get the TSN we last sent.
				 */
3335 tp1->rec.data.fast_retran_tsn = sending_seq;
3336 } else {
				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door), we
				 * take the first one (which will have the
				 * lowest TSN) and subtract one to get the
				 * TSN we last sent.
				 */
3345 struct sctp_tmit_chunk *ttt;
3346
3347 ttt = TAILQ_FIRST(&asoc->send_queue);
3348 tp1->rec.data.fast_retran_tsn =
3349 ttt->rec.data.TSN_seq;
3350 }
3351
3352 if (tp1->do_rtt) {
				/*
				 * This guy had an RTO calculation pending on
				 * it; cancel it.
				 */
3357 if ((tp1->whoTo != NULL) &&
3358 (tp1->whoTo->rto_needed == 0)) {
3359 tp1->whoTo->rto_needed = 1;
3360 }
3361 tp1->do_rtt = 0;
3362 }
3363 if (alt != tp1->whoTo) {
3364 /* yes, there is an alternate. */
3365 sctp_free_remote_addr(tp1->whoTo);
3366 /*sa_ignore FREED_MEMORY*/
3367 tp1->whoTo = alt;
3368 atomic_add_int(&alt->ref_count, 1);
3369 }
3370 }
3371 }
3372 }
3373
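/*
 * Try to advance the PR-SCTP Advanced.Peer.Ack.Point: walk the sent
 * queue from the front past chunks marked FORWARD_TSN_SKIP or
 * NR_ACKED, dropping TTL-expired resends along the way, and return
 * the chunk at the new ack point (to be reported in a FWD-TSN), or
 * NULL if no advance is possible.
 */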
3374 struct sctp_tmit_chunk *
3375 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3376 struct sctp_association *asoc)
3377 {
3378 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3379 struct timeval now;
3380 int now_filled = 0;
3381
3382 if (asoc->peer_supports_prsctp == 0) {
3383 return (NULL);
3384 }
3385 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3386 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3387 tp1->sent != SCTP_DATAGRAM_RESEND &&
3388 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3389 /* no chance to advance, out of here */
3390 break;
3391 }
3392 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3393 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3394 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3395 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3396 asoc->advanced_peer_ack_point,
3397 tp1->rec.data.TSN_seq, 0, 0);
3398 }
3399 }
3400 if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any chunk that is reliable,
			 * i.e. one that will be retransmitted until the
			 * asoc fails.
			 */
3405 break;
3406 }
3407 if (!now_filled) {
3408 (void)SCTP_GETTIME_TIMEVAL(&now);
3409 now_filled = 1;
3410 }
		/*
		 * Now we have a chunk which is marked for another
		 * retransmission on a PR-SCTP stream but has already run
		 * out of chances, or has been marked to be skipped. Can
		 * we skip it if it is a resend?
		 */
3417 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3418 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Is this one marked for resend, and has its time
			 * now expired?
			 */
3423 #ifndef __FreeBSD__
3424 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3425 #else
3426 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3427 #endif
3428 /* Yes so drop it */
3429 if (tp1->data) {
3430 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3431 1, SCTP_SO_NOT_LOCKED);
3432 }
3433 } else {
				/*
				 * No, we are done once we hit a chunk marked
				 * for resend whose time has not yet expired.
				 */
3438 break;
3439 }
3440 }
		/*
		 * OK, if this chunk is marked to be dropped, we can clean
		 * it up, advance our peer ack point, and check the next
		 * chunk.
		 */
3446 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3447 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
			/* the Advanced.Peer.Ack.Point moves forward */
3449 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3450 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3451 a_adv = tp1;
3452 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3453 /* No update but we do save the chk */
3454 a_adv = tp1;
3455 }
3456 } else {
3457 /*
3458 * If it is still in RESEND we can advance no
3459 * further
3460 */
3461 break;
3462 }
3463 }
3464 return (a_adv);
3465 }
3466
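/*
 * Audit the flight-size accounting: count the sent-queue chunks by
 * state and, if any still look in-flight or sit between RESEND and
 * ACKED, panic under INVARIANTS or log the discrepancy and return
 * non-zero so the caller can rebuild the flight counters.
 */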
3467 static int
3468 sctp_fs_audit(struct sctp_association *asoc)
3469 {
3470 struct sctp_tmit_chunk *chk;
3471 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3472 int entry_flight, entry_cnt, ret;
3473
3474 entry_flight = asoc->total_flight;
3475 entry_cnt = asoc->total_flight_count;
3476 ret = 0;
3477
3478 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3479 return (0);
3480
3481 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3482 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3483 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3484 chk->rec.data.TSN_seq,
3485 chk->send_size,
3486 chk->snd_count);
3487 inflight++;
3488 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3489 resend++;
3490 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3491 inbetween++;
3492 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3493 above++;
3494 } else {
3495 acked++;
3496 }
3497 }
3498
3499 if ((inflight > 0) || (inbetween > 0)) {
3500 #ifdef INVARIANTS
		panic("Flight size-express incorrect?\n");
3502 #else
3503 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3504 entry_flight, entry_cnt);
3505
3506 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3507 inflight, inbetween, resend, above, acked);
3508 ret = 1;
3509 #endif
3510 }
3511 return (ret);
3512 }
3513
3514
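/*
 * A chunk that was sent as a window probe was not acked: unless it
 * was skipped or already acked, take it out of flight and mark it
 * for resend so it goes out again as ordinary data.
 */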
3515 static void
3516 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3517 struct sctp_association *asoc,
3518 struct sctp_tmit_chunk *tp1)
3519 {
3520 tp1->window_probe = 0;
3521 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
		/* The TSN was skipped or already acked; we do NOT move it back. */
3523 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3524 tp1->whoTo->flight_size,
3525 tp1->book_size,
3526 (uintptr_t)tp1->whoTo,
3527 tp1->rec.data.TSN_seq);
3528 return;
3529 }
	/* First set this up by shrinking the flight */
3531 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3532 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3533 tp1);
3534 }
3535 sctp_flight_size_decrease(tp1);
3536 sctp_total_flight_decrease(stcb, tp1);
3537 /* Now mark for resend */
3538 tp1->sent = SCTP_DATAGRAM_RESEND;
3539 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3540
3541 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3542 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3543 tp1->whoTo->flight_size,
3544 tp1->book_size,
3545 (uintptr_t)tp1->whoTo,
3546 tp1->rec.data.TSN_seq);
3547 }
3548 }
3549
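/*
 * Express (fast-path) SACK handling for a SACK that carries only a
 * cumulative ack and an rwnd, with no gap-ack or duplicate-TSN
 * reports; also used for the cumulative TSN carried in a SHUTDOWN
 * chunk.
 */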
3550 void
3551 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3552 uint32_t rwnd, int *abort_now, int ecne_seen)
3553 {
3554 struct sctp_nets *net;
3555 struct sctp_association *asoc;
3556 struct sctp_tmit_chunk *tp1, *tp2;
3557 uint32_t old_rwnd;
3558 int win_probe_recovery = 0;
3559 int win_probe_recovered = 0;
3560 int j, done_once = 0;
3561 int rto_ok = 1;
3562
3563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3564 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3565 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3566 }
3567 SCTP_TCB_LOCK_ASSERT(stcb);
3568 #ifdef SCTP_ASOCLOG_OF_TSNS
3569 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3570 stcb->asoc.cumack_log_at++;
3571 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3572 stcb->asoc.cumack_log_at = 0;
3573 }
3574 #endif
3575 asoc = &stcb->asoc;
3576 old_rwnd = asoc->peers_rwnd;
3577 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3578 /* old ack */
3579 return;
3580 } else if (asoc->last_acked_seq == cumack) {
3581 /* Window update sack */
3582 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3583 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3584 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3585 /* SWS sender side engages */
3586 asoc->peers_rwnd = 0;
3587 }
3588 if (asoc->peers_rwnd > old_rwnd) {
3589 goto again;
3590 }
3591 return;
3592 }
3593
3594 /* First setup for CC stuff */
3595 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3596 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3597 /* Drag along the window_tsn for cwr's */
3598 net->cwr_window_tsn = cumack;
3599 }
3600 net->prev_cwnd = net->cwnd;
3601 net->net_ack = 0;
3602 net->net_ack2 = 0;
3603
3604 /*
3605 * CMT: Reset CUC and Fast recovery algo variables before
3606 * SACK processing
3607 */
3608 net->new_pseudo_cumack = 0;
3609 net->will_exit_fast_recovery = 0;
3610 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3611 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
3612 }
3613 }
3614 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3615 uint32_t send_s;
3616
3617 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3618 tp1 = TAILQ_LAST(&asoc->sent_queue,
3619 sctpchunk_listhead);
3620 send_s = tp1->rec.data.TSN_seq + 1;
3621 } else {
3622 send_s = asoc->sending_seq;
3623 }
3624 if (SCTP_TSN_GE(cumack, send_s)) {
3625 #ifndef INVARIANTS
3626 struct mbuf *op_err;
3627 char msg[SCTP_DIAG_INFO_LEN];
3628
3629 #endif
3630 #ifdef INVARIANTS
3631 panic("Impossible sack 1");
3632 #else
3633
3634 *abort_now = 1;
3635 /* XXX */
		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
		         cumack, send_s);
3638 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3639 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3640 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3641 return;
3642 #endif
3643 }
3644 }
3645 asoc->this_sack_highest_gap = cumack;
3646 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3647 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3648 stcb->asoc.overall_error_count,
3649 0,
3650 SCTP_FROM_SCTP_INDATA,
3651 __LINE__);
3652 }
3653 stcb->asoc.overall_error_count = 0;
3654 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3655 /* process the new consecutive TSN first */
3656 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3657 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3658 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3659 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3660 }
3661 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
				/*
				 * If it is less than ACKED, it is now no
				 * longer in flight. Higher values may occur
				 * during marking.
				 */
3667 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3668 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3669 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3670 tp1->whoTo->flight_size,
3671 tp1->book_size,
3672 (uintptr_t)tp1->whoTo,
3673 tp1->rec.data.TSN_seq);
3674 }
3675 sctp_flight_size_decrease(tp1);
3676 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3677 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3678 tp1);
3679 }
3680 /* sa_ignore NO_NULL_CHK */
3681 sctp_total_flight_decrease(stcb, tp1);
3682 }
3683 tp1->whoTo->net_ack += tp1->send_size;
3684 if (tp1->snd_count < 2) {
					/*
					 * True non-retransmitted
					 * chunk
					 */
3689 tp1->whoTo->net_ack2 +=
3690 tp1->send_size;
3691
3692 /* update RTO too? */
3693 if (tp1->do_rtt) {
3694 if (rto_ok) {
3695 tp1->whoTo->RTO =
3696 /*
3697 * sa_ignore
3698 * NO_NULL_CHK
3699 */
3700 sctp_calculate_rto(stcb,
3701 asoc, tp1->whoTo,
3702 &tp1->sent_rcv_time,
3703 sctp_align_safe_nocopy,
3704 SCTP_RTT_FROM_DATA);
3705 rto_ok = 0;
3706 }
3707 if (tp1->whoTo->rto_needed == 0) {
3708 tp1->whoTo->rto_needed = 1;
3709 }
3710 tp1->do_rtt = 0;
3711 }
3712 }
3713 /*
3714 * CMT: CUCv2 algorithm. From the
3715 * cumack'd TSNs, for each TSN being
3716 * acked for the first time, set the
3717 * following variables for the
3718 * corresp destination.
3719 * new_pseudo_cumack will trigger a
3720 * cwnd update.
3721 * find_(rtx_)pseudo_cumack will
3722 * trigger search for the next
3723 * expected (rtx-)pseudo-cumack.
3724 */
3725 tp1->whoTo->new_pseudo_cumack = 1;
3726 tp1->whoTo->find_pseudo_cumack = 1;
3727 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3728
3729 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3730 /* sa_ignore NO_NULL_CHK */
3731 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3732 }
3733 }
3734 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3735 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3736 }
3737 if (tp1->rec.data.chunk_was_revoked) {
3738 /* deflate the cwnd */
3739 tp1->whoTo->cwnd -= tp1->book_size;
3740 tp1->rec.data.chunk_was_revoked = 0;
3741 }
3742 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3743 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3744 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3745 #ifdef INVARIANTS
3746 } else {
3747 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3748 #endif
3749 }
3750 }
3751 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3752 if (tp1->data) {
3753 /* sa_ignore NO_NULL_CHK */
3754 sctp_free_bufspace(stcb, asoc, tp1, 1);
3755 sctp_m_freem(tp1->data);
3756 tp1->data = NULL;
3757 }
3758 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3759 sctp_log_sack(asoc->last_acked_seq,
3760 cumack,
3761 tp1->rec.data.TSN_seq,
3762 0,
3763 0,
3764 SCTP_LOG_FREE_SENT);
3765 }
3766 asoc->sent_queue_cnt--;
3767 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3768 } else {
3769 break;
3770 }
3771 }
3772
3773 }
3774 #if defined(__Userspace__)
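	/*
	 * When built for userspace, the stack signals send-buffer space via
	 * an upcall instead of a socket wakeup: once the cumack frees enough
	 * room to cross the registered send_sb_threshold, the application's
	 * send_callback is invoked with the TCB unlocked.
	 */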
3775 if (stcb->sctp_ep->recv_callback) {
3776 if (stcb->sctp_socket) {
3777 uint32_t inqueue_bytes, sb_free_now;
3778 struct sctp_inpcb *inp;
3779
3780 inp = stcb->sctp_ep;
3781 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
3782 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
3783
3784 /* check if the amount free in the send socket buffer crossed the threshold */
3785 if (inp->send_callback &&
3786 (((inp->send_sb_threshold > 0) &&
3787 (sb_free_now >= inp->send_sb_threshold) &&
3788 (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
3789 (inp->send_sb_threshold == 0))) {
3790 atomic_add_int(&stcb->asoc.refcnt, 1);
3791 SCTP_TCB_UNLOCK(stcb);
3792 inp->send_callback(stcb->sctp_socket, sb_free_now);
3793 SCTP_TCB_LOCK(stcb);
3794 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3795 }
3796 }
3797 } else if (stcb->sctp_socket) {
3798 #else
3799 /* sa_ignore NO_NULL_CHK */
3800 if (stcb->sctp_socket) {
3801 #endif
3802 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3803 struct socket *so;
3804
3805 #endif
3806 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3807 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3808 /* sa_ignore NO_NULL_CHK */
3809 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3810 }
3811 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3812 so = SCTP_INP_SO(stcb->sctp_ep);
3813 atomic_add_int(&stcb->asoc.refcnt, 1);
3814 SCTP_TCB_UNLOCK(stcb);
3815 SCTP_SOCKET_LOCK(so, 1);
3816 SCTP_TCB_LOCK(stcb);
3817 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3818 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3819 /* assoc was freed while we were unlocked */
3820 SCTP_SOCKET_UNLOCK(so, 1);
3821 return;
3822 }
3823 #endif
3824 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3825 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3826 SCTP_SOCKET_UNLOCK(so, 1);
3827 #endif
3828 } else {
3829 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3830 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3831 }
3832 }
3833
3834 /* JRS - Use the congestion control given in the CC module */
3835 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3836 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3837 if (net->net_ack2 > 0) {
				/*
				 * Karn's rule applies to clearing the error
				 * count; this is optional.
				 */
3842 net->error_count = 0;
3843 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3844 /* addr came good */
3845 net->dest_state |= SCTP_ADDR_REACHABLE;
3846 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3847 0, (void *)net, SCTP_SO_NOT_LOCKED);
3848 }
3849 if (net == stcb->asoc.primary_destination) {
3850 if (stcb->asoc.alternate) {
3851 /* release the alternate, primary is good */
3852 sctp_free_remote_addr(stcb->asoc.alternate);
3853 stcb->asoc.alternate = NULL;
3854 }
3855 }
3856 if (net->dest_state & SCTP_ADDR_PF) {
3857 net->dest_state &= ~SCTP_ADDR_PF;
3858 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
3859 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3860 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3861 /* Done with this net */
3862 net->net_ack = 0;
3863 }
3864 /* restore any doubled timers */
3865 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3866 if (net->RTO < stcb->asoc.minrto) {
3867 net->RTO = stcb->asoc.minrto;
3868 }
3869 if (net->RTO > stcb->asoc.maxrto) {
3870 net->RTO = stcb->asoc.maxrto;
3871 }
3872 }
3873 }
3874 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3875 }
3876 asoc->last_acked_seq = cumack;
3877
3878 if (TAILQ_EMPTY(&asoc->sent_queue)) {
3879 /* nothing left in-flight */
3880 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3881 net->flight_size = 0;
3882 net->partial_bytes_acked = 0;
3883 }
3884 asoc->total_flight = 0;
3885 asoc->total_flight_count = 0;
3886 }
3887
3888 /* RWND update */
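	/*
	 * The usable peer rwnd is what the peer advertised minus what we
	 * still have in flight, charging sctp_peer_chunk_oh bytes of
	 * per-chunk overhead. For example (illustrative numbers only):
	 * rwnd = 65536, total_flight = 4096, two chunks in flight, and an
	 * overhead of 256 bytes give
	 * peers_rwnd = 65536 - (4096 + 2 * 256) = 60928.
	 */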
3889 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3890 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3891 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3892 /* SWS sender side engages */
3893 asoc->peers_rwnd = 0;
3894 }
3895 if (asoc->peers_rwnd > old_rwnd) {
3896 win_probe_recovery = 1;
3897 }
	/* Now ensure a timer is running wherever data is still queued */
3899 again:
3900 j = 0;
3901 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3902 int to_ticks;
3903 if (win_probe_recovery && (net->window_probe)) {
3904 win_probe_recovered = 1;
			/*
			 * Find the first chunk that was used for a window
			 * probe and clear its sent state.
			 */
3909 /* sa_ignore FREED_MEMORY */
3910 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3911 if (tp1->window_probe) {
3912 /* move back to data send queue */
3913 sctp_window_probe_recovery(stcb, asoc, tp1);
3914 break;
3915 }
3916 }
3917 }
3918 if (net->RTO == 0) {
3919 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3920 } else {
3921 to_ticks = MSEC_TO_TICKS(net->RTO);
3922 }
3923 if (net->flight_size) {
3924 j++;
3925 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3926 sctp_timeout_handler, &net->rxt_timer);
3927 if (net->window_probe) {
3928 net->window_probe = 0;
3929 }
3930 } else {
3931 if (net->window_probe) {
				/* For window probes we must ensure a timer is still running there */
3933 net->window_probe = 0;
3934 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3935 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3936 sctp_timeout_handler, &net->rxt_timer);
3937 }
3938 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3939 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3940 stcb, net,
3941 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
3942 }
3943 }
3944 }
3945 if ((j == 0) &&
3946 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3947 (asoc->sent_queue_retran_cnt == 0) &&
3948 (win_probe_recovered == 0) &&
3949 (done_once == 0)) {
		/*
		 * Huh, this should not happen unless all packets are
		 * PR-SCTP and marked to be skipped, of course.
		 */
3953 if (sctp_fs_audit(asoc)) {
3954 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3955 net->flight_size = 0;
3956 }
3957 asoc->total_flight = 0;
3958 asoc->total_flight_count = 0;
3959 asoc->sent_queue_retran_cnt = 0;
3960 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3961 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3962 sctp_flight_size_increase(tp1);
3963 sctp_total_flight_increase(stcb, tp1);
3964 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3965 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3966 }
3967 }
3968 }
3969 done_once = 1;
3970 goto again;
3971 }
3972 /**********************************/
3973 /* Now what about shutdown issues */
3974 /**********************************/
3975 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3976 /* nothing left on sendqueue.. consider done */
3977 /* clean up */
3978 if ((asoc->stream_queue_cnt == 1) &&
3979 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3980 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3981 (asoc->locked_on_sending)
3982 ) {
3983 struct sctp_stream_queue_pending *sp;
			/*
			 * I may be in a state where we got all across...
			 * but cannot write more due to a shutdown... we
			 * abort since the user did not indicate EOR in
			 * this case. The sp will be cleaned up during
			 * free of the asoc.
			 */
3990 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3991 sctp_streamhead);
3992 if ((sp) && (sp->length == 0)) {
3993 /* Let cleanup code purge it */
3994 if (sp->msg_is_complete) {
3995 asoc->stream_queue_cnt--;
3996 } else {
3997 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3998 asoc->locked_on_sending = NULL;
3999 asoc->stream_queue_cnt--;
4000 }
4001 }
4002 }
4003 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4004 (asoc->stream_queue_cnt == 0)) {
4005 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4006 /* Need to abort here */
4007 struct mbuf *op_err;
4008
4009 abort_out_now:
4010 *abort_now = 1;
4011 /* XXX */
4012 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4013 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4014 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4015 } else {
4016 struct sctp_nets *netp;
4017
4018 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4019 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4020 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4021 }
4022 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4023 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4024 sctp_stop_timers_for_shutdown(stcb);
4025 if (asoc->alternate) {
4026 netp = asoc->alternate;
4027 } else {
4028 netp = asoc->primary_destination;
4029 }
4030 sctp_send_shutdown(stcb, netp);
4031 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4032 stcb->sctp_ep, stcb, netp);
4033 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4034 stcb->sctp_ep, stcb, netp);
4035 }
4036 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4037 (asoc->stream_queue_cnt == 0)) {
4038 struct sctp_nets *netp;
4039
4040 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4041 goto abort_out_now;
4042 }
4043 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4044 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4045 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4046 sctp_stop_timers_for_shutdown(stcb);
4047 if (asoc->alternate) {
4048 netp = asoc->alternate;
4049 } else {
4050 netp = asoc->primary_destination;
4051 }
4052 sctp_send_shutdown_ack(stcb, netp);
4053 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4054 stcb->sctp_ep, stcb, netp);
4055 }
4056 }
4057 /*********************************************/
4058 /* Here we perform PR-SCTP procedures */
4059 /* (section 4.2) */
4060 /*********************************************/
4061 /* C1. update advancedPeerAckPoint */
4062 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4063 asoc->advanced_peer_ack_point = cumack;
4064 }
4065 /* PR-Sctp issues need to be addressed too */
4066 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4067 struct sctp_tmit_chunk *lchk;
4068 uint32_t old_adv_peer_ack_point;
4069
4070 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4071 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4072 /* C3. See if we need to send a Fwd-TSN */
4073 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4074 /*
4075 * ISSUE with ECN, see FWD-TSN processing.
4076 */
4077 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4078 send_forward_tsn(stcb, asoc);
4079 } else if (lchk) {
4080 /* try to FR fwd-tsn's that get lost too */
4081 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4082 send_forward_tsn(stcb, asoc);
4083 }
4084 }
4085 }
4086 if (lchk) {
4087 /* Assure a timer is up */
4088 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4089 stcb->sctp_ep, stcb, lchk->whoTo);
4090 }
4091 }
4092 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4093 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4094 rwnd,
4095 stcb->asoc.peers_rwnd,
4096 stcb->asoc.total_flight,
4097 stcb->asoc.total_output_queue_size);
4098 }
4099 }
4100
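/*
 * Slow-path SACK processing, used when the SACK carries gap-ack
 * and/or duplicate-TSN reports in addition to the cumulative ack;
 * offset_seg and offset_dup locate those reports within the mbuf
 * chain m.
 */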
4101 void
4102 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4103 struct sctp_tcb *stcb,
4104 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4105 int *abort_now, uint8_t flags,
4106 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4107 {
4108 struct sctp_association *asoc;
4109 struct sctp_tmit_chunk *tp1, *tp2;
4110 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4111 uint16_t wake_him = 0;
4112 uint32_t send_s = 0;
4113 long j;
4114 int accum_moved = 0;
4115 int will_exit_fast_recovery = 0;
4116 uint32_t a_rwnd, old_rwnd;
4117 int win_probe_recovery = 0;
4118 int win_probe_recovered = 0;
4119 struct sctp_nets *net = NULL;
4120 int done_once;
4121 int rto_ok = 1;
4122 uint8_t reneged_all = 0;
4123 uint8_t cmt_dac_flag;
4124 /*
4125 * we take any chance we can to service our queues since we cannot
4126 * get awoken when the socket is read from :<
4127 */
	/*
	 * Now perform the actual SACK handling:
	 * 1) Verify that it is not an old sack; if so, discard.
	 * 2) If there is nothing left in the send queue (cum-ack is equal
	 *    to last acked), then you have a duplicate too; update any rwnd
	 *    change and verify no timers are running, then return.
	 * 3) Process any new consecutive data, i.e. cum-ack moved; process
	 *    these first and note that it moved.
	 * 4) Process any sack blocks.
	 * 5) Drop any acked chunks from the queue.
	 * 6) Check for any revoked blocks and mark them.
	 * 7) Update the cwnd.
	 * 8) If nothing is left, sync up flightsizes and things, stop all
	 *    timers, and also check for shutdown_pending state. If so then
	 *    go ahead and send off the shutdown. If in shutdown recv, send
	 *    off the shutdown-ack and start that timer, then return.
	 * 9) Strike any non-acked things and do the FR procedure if needed,
	 *    being sure to set the FR flag.
	 * 10) Do pr-sctp procedures.
	 * 11) Apply any FR penalties.
	 * 12) Assure we will SACK if in shutdown_recv state.
	 */
4145 SCTP_TCB_LOCK_ASSERT(stcb);
4146 /* CMT DAC algo */
4147 this_sack_lowest_newack = 0;
4148 SCTP_STAT_INCR(sctps_slowpath_sack);
4149 last_tsn = cum_ack;
4150 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4151 #ifdef SCTP_ASOCLOG_OF_TSNS
4152 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4153 stcb->asoc.cumack_log_at++;
4154 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4155 stcb->asoc.cumack_log_at = 0;
4156 }
4157 #endif
4158 a_rwnd = rwnd;
4159
4160 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4161 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4162 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4163 }
4164
4165 old_rwnd = stcb->asoc.peers_rwnd;
4166 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4167 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4168 stcb->asoc.overall_error_count,
4169 0,
4170 SCTP_FROM_SCTP_INDATA,
4171 __LINE__);
4172 }
4173 stcb->asoc.overall_error_count = 0;
4174 asoc = &stcb->asoc;
4175 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4176 sctp_log_sack(asoc->last_acked_seq,
4177 cum_ack,
4178 0,
4179 num_seg,
4180 num_dup,
4181 SCTP_LOG_NEW_SACK);
4182 }
4183 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4184 uint16_t i;
4185 uint32_t *dupdata, dblock;
4186
4187 for (i = 0; i < num_dup; i++) {
4188 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4189 sizeof(uint32_t), (uint8_t *)&dblock);
4190 if (dupdata == NULL) {
4191 break;
4192 }
4193 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4194 }
4195 }
4196 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4197 /* reality check */
4198 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4199 tp1 = TAILQ_LAST(&asoc->sent_queue,
4200 sctpchunk_listhead);
4201 send_s = tp1->rec.data.TSN_seq + 1;
4202 } else {
4203 tp1 = NULL;
4204 send_s = asoc->sending_seq;
4205 }
4206 if (SCTP_TSN_GE(cum_ack, send_s)) {
4207 struct mbuf *op_err;
4208 char msg[SCTP_DIAG_INFO_LEN];
4209
4210 /*
4211 * no way, we have not even sent this TSN out yet.
4212 * Peer is hopelessly messed up with us.
4213 */
4214 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4215 cum_ack, send_s);
4216 if (tp1) {
4217 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4218 tp1->rec.data.TSN_seq, (void *)tp1);
4219 }
4220 hopeless_peer:
4221 *abort_now = 1;
4222 /* XXX */
			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
			         cum_ack, send_s);
4225 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4226 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4227 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4228 return;
4229 }
4230 }
4231 /**********************/
4232 /* 1) check the range */
4233 /**********************/
4234 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4235 /* acking something behind */
4236 return;
4237 }
4238
4239 /* update the Rwnd of the peer */
4240 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4241 TAILQ_EMPTY(&asoc->send_queue) &&
4242 (asoc->stream_queue_cnt == 0)) {
4243 /* nothing left on send/sent and strmq */
4244 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4245 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4246 asoc->peers_rwnd, 0, 0, a_rwnd);
4247 }
4248 asoc->peers_rwnd = a_rwnd;
4249 if (asoc->sent_queue_retran_cnt) {
4250 asoc->sent_queue_retran_cnt = 0;
4251 }
4252 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4253 /* SWS sender side engages */
4254 asoc->peers_rwnd = 0;
4255 }
4256 /* stop any timers */
4257 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4258 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4259 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4260 net->partial_bytes_acked = 0;
4261 net->flight_size = 0;
4262 }
4263 asoc->total_flight = 0;
4264 asoc->total_flight_count = 0;
4265 return;
4266 }
	/*
	 * We initialize net_ack and net_ack2 to 0. These are used to track
	 * two things: net_ack tracks the total byte count acked, while
	 * net_ack2 tracks the total bytes acked that are unambiguous, i.e.
	 * were never retransmitted. We track these on a per destination
	 * address basis.
	 */
4274 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4275 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4276 /* Drag along the window_tsn for cwr's */
4277 net->cwr_window_tsn = cum_ack;
4278 }
4279 net->prev_cwnd = net->cwnd;
4280 net->net_ack = 0;
4281 net->net_ack2 = 0;
4282
4283 /*
4284 * CMT: Reset CUC and Fast recovery algo variables before
4285 * SACK processing
4286 */
4287 net->new_pseudo_cumack = 0;
4288 net->will_exit_fast_recovery = 0;
4289 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4290 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4291 }
4292 }
4293 /* process the new consecutive TSN first */
4294 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4295 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4296 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4297 accum_moved = 1;
4298 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
				/*
				 * If it is less than ACKED, it is now no
				 * longer in flight. Higher values may occur
				 * during marking.
				 */
4304 if ((tp1->whoTo->dest_state &
4305 SCTP_ADDR_UNCONFIRMED) &&
4306 (tp1->snd_count < 2)) {
					/*
					 * If there was no retransmission,
					 * the address is unconfirmed, and
					 * we sent there and are now being
					 * sacked... it is confirmed; mark
					 * it so.
					 */
4315 tp1->whoTo->dest_state &=
4316 ~SCTP_ADDR_UNCONFIRMED;
4317 }
4318 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4319 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4320 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4321 tp1->whoTo->flight_size,
4322 tp1->book_size,
4323 (uintptr_t)tp1->whoTo,
4324 tp1->rec.data.TSN_seq);
4325 }
4326 sctp_flight_size_decrease(tp1);
4327 sctp_total_flight_decrease(stcb, tp1);
4328 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4329 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4330 tp1);
4331 }
4332 }
4333 tp1->whoTo->net_ack += tp1->send_size;
4334
4335 /* CMT SFR and DAC algos */
4336 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4337 tp1->whoTo->saw_newack = 1;
4338
4339 if (tp1->snd_count < 2) {
					/*
					 * True non-retransmitted
					 * chunk
					 */
4344 tp1->whoTo->net_ack2 +=
4345 tp1->send_size;
4346
4347 /* update RTO too? */
4348 if (tp1->do_rtt) {
4349 if (rto_ok) {
4350 tp1->whoTo->RTO =
4351 sctp_calculate_rto(stcb,
4352 asoc, tp1->whoTo,
4353 &tp1->sent_rcv_time,
4354 sctp_align_safe_nocopy,
4355 SCTP_RTT_FROM_DATA);
4356 rto_ok = 0;
4357 }
4358 if (tp1->whoTo->rto_needed == 0) {
4359 tp1->whoTo->rto_needed = 1;
4360 }
4361 tp1->do_rtt = 0;
4362 }
4363 }
4364 /*
4365 * CMT: CUCv2 algorithm. From the
4366 * cumack'd TSNs, for each TSN being
4367 * acked for the first time, set the
4368 * following variables for the
4369 * corresp destination.
4370 * new_pseudo_cumack will trigger a
4371 * cwnd update.
4372 * find_(rtx_)pseudo_cumack will
4373 * trigger search for the next
4374 * expected (rtx-)pseudo-cumack.
4375 */
4376 tp1->whoTo->new_pseudo_cumack = 1;
4377 tp1->whoTo->find_pseudo_cumack = 1;
4378 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4379
4380
4381 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4382 sctp_log_sack(asoc->last_acked_seq,
4383 cum_ack,
4384 tp1->rec.data.TSN_seq,
4385 0,
4386 0,
4387 SCTP_LOG_TSN_ACKED);
4388 }
4389 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4390 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4391 }
4392 }
4393 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4394 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4395 #ifdef SCTP_AUDITING_ENABLED
4396 sctp_audit_log(0xB3,
4397 (asoc->sent_queue_retran_cnt & 0x000000ff));
4398 #endif
4399 }
4400 if (tp1->rec.data.chunk_was_revoked) {
4401 /* deflate the cwnd */
4402 tp1->whoTo->cwnd -= tp1->book_size;
4403 tp1->rec.data.chunk_was_revoked = 0;
4404 }
4405 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4406 tp1->sent = SCTP_DATAGRAM_ACKED;
4407 }
4408 }
4409 } else {
4410 break;
4411 }
4412 }
4413 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4414 /* always set this up to cum-ack */
4415 asoc->this_sack_highest_gap = last_tsn;
4416
4417 if ((num_seg > 0) || (num_nr_seg > 0)) {
4418
4419 /*
4420 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4421 * to be greater than the cumack. Also reset saw_newack to 0
4422 * for all dests.
4423 */
4424 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4425 net->saw_newack = 0;
4426 net->this_sack_highest_newack = last_tsn;
4427 }
4428
		/*
		 * this_sack_highest_gap will increase while handling NEW
		 * segments; this_sack_highest_newack will increase while
		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
		 * used for the CMT DAC algo. saw_newack will also change.
		 */
4435 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4436 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4437 num_seg, num_nr_seg, &rto_ok)) {
4438 wake_him++;
4439 }
4440 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4441 /*
4442 * validate the biggest_tsn_acked in the gap acks if
4443 * strict adherence is wanted.
4444 */
4445 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4446 /*
4447 * peer is either confused or we are under
4448 * attack. We must abort.
4449 */
4450 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4451 biggest_tsn_acked, send_s);
4452 goto hopeless_peer;
4453 }
4454 }
4455 }
4456 /*******************************************/
4457 /* cancel ALL T3-send timer if accum moved */
4458 /*******************************************/
4459 if (asoc->sctp_cmt_on_off > 0) {
4460 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4461 if (net->new_pseudo_cumack)
4462 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4463 stcb, net,
4464 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4465
4466 }
4467 } else {
4468 if (accum_moved) {
4469 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4470 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4471 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4472 }
4473 }
4474 }
4475 /********************************************/
4476 /* drop the acked chunks from the sentqueue */
4477 /********************************************/
4478 asoc->last_acked_seq = cum_ack;
4479
4480 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4481 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4482 break;
4483 }
4484 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4485 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4486 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4487 #ifdef INVARIANTS
4488 } else {
4489 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4490 #endif
4491 }
4492 }
4493 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4494 if (PR_SCTP_ENABLED(tp1->flags)) {
4495 if (asoc->pr_sctp_cnt != 0)
4496 asoc->pr_sctp_cnt--;
4497 }
4498 asoc->sent_queue_cnt--;
4499 if (tp1->data) {
4500 /* sa_ignore NO_NULL_CHK */
4501 sctp_free_bufspace(stcb, asoc, tp1, 1);
4502 sctp_m_freem(tp1->data);
4503 tp1->data = NULL;
4504 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4505 asoc->sent_queue_cnt_removeable--;
4506 }
4507 }
4508 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4509 sctp_log_sack(asoc->last_acked_seq,
4510 cum_ack,
4511 tp1->rec.data.TSN_seq,
4512 0,
4513 0,
4514 SCTP_LOG_FREE_SENT);
4515 }
4516 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4517 wake_him++;
4518 }
4519 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4520 #ifdef INVARIANTS
		panic("Warning flight size is positive and should be 0");
#else
		SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
		            asoc->total_flight);
4525 #endif
4526 asoc->total_flight = 0;
4527 }
4528
4529 #if defined(__Userspace__)
4530 if (stcb->sctp_ep->recv_callback) {
4531 if (stcb->sctp_socket) {
4532 uint32_t inqueue_bytes, sb_free_now;
4533 struct sctp_inpcb *inp;
4534
4535 inp = stcb->sctp_ep;
4536 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4537 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4538
4539 /* check if the amount free in the send socket buffer crossed the threshold */
4540 if (inp->send_callback &&
4541 (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4542 (inp->send_sb_threshold == 0))) {
4543 atomic_add_int(&stcb->asoc.refcnt, 1);
4544 SCTP_TCB_UNLOCK(stcb);
4545 inp->send_callback(stcb->sctp_socket, sb_free_now);
4546 SCTP_TCB_LOCK(stcb);
4547 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4548 }
4549 }
4550 } else if ((wake_him) && (stcb->sctp_socket)) {
4551 #else
4552 /* sa_ignore NO_NULL_CHK */
4553 if ((wake_him) && (stcb->sctp_socket)) {
4554 #endif
4555 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4556 struct socket *so;
4557
4558 #endif
4559 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4560 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4561 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4562 }
4563 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4564 so = SCTP_INP_SO(stcb->sctp_ep);
4565 atomic_add_int(&stcb->asoc.refcnt, 1);
4566 SCTP_TCB_UNLOCK(stcb);
4567 SCTP_SOCKET_LOCK(so, 1);
4568 SCTP_TCB_LOCK(stcb);
4569 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4570 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4571 /* assoc was freed while we were unlocked */
4572 SCTP_SOCKET_UNLOCK(so, 1);
4573 return;
4574 }
4575 #endif
4576 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4577 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4578 SCTP_SOCKET_UNLOCK(so, 1);
4579 #endif
4580 } else {
4581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4582 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4583 }
4584 }
4585
4586 if (asoc->fast_retran_loss_recovery && accum_moved) {
4587 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4588 /* Setup so we will exit RFC2582 fast recovery */
4589 will_exit_fast_recovery = 1;
4590 }
4591 }
	/*
	 * Check for revoked fragments:
	 *
	 * If the previous SACK had no frags, then we can't have any
	 * revoked. If the previous SACK had frags, then:
	 * - if we now have frags too (num_seg > 0), call
	 *   sctp_check_for_revoked() to tell if the peer revoked some of
	 *   them;
	 * - else the peer revoked all ACKED fragments, since we had some
	 *   before and now we have NONE.
	 */
4601
4602 if (num_seg) {
4603 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4604 asoc->saw_sack_with_frags = 1;
4605 } else if (asoc->saw_sack_with_frags) {
4606 int cnt_revoked = 0;
4607
4608 /* Peer revoked all dg's marked or acked */
4609 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4610 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4611 tp1->sent = SCTP_DATAGRAM_SENT;
4612 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4613 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4614 tp1->whoTo->flight_size,
4615 tp1->book_size,
4616 (uintptr_t)tp1->whoTo,
4617 tp1->rec.data.TSN_seq);
4618 }
4619 sctp_flight_size_increase(tp1);
4620 sctp_total_flight_increase(stcb, tp1);
4621 tp1->rec.data.chunk_was_revoked = 1;
4622 /*
4623 * To ensure that this increase in
4624 * flightsize, which is artificial,
4625 * does not throttle the sender, we
4626 * also increase the cwnd
4627 * artificially.
4628 */
4629 tp1->whoTo->cwnd += tp1->book_size;
4630 cnt_revoked++;
4631 }
4632 }
4633 if (cnt_revoked) {
4634 reneged_all = 1;
4635 }
4636 asoc->saw_sack_with_frags = 0;
4637 }
4638 if (num_nr_seg > 0)
4639 asoc->saw_sack_with_nr_frags = 1;
4640 else
4641 asoc->saw_sack_with_nr_frags = 0;
4642
4643 /* JRS - Use the congestion control given in the CC module */
4644 if (ecne_seen == 0) {
4645 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4646 if (net->net_ack2 > 0) {
				/*
				 * Karn's rule applies to clearing the error
				 * count; this is optional.
				 */
4651 net->error_count = 0;
4652 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4653 /* addr came good */
4654 net->dest_state |= SCTP_ADDR_REACHABLE;
4655 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4656 0, (void *)net, SCTP_SO_NOT_LOCKED);
4657 }
4658
4659 if (net == stcb->asoc.primary_destination) {
4660 if (stcb->asoc.alternate) {
4661 /* release the alternate, primary is good */
4662 sctp_free_remote_addr(stcb->asoc.alternate);
4663 stcb->asoc.alternate = NULL;
4664 }
4665 }
4666
4667 if (net->dest_state & SCTP_ADDR_PF) {
4668 net->dest_state &= ~SCTP_ADDR_PF;
4669 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4670 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4671 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4672 /* Done with this net */
4673 net->net_ack = 0;
4674 }
4675 /* restore any doubled timers */
4676 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4677 if (net->RTO < stcb->asoc.minrto) {
4678 net->RTO = stcb->asoc.minrto;
4679 }
4680 if (net->RTO > stcb->asoc.maxrto) {
4681 net->RTO = stcb->asoc.maxrto;
4682 }
4683 }
4684 }
4685 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4686 }
4687
4688 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4689 /* nothing left in-flight */
4690 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4691 /* stop all timers */
4692 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4693 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4694 net->flight_size = 0;
4695 net->partial_bytes_acked = 0;
4696 }
4697 asoc->total_flight = 0;
4698 asoc->total_flight_count = 0;
4699 }
4700
4701 /**********************************/
4702 /* Now what about shutdown issues */
4703 /**********************************/
4704 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4705 /* nothing left on sendqueue.. consider done */
4706 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4707 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4708 asoc->peers_rwnd, 0, 0, a_rwnd);
4709 }
4710 asoc->peers_rwnd = a_rwnd;
4711 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4712 /* SWS sender side engages */
4713 asoc->peers_rwnd = 0;
4714 }
4715 /* clean up */
4716 if ((asoc->stream_queue_cnt == 1) &&
4717 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4718 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4719 (asoc->locked_on_sending)
4720 ) {
4721 struct sctp_stream_queue_pending *sp;
			/*
			 * I may be in a state where we got all across...
			 * but cannot write more due to a shutdown... we
			 * abort since the user did not indicate EOR in
			 * this case.
			 */
4727 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4728 sctp_streamhead);
4729 if ((sp) && (sp->length == 0)) {
4730 asoc->locked_on_sending = NULL;
4731 if (sp->msg_is_complete) {
4732 asoc->stream_queue_cnt--;
4733 } else {
4734 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4735 asoc->stream_queue_cnt--;
4736 }
4737 }
4738 }
4739 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4740 (asoc->stream_queue_cnt == 0)) {
4741 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4742 /* Need to abort here */
4743 struct mbuf *op_err;
4744
4745 abort_out_now:
4746 *abort_now = 1;
4747 /* XXX */
4748 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4749 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4750 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4751 return;
4752 } else {
4753 struct sctp_nets *netp;
4754
4755 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4756 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4757 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4758 }
4759 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4760 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4761 sctp_stop_timers_for_shutdown(stcb);
4762 if (asoc->alternate) {
4763 netp = asoc->alternate;
4764 } else {
4765 netp = asoc->primary_destination;
4766 }
4767 sctp_send_shutdown(stcb, netp);
4768 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4769 stcb->sctp_ep, stcb, netp);
4770 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4771 stcb->sctp_ep, stcb, netp);
4772 }
4773 return;
4774 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4775 (asoc->stream_queue_cnt == 0)) {
4776 struct sctp_nets *netp;
4777
4778 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4779 goto abort_out_now;
4780 }
4781 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4782 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4783 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4784 sctp_stop_timers_for_shutdown(stcb);
4785 if (asoc->alternate) {
4786 netp = asoc->alternate;
4787 } else {
4788 netp = asoc->primary_destination;
4789 }
4790 sctp_send_shutdown_ack(stcb, netp);
4791 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4792 stcb->sctp_ep, stcb, netp);
4793 return;
4794 }
4795 }
4796 /*
4797 * Now here we are going to recycle net_ack for a different use...
4798 * HEADS UP.
4799 */
4800 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4801 net->net_ack = 0;
4802 }
4803
4804 /*
4805 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4806 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4807 * automatically ensure that.
4808 */
4809 if ((asoc->sctp_cmt_on_off > 0) &&
4810 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4811 (cmt_dac_flag == 0)) {
4812 this_sack_lowest_newack = cum_ack;
4813 }
4814 if ((num_seg > 0) || (num_nr_seg > 0)) {
4815 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4816 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4817 }
4818 /* JRS - Use the congestion control given in the CC module */
4819 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4820
4821 /* Now are we exiting loss recovery ? */
4822 if (will_exit_fast_recovery) {
4823 /* Ok, we must exit fast recovery */
4824 asoc->fast_retran_loss_recovery = 0;
4825 }
4826 if ((asoc->sat_t3_loss_recovery) &&
4827 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4828 /* end satellite t3 loss recovery */
4829 asoc->sat_t3_loss_recovery = 0;
4830 }
4831 /*
4832 * CMT Fast recovery
4833 */
4834 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4835 if (net->will_exit_fast_recovery) {
4836 /* Ok, we must exit fast recovery */
4837 net->fast_retran_loss_recovery = 0;
4838 }
4839 }
4840
4841 /* Adjust and set the new rwnd value */
4842 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4843 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4844 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4845 }
4846 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4847 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4848 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4849 /* SWS sender side engages */
4850 asoc->peers_rwnd = 0;
4851 }
4852 if (asoc->peers_rwnd > old_rwnd) {
4853 win_probe_recovery = 1;
4854 }
4855
	/*
	 * Now we must set things up so that a timer is running for anyone
	 * with outstanding data.
	 */
4860 done_once = 0;
4861 again:
4862 j = 0;
4863 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4864 if (win_probe_recovery && (net->window_probe)) {
4865 win_probe_recovered = 1;
			/*-
			 * Find the first chunk that was used for a window
			 * probe and clear the event. Put it back into the
			 * send queue as if it had not been sent.
			 */
4872 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4873 if (tp1->window_probe) {
4874 sctp_window_probe_recovery(stcb, asoc, tp1);
4875 break;
4876 }
4877 }
4878 }
4879 if (net->flight_size) {
4880 j++;
4881 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4882 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4883 stcb->sctp_ep, stcb, net);
4884 }
4885 if (net->window_probe) {
4886 net->window_probe = 0;
4887 }
4888 } else {
4889 if (net->window_probe) {
				/* For window probes we must ensure a timer is still running there */
4891 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4892 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4893 stcb->sctp_ep, stcb, net);
4894
4895 }
4896 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4897 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4898 stcb, net,
4899 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4900 }
4901 }
4902 }
4903 if ((j == 0) &&
4904 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4905 (asoc->sent_queue_retran_cnt == 0) &&
4906 (win_probe_recovered == 0) &&
4907 (done_once == 0)) {
		/*
		 * Huh, this should not happen unless all packets are
		 * PR-SCTP and marked to be skipped, of course.
		 */
4911 if (sctp_fs_audit(asoc)) {
4912 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4913 net->flight_size = 0;
4914 }
4915 asoc->total_flight = 0;
4916 asoc->total_flight_count = 0;
4917 asoc->sent_queue_retran_cnt = 0;
4918 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4919 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4920 sctp_flight_size_increase(tp1);
4921 sctp_total_flight_increase(stcb, tp1);
4922 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4923 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4924 }
4925 }
4926 }
4927 done_once = 1;
4928 goto again;
4929 }
4930 /*********************************************/
4931 /* Here we perform PR-SCTP procedures */
4932 /* (section 4.2) */
4933 /*********************************************/
4934 /* C1. update advancedPeerAckPoint */
4935 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4936 asoc->advanced_peer_ack_point = cum_ack;
4937 }
4938 /* C2. try to further move advancedPeerAckPoint ahead */
4939 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4940 struct sctp_tmit_chunk *lchk;
4941 uint32_t old_adv_peer_ack_point;
4942
4943 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4944 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4945 /* C3. See if we need to send a Fwd-TSN */
4946 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4947 /*
4948 * ISSUE with ECN, see FWD-TSN processing.
4949 */
4950 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4951 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4952 0xee, cum_ack, asoc->advanced_peer_ack_point,
4953 old_adv_peer_ack_point);
4954 }
4955 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4956 send_forward_tsn(stcb, asoc);
4957 } else if (lchk) {
4958 /* try to FR fwd-tsn's that get lost too */
4959 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4960 send_forward_tsn(stcb, asoc);
4961 }
4962 }
4963 }
4964 if (lchk) {
4965 /* Assure a timer is up */
4966 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4967 stcb->sctp_ep, stcb, lchk->whoTo);
4968 }
4969 }
4970 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4971 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4972 a_rwnd,
4973 stcb->asoc.peers_rwnd,
4974 stcb->asoc.total_flight,
4975 stcb->asoc.total_output_queue_size);
4976 }
4977 }
4978
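/*
 * A SHUTDOWN chunk carries a cumulative TSN ack, so we run it through
 * the express SACK path; a_rwnd is chosen so that the peer's window
 * comes out unchanged.
 */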
4979 void
4980 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4981 {
4982 /* Copy cum-ack */
4983 uint32_t cum_ack, a_rwnd;
4984
4985 cum_ack = ntohl(cp->cumulative_tsn_ack);
4986 /* Arrange so a_rwnd does NOT change */
4987 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4988
4989 /* Now call the express sack handling */
4990 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4991 }
4992
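/*
 * After a FWD-TSN has advanced last_sequence_delivered, deliver any
 * queued ordered messages on this stream at or below that sequence
 * number, then resume normal consecutive in-order delivery.
 */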
4993 static void
4994 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4995 struct sctp_stream_in *strmin)
4996 {
4997 struct sctp_queued_to_read *ctl, *nctl;
4998 struct sctp_association *asoc;
4999 uint16_t tt;
5000
5001 asoc = &stcb->asoc;
5002 tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything prior to and including the stream
	 * sequence number that came in.
	 */
5007 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5008 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5009 /* this is deliverable now */
5010 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5011 /* subtract pending on streams */
5012 asoc->size_on_all_streams -= ctl->length;
5013 sctp_ucount_decr(asoc->cnt_on_all_streams);
5014 /* deliver it to at least the delivery-q */
5015 if (stcb->sctp_socket) {
5016 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5017 sctp_add_to_readq(stcb->sctp_ep, stcb,
5018 ctl,
5019 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5020 }
5021 } else {
5022 /* no more delivery now. */
5023 break;
5024 }
5025 }
	/*
	 * Now we must deliver things in the queue the normal way, if any
	 * are now ready.
	 */
5030 tt = strmin->last_sequence_delivered + 1;
5031 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5032 if (tt == ctl->sinfo_ssn) {
5033 /* this is deliverable now */
5034 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5035 /* subtract pending on streams */
5036 asoc->size_on_all_streams -= ctl->length;
5037 sctp_ucount_decr(asoc->cnt_on_all_streams);
5038 /* deliver it to at least the delivery-q */
5039 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5040 if (stcb->sctp_socket) {
5041 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5042 sctp_add_to_readq(stcb->sctp_ep, stcb,
5043 ctl,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
			}
5047 tt = strmin->last_sequence_delivered + 1;
5048 } else {
5049 break;
5050 }
5051 }
5052 }
5053
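/*
 * Toss reassembly-queue fragments belonging to the given stream and
 * sequence number that the peer is skipping via FWD-TSN, fixing up the
 * pd-api bookkeeping and the stream's last_sequence_delivered as we go.
 */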
5054 static void
5055 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5056 struct sctp_association *asoc,
5057 uint16_t stream, uint16_t seq)
5058 {
5059 struct sctp_tmit_chunk *chk, *nchk;
5060
	/* For each chunk on the queue, see if we need to toss it. */
	/*
	 * For now, large messages held on the reasmqueue that are
	 * complete will be tossed too. In theory we could do more
	 * work: spin through, stop after dumping one message (i.e. on
	 * seeing the start of a new message at the head), and call the
	 * delivery function to see if it can be delivered. But for now
	 * we just dump everything on the queue.
	 */
5070 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		/* Do not toss it if it is on a different stream or is
		 * marked for unordered delivery, in which case the
		 * stream sequence number has no meaning.
		 */
5075 if ((chk->rec.data.stream_number != stream) ||
5076 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5077 continue;
5078 }
5079 if (chk->rec.data.stream_seq == seq) {
5080 /* It needs to be tossed */
5081 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5082 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5083 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5084 asoc->str_of_pdapi = chk->rec.data.stream_number;
5085 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5086 asoc->fragment_flags = chk->rec.data.rcv_flags;
5087 }
5088 asoc->size_on_reasm_queue -= chk->send_size;
5089 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5090
5091 /* Clear up any stream problem */
5092 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5093 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5094 /*
5095 * We must dump forward this streams
5096 * sequence number if the chunk is
5097 * not unordered that is being
5098 * skipped. There is a chance that
5099 * if the peer does not include the
5100 * last fragment in its FWD-TSN we
5101 * WILL have a problem here since
5102 * you would have a partial chunk in
5103 * queue that may not be
5104 * deliverable. Also if a Partial
5105 * delivery API as started the user
5106 * may get a partial chunk. The next
5107 * read returning a new chunk...
5108 * really ugly but I see no way
5109 * around it! Maybe a notify??
5110 */
5111 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5112 }
5113 if (chk->data) {
5114 sctp_m_freem(chk->data);
5115 chk->data = NULL;
5116 }
5117 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5118 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
			/* If the stream_seq is greater than the one being purged, we are done. */
5120 break;
5121 }
5122 }
5123 }
5124
5125
5126 void
5127 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5128 struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
5130 {
	/* The pr-sctp fwd tsn */
	/*
	 * Here we perform all the data-receiver-side steps for processing
	 * a FWD-TSN, as required by the PR-SCTP specification.
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update the re-ordering queues on pr-in-streams
	 * 4) clean up the re-assembly queue
	 * 5) send a SACK to report where we are
	 */
5143 struct sctp_association *asoc;
5144 uint32_t new_cum_tsn, gap;
5145 unsigned int i, fwd_sz, m_size;
5146 uint32_t str_seq;
5147 struct sctp_stream_in *strm;
5148 struct sctp_tmit_chunk *chk, *nchk;
5149 struct sctp_queued_to_read *ctl, *sv;
5150
5151 asoc = &stcb->asoc;
5152 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
			"Bad size: fwd-tsn chunk too small\n");
5155 return;
5156 }
5157 m_size = (stcb->asoc.mapping_array_size << 3);
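	/*
	 * m_size is the number of TSNs the mapping array can track, one
	 * bit per TSN (hence bytes << 3).
	 */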
5158 /*************************************************************/
5159 /* 1. Here we update local cumTSN and shift the bitmap array */
5160 /*************************************************************/
5161 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5162
5163 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5164 /* Already got there ... */
5165 return;
5166 }
5167 /*
5168 * now we know the new TSN is more advanced, let's find the actual
5169 * gap
5170 */
5171 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
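	/*
	 * gap is the offset of new_cum_tsn from the base of the mapping
	 * array, computed with serial-number arithmetic so that TSN
	 * wraparound is handled (e.g. base 0xFFFFFFF0 and new_cum_tsn
	 * 0x00000005 give a gap of 0x15).
	 */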
5172 asoc->cumulative_tsn = new_cum_tsn;
5173 if (gap >= m_size) {
5174 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5175 struct mbuf *op_err;
5176 char msg[SCTP_DIAG_INFO_LEN];
5177
5178 /*
5179 * out of range (of single byte chunks in the rwnd I
5180 * give out). This must be an attacker.
5181 */
5182 *abort_flag = 1;
5183 snprintf(msg, sizeof(msg),
5184 "New cum ack %8.8x too high, highest TSN %8.8x",
5185 new_cum_tsn, asoc->highest_tsn_inside_map);
5186 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5188 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5189 return;
5190 }
5191 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5192
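		/*
		 * The new cumulative TSN is beyond what the mapping arrays
		 * can cover, so everything they track is now acked: reset
		 * both maps to start just past new_cum_tsn.
		 */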
5193 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5194 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5195 asoc->highest_tsn_inside_map = new_cum_tsn;
5196
5197 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5198 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5199
5200 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5201 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5202 }
5203 } else {
5204 SCTP_TCB_LOCK_ASSERT(stcb);
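		/*
		 * Mark, in the non-renegable map, each TSN up to the gap
		 * that we have not already seen; the FWD-TSN covers them,
		 * so they cannot be renegged.
		 */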
5205 for (i = 0; i <= gap; i++) {
5206 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5207 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5208 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5209 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5210 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5211 }
5212 }
5213 }
5214 }
5215 /*************************************************************/
5216 /* 2. Clear up re-assembly queue */
5217 /*************************************************************/
5218 /*
5219 * First service it if pd-api is up, just in case we can progress it
5220 * forward
5221 */
5222 if (asoc->fragmented_delivery_inprogress) {
5223 sctp_service_reassembly(stcb, asoc);
5224 }
	/* For each chunk on the queue, see if we need to toss it. */
	/*
	 * For now, large messages held on the reasmqueue that are
	 * complete will be tossed too. In theory we could do more
	 * work: spin through, stop after dumping one message (i.e. on
	 * seeing the start of a new message at the head), and call the
	 * delivery function to see if it can be delivered. But for now
	 * we just dump everything on the queue.
	 */
5234 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5235 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5236 /* It needs to be tossed */
5237 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5238 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5239 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5240 asoc->str_of_pdapi = chk->rec.data.stream_number;
5241 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5242 asoc->fragment_flags = chk->rec.data.rcv_flags;
5243 }
5244 asoc->size_on_reasm_queue -= chk->send_size;
5245 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5246
5247 /* Clear up any stream problem */
5248 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5249 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5250 /*
5251 * We must dump forward this streams
5252 * sequence number if the chunk is
5253 * not unordered that is being
5254 * skipped. There is a chance that
5255 * if the peer does not include the
5256 * last fragment in its FWD-TSN we
5257 * WILL have a problem here since
5258 * you would have a partial chunk in
5259 * queue that may not be
5260 * deliverable. Also if a Partial
5261 * delivery API as started the user
5262 * may get a partial chunk. The next
5263 * read returning a new chunk...
5264 * really ugly but I see no way
5265 * around it! Maybe a notify??
5266 */
5267 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5268 }
5269 if (chk->data) {
5270 sctp_m_freem(chk->data);
5271 chk->data = NULL;
5272 }
5273 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5274 } else {
5275 /*
5276 * Ok we have gone beyond the end of the
5277 * fwd-tsn's mark.
5278 */
5279 break;
5280 }
5281 }
5282 /*******************************************************/
5283 /* 3. Update the PR-stream re-ordering queues and fix */
5284 /* delivery issues as needed. */
5285 /*******************************************************/
5286 fwd_sz -= sizeof(*fwd);
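	/*
	 * Any payload left after the fixed chunk header is a list of
	 * sctp_strseq entries, one (stream, sequence) pair per message
	 * being skipped.
	 */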
5287 if (m && fwd_sz) {
		/* New method: explicit stream/sequence pairs follow the chunk header. */
5289 unsigned int num_str;
5290 struct sctp_strseq *stseq, strseqbuf;

		offset += sizeof(*fwd);
5292
5293 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5294 num_str = fwd_sz / sizeof(struct sctp_strseq);
5295 for (i = 0; i < num_str; i++) {
5297 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5298 sizeof(struct sctp_strseq),
5299 (uint8_t *)&strseqbuf);
5300 offset += sizeof(struct sctp_strseq);
5301 if (stseq == NULL) {
5302 break;
5303 }
			/* Convert to host byte order. */
			stseq->stream = ntohs(stseq->stream);
			stseq->sequence = ntohs(stseq->sequence);
5309
5310 /* now process */
5311
5312 /*
5313 * Ok we now look for the stream/seq on the read queue
5314 * where its not all delivered. If we find it we transmute the
5315 * read entry into a PDI_ABORTED.
5316 */
5317 if (stseq->stream >= asoc->streamincnt) {
5318 /* screwed up streams, stop! */
5319 break;
5320 }
5321 if ((asoc->str_of_pdapi == stseq->stream) &&
5322 (asoc->ssn_of_pdapi == stseq->sequence)) {
				/* If this is the message we were partially
				 * delivering, then we no longer are. Note
				 * this will change with the reassembly
				 * re-write.
				 */
5327 asoc->fragmented_delivery_inprogress = 0;
5328 }
5329 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5330 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5331 if ((ctl->sinfo_stream == stseq->stream) &&
5332 (ctl->sinfo_ssn == stseq->sequence)) {
5333 str_seq = (stseq->stream << 16) | stseq->sequence;
5334 ctl->end_added = 1;
5335 ctl->pdapi_aborted = 1;
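					/*
					 * Temporarily point control_pdapi at
					 * this entry so the notification path
					 * reports the aborted message, then
					 * restore the old value.
					 */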
5336 sv = stcb->asoc.control_pdapi;
5337 stcb->asoc.control_pdapi = ctl;
5338 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5339 stcb,
5340 SCTP_PARTIAL_DELIVERY_ABORTED,
5341 (void *)&str_seq,
5342 SCTP_SO_NOT_LOCKED);
5343 stcb->asoc.control_pdapi = sv;
5344 break;
5345 } else if ((ctl->sinfo_stream == stseq->stream) &&
5346 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5347 /* We are past our victim SSN */
5348 break;
5349 }
5350 }
5351 strm = &asoc->strmin[stseq->stream];
5352 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5353 /* Update the sequence number */
5354 strm->last_sequence_delivered = stseq->sequence;
5355 }
5356 /* now kick the stream the new way */
5357 /*sa_ignore NO_NULL_CHK*/
5358 sctp_kick_prsctp_reorder_queue(stcb, strm);
5359 }
5360 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5361 }
5362 /*
5363 * Now slide thing forward.
5364 */
5365 sctp_slide_mapping_arrays(stcb);
5366
5367 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* Now let's kick out and check for more fragmented delivery. */
5369 /*sa_ignore NO_NULL_CHK*/
5370 sctp_deliver_reasm_check(stcb, &stcb->asoc);
5371 }
5372 }
5373