1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #ifdef __FreeBSD__
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 361243 2020-05-19 07:23:35Z tuexen $");
38 #endif
39
40 #include <netinet/sctp_os.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_input.h>
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_timer.h>
53 #include <netinet/sctp_crc32.h>
54 #if defined(__FreeBSD__)
55 #include <netinet/sctp_kdtrace.h>
56 #endif
57 #if defined(INET) || defined(INET6)
58 #if !defined(__Userspace_os_Windows)
59 #include <netinet/udp.h>
60 #endif
61 #endif
62 #if defined(__FreeBSD__)
63 #include <sys/smp.h>
64 #endif
65
66 #if defined(__APPLE__)
67 #define APPLE_FILE_NO 2
68 #endif
69
70
71 static void
sctp_stop_all_cookie_timers(struct sctp_tcb * stcb)72 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
73 {
74 struct sctp_nets *net;
75
76 /* This now not only stops all cookie timers
77 * it also stops any INIT timers as well. This
78 * will make sure that the timers are stopped in
79 * all collision cases.
80 */
81 SCTP_TCB_LOCK_ASSERT(stcb);
82 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
83 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
84 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
85 stcb->sctp_ep,
86 stcb,
87 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
88 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
89 sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
90 stcb->sctp_ep,
91 stcb,
92 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
93 }
94 }
95 }
96
/*
 * INIT handler.
 *
 * Validates an incoming INIT chunk and either aborts (invalid parameters,
 * no listener) or answers with an INIT-ACK (or a SHUTDOWN-ACK when we are
 * already in SHUTDOWN-ACK-SENT).  When stcb is NULL the inp read lock is
 * taken for the duration of the call.  On the abort paths that involve an
 * existing stcb, *abort_no_unlock is set to 1 to tell the caller that the
 * stcb lock has already been released by sctp_abort_association().
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock,
#if defined(__FreeBSD__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
		(void *)stcb);
	if (stcb == NULL) {
		/* No association yet: protect the inp while we work. */
		SCTP_INP_RLOCK(inp);
	}
	/* validate length */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	init = &cp->init;
	/* An initiate tag of zero is forbidden by the protocol. */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* The advertised receiver window must meet our minimum. */
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* Verify the AUTH-related parameters embedded in the INIT. */
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	                                   offset + ntohs(cp->ch.chunk_length))) {
		/* auth parameter(s) error... send abort */
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		                             "Problem with AUTH parameters");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* We are only accepting if we have a listening socket.*/
	if ((stcb == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	     (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	     (!SCTP_IS_LISTENING(inp)))) {
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed.
		 * the lookup will always find the existing assoc so stcb
		 * would not be NULL. It may be questionable to do this
		 * since we COULD just send back the INIT-ACK and hope that
		 * the app did accept()'s by the time the COOKIE was sent. But
		 * there is a price to pay for COOKIE generation and I don't
		 * want to pay it on the chance that the app will actually do
		 * some accepts(). The App just looses and should NOT be in
		 * this state :-)
		 */
		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
			/* Blackhole disabled: answer with an ABORT. */
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			                             "No listener");
			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
#if defined(__FreeBSD__)
			                mflowtype, mflowid, inp->fibnum,
#endif
			                vrf_id, port);
		}
		goto outnow;
	}
	if ((stcb != NULL) &&
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Collision with our own shutdown: re-send SHUTDOWN-ACK. */
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
		sctp_send_shutdown_ack(stcb, NULL);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
	} else {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
		sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
		                       src, dst, sh, cp,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
	}
 outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}
239
240 /*
241 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
242 */
243
/*
 * Returns non-zero if any stream has true unsent data queued.
 *
 * As a side effect this performs deferred cleanup: stream-queue entries
 * that are complete, empty and fully handed over by the sender are removed
 * and freed here.  Runs under the TCB send lock.
 */
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	int unsent_data;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	/* This function returns if any stream has true unsent data on it.
	 * Note that as it looks through it will clean up any places that
	 * have old data that has been sent but left at top of stream queue.
	 */
	asoc = &stcb->asoc;
	unsent_data = 0;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/*sa_ignore FREED_MEMORY*/
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/* We are doing differed cleanup. Last
				 * time through when we took all the data
				 * the sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					/* Inconsistent queue state; log it loudly. */
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					            sp->sender_all_done,
					            sp->length,
					            sp->msg_is_complete,
					            sp->put_last_out);
				}
				/* Unlink from the stream queue and the scheduler. */
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp, 1);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
				/* Anything left behind the freed entry counts as unsent. */
				if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
					unsent_data++;
				}
			} else {
				unsent_data++;
			}
			if (unsent_data > 0) {
				/* One hit is enough; stop scanning. */
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}
312
/*
 * Process the peer's INIT/INIT-ACK parameters into the association:
 * record the peer vtag/rwnd, initialize TSN tracking, trim outbound
 * streams the peer will not accept, and (re)allocate the inbound stream
 * array.  Returns 0 on success, -1 if stream memory cannot be allocated.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}

		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		/* Peer accepts fewer inbound streams than we pre-opened. */
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First purge already-chunked data on dropped stream ids. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.sid >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
				}
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					/* Tell the ULP its datagram will never be sent. */
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					                0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/*sa_ignore FREED_MEMORY*/
			}
		}
		/* Then drain the per-stream queues of the dropped streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					                stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/*sa_ignore FREED_MEMORY*/
				}
				outs->state = SCTP_STREAM_CLOSED;
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->streamoutcnt = asoc->pre_open_streams;
	if (asoc->strmout) {
		/* Mark all surviving outbound streams open. */
		for (i = 0; i < asoc->streamoutcnt; i++) {
			asoc->strmout[i].state = SCTP_STREAM_OPEN;
		}
	}
	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		for (i = 0; i < asoc->streamincnt; i++) {
			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound stream count is min(peer outbound, our max inbound). */
	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
		asoc->streamincnt = ntohs(init->num_outbound_streams);
	} else {
		asoc->streamincnt = asoc->max_inbound_streams;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	            sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].sid = i;
		asoc->strmin[i].last_mid_delivered = 0xffffffff;
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
		asoc->strmin[i].pd_api_started = 0;
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
460
461 /*
462 * INIT-ACK message processing/consumption returns value < 0 on error
463 */
464 static int
sctp_process_init_ack(struct mbuf * m,int iphlen,int offset,struct sockaddr * src,struct sockaddr * dst,struct sctphdr * sh,struct sctp_init_ack_chunk * cp,struct sctp_tcb * stcb,struct sctp_nets * net,int * abort_no_unlock,uint8_t mflowtype,uint32_t mflowid,uint32_t vrf_id)465 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
466 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
467 struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
468 struct sctp_nets *net, int *abort_no_unlock,
469 #if defined(__FreeBSD__)
470 uint8_t mflowtype, uint32_t mflowid,
471 #endif
472 uint32_t vrf_id)
473 {
474 struct sctp_association *asoc;
475 struct mbuf *op_err;
476 int retval, abort_flag, cookie_found;
477 int initack_limit;
478 int nat_friendly = 0;
479
480 /* First verify that we have no illegal param's */
481 abort_flag = 0;
482 cookie_found = 0;
483
484 op_err = sctp_arethere_unrecognized_parameters(m,
485 (offset + sizeof(struct sctp_init_chunk)),
486 &abort_flag, (struct sctp_chunkhdr *)cp,
487 &nat_friendly, &cookie_found);
488 if (abort_flag) {
489 /* Send an abort and notify peer */
490 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
491 *abort_no_unlock = 1;
492 return (-1);
493 }
494 if (!cookie_found) {
495 uint16_t len;
496
497 /* Only report the missing cookie parameter */
498 if (op_err != NULL) {
499 sctp_m_freem(op_err);
500 }
501 len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
502 /* We abort with an error of missing mandatory param */
503 op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
504 if (op_err != NULL) {
505 struct sctp_error_missing_param *cause;
506
507 SCTP_BUF_LEN(op_err) = len;
508 cause = mtod(op_err, struct sctp_error_missing_param *);
509 /* Subtract the reserved param */
510 cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
511 cause->cause.length = htons(len);
512 cause->num_missing_params = htonl(1);
513 cause->type[0] = htons(SCTP_STATE_COOKIE);
514 }
515 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
516 src, dst, sh, op_err,
517 #if defined(__FreeBSD__)
518 mflowtype, mflowid,
519 #endif
520 vrf_id, net->port);
521 *abort_no_unlock = 1;
522 return (-3);
523 }
524 asoc = &stcb->asoc;
525 asoc->peer_supports_nat = (uint8_t)nat_friendly;
526 /* process the peer's parameters in the INIT-ACK */
527 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
528 if (retval < 0) {
529 if (op_err != NULL) {
530 sctp_m_freem(op_err);
531 }
532 return (retval);
533 }
534 initack_limit = offset + ntohs(cp->ch.chunk_length);
535 /* load all addresses */
536 if ((retval = sctp_load_addresses_from_init(stcb, m,
537 (offset + sizeof(struct sctp_init_chunk)), initack_limit,
538 src, dst, NULL, stcb->asoc.port))) {
539 if (op_err != NULL) {
540 sctp_m_freem(op_err);
541 }
542 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
543 "Problem with address parameters");
544 SCTPDBG(SCTP_DEBUG_INPUT1,
545 "Load addresses from INIT causes an abort %d\n",
546 retval);
547 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
548 src, dst, sh, op_err,
549 #if defined(__FreeBSD__)
550 mflowtype, mflowid,
551 #endif
552 vrf_id, net->port);
553 *abort_no_unlock = 1;
554 return (-1);
555 }
556 /* if the peer doesn't support asconf, flush the asconf queue */
557 if (asoc->asconf_supported == 0) {
558 struct sctp_asconf_addr *param, *nparam;
559
560 TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
561 TAILQ_REMOVE(&asoc->asconf_queue, param, next);
562 SCTP_FREE(param, SCTP_M_ASC_ADDR);
563 }
564 }
565
566 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
567 stcb->asoc.local_hmacs);
568 if (op_err) {
569 sctp_queue_op_err(stcb, op_err);
570 /* queuing will steal away the mbuf chain to the out queue */
571 op_err = NULL;
572 }
573 /* extract the cookie and queue it to "echo" it back... */
574 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
575 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
576 stcb->asoc.overall_error_count,
577 0,
578 SCTP_FROM_SCTP_INPUT,
579 __LINE__);
580 }
581 stcb->asoc.overall_error_count = 0;
582 net->error_count = 0;
583
584 /*
585 * Cancel the INIT timer, We do this first before queueing the
586 * cookie. We always cancel at the primary to assue that we are
587 * canceling the timer started by the INIT which always goes to the
588 * primary.
589 */
590 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
591 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
592
593 /* calculate the RTO */
594 sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
595 SCTP_RTT_FROM_NON_DATA);
596 #if defined(__Userspace__)
597 if (stcb->sctp_ep->recv_callback) {
598 if (stcb->sctp_socket) {
599 uint32_t inqueue_bytes, sb_free_now;
600 struct sctp_inpcb *inp;
601
602 inp = stcb->sctp_ep;
603 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
604 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
605
606 /* check if the amount free in the send socket buffer crossed the threshold */
607 if (inp->send_callback &&
608 (((inp->send_sb_threshold > 0) &&
609 (sb_free_now >= inp->send_sb_threshold) &&
610 (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
611 (inp->send_sb_threshold == 0))) {
612 atomic_add_int(&stcb->asoc.refcnt, 1);
613 SCTP_TCB_UNLOCK(stcb);
614 inp->send_callback(stcb->sctp_socket, sb_free_now);
615 SCTP_TCB_LOCK(stcb);
616 atomic_subtract_int(&stcb->asoc.refcnt, 1);
617 }
618 }
619 }
620 #endif
621 retval = sctp_send_cookie_echo(m, offset, initack_limit, stcb, net);
622 return (retval);
623 }
624
625 static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk * cp,struct sctp_tcb * stcb,struct sctp_nets * net)626 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
627 struct sctp_tcb *stcb, struct sctp_nets *net)
628 {
629 union sctp_sockstore store;
630 struct sctp_nets *r_net, *f_net;
631 struct timeval tv;
632 int req_prim = 0;
633 uint16_t old_error_counter;
634
635 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
636 /* Invalid length */
637 return;
638 }
639
640 memset(&store, 0, sizeof(store));
641 switch (cp->heartbeat.hb_info.addr_family) {
642 #ifdef INET
643 case AF_INET:
644 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
645 store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
646 #ifdef HAVE_SIN_LEN
647 store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
648 #endif
649 store.sin.sin_port = stcb->rport;
650 memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
651 sizeof(store.sin.sin_addr));
652 } else {
653 return;
654 }
655 break;
656 #endif
657 #ifdef INET6
658 case AF_INET6:
659 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
660 store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
661 #ifdef HAVE_SIN6_LEN
662 store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
663 #endif
664 store.sin6.sin6_port = stcb->rport;
665 memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
666 } else {
667 return;
668 }
669 break;
670 #endif
671 #if defined(__Userspace__)
672 case AF_CONN:
673 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) {
674 store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family;
675 #ifdef HAVE_SCONN_LEN
676 store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len;
677 #endif
678 store.sconn.sconn_port = stcb->rport;
679 memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *));
680 } else {
681 return;
682 }
683 break;
684 #endif
685 default:
686 return;
687 }
688 r_net = sctp_findnet(stcb, &store.sa);
689 if (r_net == NULL) {
690 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
691 return;
692 }
693 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
694 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
695 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
696 /*
697 * If the its a HB and it's random value is correct when can
698 * confirm the destination.
699 */
700 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
701 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
702 stcb->asoc.primary_destination = r_net;
703 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
704 f_net = TAILQ_FIRST(&stcb->asoc.nets);
705 if (f_net != r_net) {
706 /* first one on the list is NOT the primary
707 * sctp_cmpaddr() is much more efficient if
708 * the primary is the first on the list, make it
709 * so.
710 */
711 TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
712 TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
713 }
714 req_prim = 1;
715 }
716 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
717 stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
718 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
719 r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
720 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
721 }
722 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
723 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
724 stcb->asoc.overall_error_count,
725 0,
726 SCTP_FROM_SCTP_INPUT,
727 __LINE__);
728 }
729 stcb->asoc.overall_error_count = 0;
730 old_error_counter = r_net->error_count;
731 r_net->error_count = 0;
732 r_net->hb_responded = 1;
733 tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
734 tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
735 /* Now lets do a RTO with this */
736 sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv,
737 SCTP_RTT_FROM_NON_DATA);
738 if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
739 r_net->dest_state |= SCTP_ADDR_REACHABLE;
740 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
741 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
742 }
743 if (r_net->dest_state & SCTP_ADDR_PF) {
744 r_net->dest_state &= ~SCTP_ADDR_PF;
745 stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
746 }
747 if (old_error_counter > 0) {
748 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
749 stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
750 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
751 }
752 if (r_net == stcb->asoc.primary_destination) {
753 if (stcb->asoc.alternate) {
754 /* release the alternate, primary is good */
755 sctp_free_remote_addr(stcb->asoc.alternate);
756 stcb->asoc.alternate = NULL;
757 }
758 }
759 /* Mobility adaptation */
760 if (req_prim) {
761 if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
762 SCTP_MOBILITY_BASE) ||
763 sctp_is_mobility_feature_on(stcb->sctp_ep,
764 SCTP_MOBILITY_FASTHANDOFF)) &&
765 sctp_is_mobility_feature_on(stcb->sctp_ep,
766 SCTP_MOBILITY_PRIM_DELETED)) {
767
768 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
769 stcb->sctp_ep, stcb, NULL,
770 SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
771 if (sctp_is_mobility_feature_on(stcb->sctp_ep,
772 SCTP_MOBILITY_FASTHANDOFF)) {
773 sctp_assoc_immediate_retrans(stcb,
774 stcb->asoc.primary_destination);
775 }
776 if (sctp_is_mobility_feature_on(stcb->sctp_ep,
777 SCTP_MOBILITY_BASE)) {
778 sctp_move_chunks_from_net(stcb,
779 stcb->asoc.deleted_primary);
780 }
781 sctp_delete_prim_timer(stcb->sctp_ep, stcb);
782 }
783 }
784 }
785
786 static int
sctp_handle_nat_colliding_state(struct sctp_tcb * stcb)787 sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
788 {
789 /*
790 * Return 0 means we want you to proceed with the abort
791 * non-zero means no abort processing.
792 */
793 uint32_t new_vtag;
794 struct sctpasochead *head;
795
796 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
797 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
798 new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
799 atomic_add_int(&stcb->asoc.refcnt, 1);
800 SCTP_TCB_UNLOCK(stcb);
801 SCTP_INP_INFO_WLOCK();
802 SCTP_TCB_LOCK(stcb);
803 atomic_subtract_int(&stcb->asoc.refcnt, 1);
804 } else {
805 return (0);
806 }
807 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
808 /* generate a new vtag and send init */
809 LIST_REMOVE(stcb, sctp_asocs);
810 stcb->asoc.my_vtag = new_vtag;
811 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
812 /* put it in the bucket in the vtag hash of assoc's for the system */
813 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
814 SCTP_INP_INFO_WUNLOCK();
815 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
816 return (1);
817 } else {
818 /* treat like a case where the cookie expired i.e.:
819 * - dump current cookie.
820 * - generate a new vtag.
821 * - resend init.
822 */
823 /* generate a new vtag and send init */
824 LIST_REMOVE(stcb, sctp_asocs);
825 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
826 sctp_stop_all_cookie_timers(stcb);
827 sctp_toss_old_cookies(stcb, &stcb->asoc);
828 stcb->asoc.my_vtag = new_vtag;
829 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
830 /* put it in the bucket in the vtag hash of assoc's for the system */
831 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
832 SCTP_INP_INFO_WUNLOCK();
833 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
834 return (1);
835 }
836 return (0);
837 }
838
839 static int
sctp_handle_nat_missing_state(struct sctp_tcb * stcb,struct sctp_nets * net)840 sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
841 struct sctp_nets *net)
842 {
843 /* return 0 means we want you to proceed with the abort
844 * non-zero means no abort processing
845 */
846 if (stcb->asoc.auth_supported == 0) {
847 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
848 return (0);
849 }
850 sctp_asconf_send_nat_state_update(stcb, net);
851 return (1);
852 }
853
854
/* Returns 1 if the stcb was aborted, 0 otherwise */
static int
sctp_handle_abort(struct sctp_abort_chunk *abort,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	uint16_t len;
	uint16_t error;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return (0);

	len = ntohs(abort->ch.chunk_length);
	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause)) {
		/* Need to check the cause codes for our
		 * two magic nat aborts which don't kill the assoc
		 * necessarily.
		 */
		struct sctp_error_cause *cause;

		cause = (struct sctp_error_cause *)(abort + 1);
		error = ntohs(cause->code);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			        abort->ch.chunk_flags);
			/* Non-zero means the collision was handled; keep the assoc. */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return (0);
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			        abort->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return (0);
			}
		}
	} else {
		/* No error cause present in the ABORT. */
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
	                SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Apple lock order: hold a ref, drop the TCB lock, take the socket
	 * lock, then re-take the TCB lock before freeing the association.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
	return (1);
}
927
928 static void
sctp_start_net_timers(struct sctp_tcb * stcb)929 sctp_start_net_timers(struct sctp_tcb *stcb)
930 {
931 uint32_t cnt_hb_sent;
932 struct sctp_nets *net;
933
934 cnt_hb_sent = 0;
935 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
936 /* For each network start:
937 * 1) A pmtu timer.
938 * 2) A HB timer
939 * 3) If the dest in unconfirmed send
940 * a hb as well if under max_hb_burst have
941 * been sent.
942 */
943 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
944 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
945 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
946 (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
947 sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
948 cnt_hb_sent++;
949 }
950 }
951 if (cnt_hb_sent) {
952 sctp_chunk_output(stcb->sctp_ep, stcb,
953 SCTP_OUTPUT_FROM_COOKIE_ACK,
954 SCTP_SO_NOT_LOCKED);
955 }
956 }
957
958
/*
 * Process an incoming SHUTDOWN chunk.
 *
 * Moves the association toward SHUTDOWN-ACK-SENT if no data is pending;
 * otherwise returns so the output path can drain the queues first.  Sets
 * *abort_flag if sctp_update_acked() aborted the association.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;
	int old_state;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	/* A SHUTDOWN is meaningless before the association is established. */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	}
	old_state = SCTP_GET_STATE(stcb);
	/* Process the cumulative TSN ack carried in the SHUTDOWN. */
	sctp_update_acked(stcb, cp, abort_flag);
	if (*abort_flag) {
		return;
	}
	if (asoc->control_pdapi) {
		/* With a normal shutdown
		 * we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->control_pdapi->on_strm_q) {
			struct sctp_stream_in *strm;

			/* Pull the partial delivery off its stream queue. */
			strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
			if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
			} else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
#ifdef INVARIANTS
			} else {
				panic("Unknown state on ctrl:%p on_strm_q:%d",
				    asoc->control_pdapi,
				    asoc->control_pdapi->on_strm_q);
#endif
			}
		}
		/* Mark the partial delivery as complete/aborted for the reader. */
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take the socket lock before the TCB lock,
		 * holding a refcount so the stcb cannot be freed meanwhile.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		if (stcb->sctp_socket) {
			/* Wake any reader blocked on the partial delivery. */
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_RECEIVED);
			/* notify upper layer that peer has initiated a shutdown */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
		    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			sctp_send_shutdown_ack(stcb, net);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, net);
		} else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
			/* Retransmitted SHUTDOWN: resend our SHUTDOWN-ACK. */
			sctp_send_shutdown_ack(stcb, net);
		}
	}
}
1085
1086 static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk * cp SCTP_UNUSED,struct sctp_tcb * stcb,struct sctp_nets * net)1087 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
1088 struct sctp_tcb *stcb,
1089 struct sctp_nets *net)
1090 {
1091 struct sctp_association *asoc;
1092 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1093 struct socket *so;
1094
1095 so = SCTP_INP_SO(stcb->sctp_ep);
1096 #endif
1097 SCTPDBG(SCTP_DEBUG_INPUT2,
1098 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
1099 if (stcb == NULL)
1100 return;
1101
1102 asoc = &stcb->asoc;
1103 /* process according to association state */
1104 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
1105 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1106 /* unexpected SHUTDOWN-ACK... do OOTB handling... */
1107 sctp_send_shutdown_complete(stcb, net, 1);
1108 SCTP_TCB_UNLOCK(stcb);
1109 return;
1110 }
1111 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
1112 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
1113 /* unexpected SHUTDOWN-ACK... so ignore... */
1114 SCTP_TCB_UNLOCK(stcb);
1115 return;
1116 }
1117 if (asoc->control_pdapi) {
1118 /* With a normal shutdown
1119 * we assume the end of last record.
1120 */
1121 SCTP_INP_READ_LOCK(stcb->sctp_ep);
1122 asoc->control_pdapi->end_added = 1;
1123 asoc->control_pdapi->pdapi_aborted = 1;
1124 asoc->control_pdapi = NULL;
1125 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1126 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1127 atomic_add_int(&stcb->asoc.refcnt, 1);
1128 SCTP_TCB_UNLOCK(stcb);
1129 SCTP_SOCKET_LOCK(so, 1);
1130 SCTP_TCB_LOCK(stcb);
1131 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1132 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1133 /* assoc was freed while we were unlocked */
1134 SCTP_SOCKET_UNLOCK(so, 1);
1135 return;
1136 }
1137 #endif
1138 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1139 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1140 SCTP_SOCKET_UNLOCK(so, 1);
1141 #endif
1142 }
1143 #ifdef INVARIANTS
1144 if (!TAILQ_EMPTY(&asoc->send_queue) ||
1145 !TAILQ_EMPTY(&asoc->sent_queue) ||
1146 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
1147 panic("Queues are not empty when handling SHUTDOWN-ACK");
1148 }
1149 #endif
1150 /* stop the timer */
1151 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
1152 SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
1153 /* send SHUTDOWN-COMPLETE */
1154 sctp_send_shutdown_complete(stcb, net, 0);
1155 /* notify upper layer protocol */
1156 if (stcb->sctp_socket) {
1157 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1158 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1159 stcb->sctp_socket->so_snd.sb_cc = 0;
1160 }
1161 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
1162 }
1163 SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
1164 /* free the TCB but first save off the ep */
1165 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1166 atomic_add_int(&stcb->asoc.refcnt, 1);
1167 SCTP_TCB_UNLOCK(stcb);
1168 SCTP_SOCKET_LOCK(so, 1);
1169 SCTP_TCB_LOCK(stcb);
1170 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1171 #endif
1172 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1173 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1174 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1175 SCTP_SOCKET_UNLOCK(so, 1);
1176 #endif
1177 }
1178
1179 static void
sctp_process_unrecog_chunk(struct sctp_tcb * stcb,uint8_t chunk_type)1180 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type)
1181 {
1182 switch (chunk_type) {
1183 case SCTP_ASCONF_ACK:
1184 case SCTP_ASCONF:
1185 sctp_asconf_cleanup(stcb);
1186 break;
1187 case SCTP_IFORWARD_CUM_TSN:
1188 case SCTP_FORWARD_CUM_TSN:
1189 stcb->asoc.prsctp_supported = 0;
1190 break;
1191 default:
1192 SCTPDBG(SCTP_DEBUG_INPUT2,
1193 "Peer does not support chunk type %d (0x%x).\n",
1194 chunk_type, chunk_type);
1195 break;
1196 }
1197 }
1198
1199 /*
1200 * Skip past the param header and then we will find the param that caused the
1201 * problem. There are a number of param's in a ASCONF OR the prsctp param
1202 * these will turn of specific features.
1203 * XXX: Is this the right thing to do?
1204 */
1205 static void
sctp_process_unrecog_param(struct sctp_tcb * stcb,uint16_t parameter_type)1206 sctp_process_unrecog_param(struct sctp_tcb *stcb, uint16_t parameter_type)
1207 {
1208 switch (parameter_type) {
1209 /* pr-sctp draft */
1210 case SCTP_PRSCTP_SUPPORTED:
1211 stcb->asoc.prsctp_supported = 0;
1212 break;
1213 case SCTP_SUPPORTED_CHUNK_EXT:
1214 break;
1215 /* draft-ietf-tsvwg-addip-sctp */
1216 case SCTP_HAS_NAT_SUPPORT:
1217 stcb->asoc.peer_supports_nat = 0;
1218 break;
1219 case SCTP_ADD_IP_ADDRESS:
1220 case SCTP_DEL_IP_ADDRESS:
1221 case SCTP_SET_PRIM_ADDR:
1222 stcb->asoc.asconf_supported = 0;
1223 break;
1224 case SCTP_SUCCESS_REPORT:
1225 case SCTP_ERROR_CAUSE_IND:
1226 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1227 SCTPDBG(SCTP_DEBUG_INPUT2,
1228 "Turning off ASCONF to this strange peer\n");
1229 stcb->asoc.asconf_supported = 0;
1230 break;
1231 default:
1232 SCTPDBG(SCTP_DEBUG_INPUT2,
1233 "Peer does not support param type %d (0x%x)??\n",
1234 parameter_type, parameter_type);
1235 break;
1236 }
1237 }
1238
/*
 * Walk the error causes of an incoming ERROR chunk and react to each one.
 *
 * 'limit' bounds how many bytes of the chunk are actually present in the
 * mbuf, so truncated chunks are handled safely.  The first cause code is
 * reported to the ULP via SCTP_NOTIFY_REMOTE_ERROR.
 *
 * Returns 0 normally; -1 if the association was freed (stale cookie
 * retry limit exceeded).
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
{
	struct sctp_error_cause *cause;
	struct sctp_association *asoc;
	uint32_t remaining_length, adjust;
	uint16_t code, cause_code, cause_length;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	cause = (struct sctp_error_cause *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	remaining_length = ntohs(ch->chunk_length);
	/* Never trust the chunk length beyond what is actually present. */
	if (remaining_length > limit) {
		remaining_length = limit;
	}
	if (remaining_length >= sizeof(struct sctp_chunkhdr)) {
		remaining_length -= sizeof(struct sctp_chunkhdr);
	} else {
		remaining_length = 0;
	}
	code = 0;
	while (remaining_length >= sizeof(struct sctp_error_cause)) {
		/* Process an Error Cause */
		cause_code = ntohs(cause->code);
		cause_length = ntohs(cause->length);
		if ((cause_length > remaining_length) || (cause_length == 0)) {
			/* Invalid cause length, possibly due to truncation. */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in cause - bytes left: %u cause length: %u\n",
			    remaining_length, cause_length);
			return (0);
		}
		if (code == 0) {
			/* report the first error cause */
			code = cause_code;
		}
		switch (cause_code) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %u back? We have a bug :/ (or do they?)\n",
			    cause_code);
			break;
		case SCTP_CAUSE_NAT_COLLIDING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags: %x\n",
			    ch->chunk_flags);
			/* Non-zero return means the handler consumed the event. */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_NAT_MISSING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags: %x\n",
			    ch->chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if ((cause_length >= sizeof(struct sctp_error_stale_cookie)) &&
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				struct sctp_error_stale_cookie *stale_cookie;

				stale_cookie = (struct sctp_error_stale_cookie *)cause;
				/* Ask for a cookie lifetime at least as long as the peer measured. */
				asoc->cookie_preserve_req = ntohl(stale_cookie->stale_time);
				/* Double it to be more robust on RTX */
				if (asoc->cookie_preserve_req <= UINT32_MAX / 2) {
					asoc->cookie_preserve_req *= 2;
				} else {
					/* Saturate instead of overflowing. */
					asoc->cookie_preserve_req = UINT32_MAX;
				}
				asoc->stale_cookie_count++;
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
					/* now free the asoc */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					/*
					 * Lock-order dance: socket lock before
					 * TCB lock, refcount held across the gap.
					 */
					so = SCTP_INP_SO(stcb->sctp_ep);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					return (-1);
				}
				/* blast back to INIT state */
				sctp_toss_old_cookies(stcb, &stcb->asoc);
				SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't receive a IPv6 packet. so we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			if (cause_length >= sizeof(struct sctp_error_unrecognized_chunk)) {
				struct sctp_error_unrecognized_chunk *unrec_chunk;

				unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause;
				sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type);
			}
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			/* XXX: We only consider the first parameter */
			if (cause_length >= sizeof(struct sctp_error_cause) + sizeof(struct sctp_paramhdr)) {
				struct sctp_paramhdr *unrec_parameter;

				unrec_parameter = (struct sctp_paramhdr *)(cause + 1);
				sctp_process_unrecog_param(stcb, ntohs(unrec_parameter->param_type));
			}
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a error cause with code %u.\n",
			    cause_code);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
			 */
			break;
		default:
			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown code 0x%x\n",
			    cause_code);
			break;
		}
		/* Causes are padded to 4-byte boundaries; step past the padding too. */
		adjust = SCTP_SIZE32(cause_length);
		if (remaining_length >= adjust) {
			remaining_length -= adjust;
		} else {
			remaining_length = 0;
		}
		cause = (struct sctp_error_cause *)((caddr_t)cause + adjust);
	}
	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, code, ch, SCTP_SO_NOT_LOCKED);
	return (0);
}
1418
/*
 * Process an incoming INIT-ACK chunk.
 *
 * Validates the chunk (length, initiate tag, a_rwnd, stream counts) and,
 * when in COOKIE-WAIT, processes its parameters and advances the state
 * machine to COOKIE-ECHOED.  Any validation failure aborts the
 * association and sets *abort_no_unlock.
 *
 * Returns 0 on success, -1 on error/discard.
 */
static int
sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
#if defined(__FreeBSD__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id)
{
	struct sctp_init_ack *init_ack;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_init_ack: handling INIT-ACK\n");

	if (stcb == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init_ack: TCB is null\n");
		return (-1);
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	init_ack = &cp->init;
	/* validate parameters */
	/* An initiate tag of zero is a protocol violation. */
	if (init_ack->initiate_tag == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* The advertised receiver window must meet the minimum. */
	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* Zero inbound streams is a protocol violation. */
	if (init_ack->num_inbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* Zero outbound streams is a protocol violation. */
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (SCTP_GET_STATE(stcb)) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
		    net, abort_no_unlock,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}
1573
1574 static struct sctp_tcb *
1575 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1576 struct sockaddr *src, struct sockaddr *dst,
1577 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1578 struct sctp_inpcb *inp, struct sctp_nets **netp,
1579 struct sockaddr *init_src, int *notification,
1580 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1581 #if defined(__FreeBSD__)
1582 uint8_t mflowtype, uint32_t mflowid,
1583 #endif
1584 uint32_t vrf_id, uint16_t port);
1585
1586
1587 /*
1588 * handle a state cookie for an existing association m: input packet mbuf
1589 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1590 * "split" mbuf and the cookie signature does not exist offset: offset into
1591 * mbuf to the cookie-echo chunk
1592 */
1593 static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf * m,int iphlen,int offset,struct sockaddr * src,struct sockaddr * dst,struct sctphdr * sh,struct sctp_state_cookie * cookie,int cookie_len,struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets ** netp,struct sockaddr * init_src,int * notification,int auth_skipped,uint32_t auth_offset,uint32_t auth_len,uint8_t mflowtype,uint32_t mflowid,uint32_t vrf_id,uint16_t port)1594 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1595 struct sockaddr *src, struct sockaddr *dst,
1596 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1597 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1598 struct sockaddr *init_src, int *notification,
1599 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1600 #if defined(__FreeBSD__)
1601 uint8_t mflowtype, uint32_t mflowid,
1602 #endif
1603 uint32_t vrf_id, uint16_t port)
1604 {
1605 struct sctp_association *asoc;
1606 struct sctp_init_chunk *init_cp, init_buf;
1607 struct sctp_init_ack_chunk *initack_cp, initack_buf;
1608 struct sctp_nets *net;
1609 struct mbuf *op_err;
1610 struct timeval old;
1611 int init_offset, initack_offset, i;
1612 int retval;
1613 int spec_flag = 0;
1614 uint32_t how_indx;
1615 #if defined(SCTP_DETAILED_STR_STATS)
1616 int j;
1617 #endif
1618
1619 net = *netp;
1620 /* I know that the TCB is non-NULL from the caller */
1621 asoc = &stcb->asoc;
1622 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1623 if (asoc->cookie_how[how_indx] == 0)
1624 break;
1625 }
1626 if (how_indx < sizeof(asoc->cookie_how)) {
1627 asoc->cookie_how[how_indx] = 1;
1628 }
1629 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1630 /* SHUTDOWN came in after sending INIT-ACK */
1631 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1632 op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
1633 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
1634 #if defined(__FreeBSD__)
1635 mflowtype, mflowid, inp->fibnum,
1636 #endif
1637 vrf_id, net->port);
1638 if (how_indx < sizeof(asoc->cookie_how))
1639 asoc->cookie_how[how_indx] = 2;
1640 return (NULL);
1641 }
1642 /*
1643 * find and validate the INIT chunk in the cookie (peer's info) the
1644 * INIT should start after the cookie-echo header struct (chunk
1645 * header, state cookie header struct)
1646 */
1647 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1648
1649 init_cp = (struct sctp_init_chunk *)
1650 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1651 (uint8_t *) & init_buf);
1652 if (init_cp == NULL) {
1653 /* could not pull a INIT chunk in cookie */
1654 return (NULL);
1655 }
1656 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1657 return (NULL);
1658 }
1659 /*
1660 * find and validate the INIT-ACK chunk in the cookie (my info) the
1661 * INIT-ACK follows the INIT chunk
1662 */
1663 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
1664 initack_cp = (struct sctp_init_ack_chunk *)
1665 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1666 (uint8_t *) & initack_buf);
1667 if (initack_cp == NULL) {
1668 /* could not pull INIT-ACK chunk in cookie */
1669 return (NULL);
1670 }
1671 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1672 return (NULL);
1673 }
1674 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1675 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1676 /*
1677 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1678 * to get into the OPEN state
1679 */
1680 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1681 /*-
1682 * Opps, this means that we somehow generated two vtag's
1683 * the same. I.e. we did:
1684 * Us Peer
1685 * <---INIT(tag=a)------
1686 * ----INIT-ACK(tag=t)-->
1687 * ----INIT(tag=t)------> *1
1688 * <---INIT-ACK(tag=a)---
1689 * <----CE(tag=t)------------- *2
1690 *
1691 * At point *1 we should be generating a different
1692 * tag t'. Which means we would throw away the CE and send
1693 * ours instead. Basically this is case C (throw away side).
1694 */
1695 if (how_indx < sizeof(asoc->cookie_how))
1696 asoc->cookie_how[how_indx] = 17;
1697 return (NULL);
1698
1699 }
1700 switch (SCTP_GET_STATE(stcb)) {
1701 case SCTP_STATE_COOKIE_WAIT:
1702 case SCTP_STATE_COOKIE_ECHOED:
1703 /*
1704 * INIT was sent but got a COOKIE_ECHO with the
1705 * correct tags... just accept it...but we must
1706 * process the init so that we can make sure we
1707 * have the right seq no's.
1708 */
1709 /* First we must process the INIT !! */
1710 retval = sctp_process_init(init_cp, stcb);
1711 if (retval < 0) {
1712 if (how_indx < sizeof(asoc->cookie_how))
1713 asoc->cookie_how[how_indx] = 3;
1714 return (NULL);
1715 }
1716 /* we have already processed the INIT so no problem */
1717 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
1718 stcb, net,
1719 SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1720 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
1721 stcb, net,
1722 SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1723 /* update current state */
1724 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1725 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1726 else
1727 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1728
1729 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1730 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1731 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1732 stcb->sctp_ep, stcb, NULL);
1733 }
1734 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1735 sctp_stop_all_cookie_timers(stcb);
1736 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1737 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1738 (!SCTP_IS_LISTENING(inp))) {
1739 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1740 struct socket *so;
1741 #endif
1742 /*
1743 * Here is where collision would go if we
1744 * did a connect() and instead got a
1745 * init/init-ack/cookie done before the
1746 * init-ack came back..
1747 */
1748 stcb->sctp_ep->sctp_flags |=
1749 SCTP_PCB_FLAGS_CONNECTED;
1750 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1751 so = SCTP_INP_SO(stcb->sctp_ep);
1752 atomic_add_int(&stcb->asoc.refcnt, 1);
1753 SCTP_TCB_UNLOCK(stcb);
1754 SCTP_SOCKET_LOCK(so, 1);
1755 SCTP_TCB_LOCK(stcb);
1756 atomic_add_int(&stcb->asoc.refcnt, -1);
1757 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1758 SCTP_SOCKET_UNLOCK(so, 1);
1759 return (NULL);
1760 }
1761 #endif
1762 soisconnected(stcb->sctp_socket);
1763 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1764 SCTP_SOCKET_UNLOCK(so, 1);
1765 #endif
1766 }
1767 /* notify upper layer */
1768 *notification = SCTP_NOTIFY_ASSOC_UP;
1769 /*
1770 * since we did not send a HB make sure we
1771 * don't double things
1772 */
1773 old.tv_sec = cookie->time_entered.tv_sec;
1774 old.tv_usec = cookie->time_entered.tv_usec;
1775 net->hb_responded = 1;
1776 sctp_calculate_rto(stcb, asoc, net, &old,
1777 SCTP_RTT_FROM_NON_DATA);
1778
1779 if (stcb->asoc.sctp_autoclose_ticks &&
1780 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1781 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1782 inp, stcb, NULL);
1783 }
1784 break;
1785 default:
1786 /*
1787 * we're in the OPEN state (or beyond), so
1788 * peer must have simply lost the COOKIE-ACK
1789 */
1790 break;
1791 } /* end switch */
1792 sctp_stop_all_cookie_timers(stcb);
1793 /*
1794 * We ignore the return code here.. not sure if we should
1795 * somehow abort.. but we do have an existing asoc. This
1796 * really should not fail.
1797 */
1798 if (sctp_load_addresses_from_init(stcb, m,
1799 init_offset + sizeof(struct sctp_init_chunk),
1800 initack_offset, src, dst, init_src, stcb->asoc.port)) {
1801 if (how_indx < sizeof(asoc->cookie_how))
1802 asoc->cookie_how[how_indx] = 4;
1803 return (NULL);
1804 }
1805 /* respond with a COOKIE-ACK */
1806 sctp_toss_old_cookies(stcb, asoc);
1807 sctp_send_cookie_ack(stcb);
1808 if (how_indx < sizeof(asoc->cookie_how))
1809 asoc->cookie_how[how_indx] = 5;
1810 return (stcb);
1811 }
1812
1813 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1814 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1815 cookie->tie_tag_my_vtag == 0 &&
1816 cookie->tie_tag_peer_vtag == 0) {
1817 /*
1818 * case C in Section 5.2.4 Table 2: XMOO silently discard
1819 */
1820 if (how_indx < sizeof(asoc->cookie_how))
1821 asoc->cookie_how[how_indx] = 6;
1822 return (NULL);
1823 }
1824 /* If nat support, and the below and stcb is established,
1825 * send back a ABORT(colliding state) if we are established.
1826 */
1827 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) &&
1828 (asoc->peer_supports_nat) &&
1829 ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1830 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1831 (asoc->peer_vtag == 0)))) {
1832 /* Special case - Peer's support nat. We may have
1833 * two init's that we gave out the same tag on since
1834 * one was not established.. i.e. we get INIT from host-1
1835 * behind the nat and we respond tag-a, we get a INIT from
1836 * host-2 behind the nat and we get tag-a again. Then we
1837 * bring up host-1 (or 2's) assoc, Then comes the cookie
	 * from host-2 (or 1). Now we have colliding state. We must
1839 * send an abort here with colliding state indication.
1840 */
1841 op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
1842 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
1843 #if defined(__FreeBSD__)
1844 mflowtype, mflowid, inp->fibnum,
1845 #endif
1846 vrf_id, port);
1847 return (NULL);
1848 }
1849 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1850 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1851 (asoc->peer_vtag == 0))) {
1852 /*
1853 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1854 * should be ok, re-accept peer info
1855 */
1856 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1857 /* Extension of case C.
1858 * If we hit this, then the random number
1859 * generator returned the same vtag when we
1860 * first sent our INIT-ACK and when we later sent
1861 * our INIT. The side with the seq numbers that are
		 * different will be the one that normally would
1863 * have hit case C. This in effect "extends" our vtags
1864 * in this collision case to be 64 bits. The same collision
1865 * could occur aka you get both vtag and seq number the
1866 * same twice in a row.. but is much less likely. If it
1867 * did happen then we would proceed through and bring
1868 * up the assoc.. we may end up with the wrong stream
1869 * setup however.. which would be bad.. but there is
1870 * no way to tell.. until we send on a stream that does
1871 * not exist :-)
1872 */
1873 if (how_indx < sizeof(asoc->cookie_how))
1874 asoc->cookie_how[how_indx] = 7;
1875
1876 return (NULL);
1877 }
1878 if (how_indx < sizeof(asoc->cookie_how))
1879 asoc->cookie_how[how_indx] = 8;
1880 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
1881 SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1882 sctp_stop_all_cookie_timers(stcb);
1883 /*
1884 * since we did not send a HB make sure we don't double
1885 * things
1886 */
1887 net->hb_responded = 1;
1888 if (stcb->asoc.sctp_autoclose_ticks &&
1889 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1890 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1891 NULL);
1892 }
1893 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1894 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1895
1896 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1897 /* Ok the peer probably discarded our
1898 * data (if we echoed a cookie+data). So anything
1899 * on the sent_queue should be marked for
1900 * retransmit, we may not get something to
1901 * kick us so it COULD still take a timeout
1902 * to move these.. but it can't hurt to mark them.
1903 */
1904 struct sctp_tmit_chunk *chk;
1905 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1906 if (chk->sent < SCTP_DATAGRAM_RESEND) {
1907 chk->sent = SCTP_DATAGRAM_RESEND;
1908 sctp_flight_size_decrease(chk);
1909 sctp_total_flight_decrease(stcb, chk);
1910 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1911 spec_flag++;
1912 }
1913 }
1914
1915 }
1916 /* process the INIT info (peer's info) */
1917 retval = sctp_process_init(init_cp, stcb);
1918 if (retval < 0) {
1919 if (how_indx < sizeof(asoc->cookie_how))
1920 asoc->cookie_how[how_indx] = 9;
1921 return (NULL);
1922 }
1923 if (sctp_load_addresses_from_init(stcb, m,
1924 init_offset + sizeof(struct sctp_init_chunk),
1925 initack_offset, src, dst, init_src, stcb->asoc.port)) {
1926 if (how_indx < sizeof(asoc->cookie_how))
1927 asoc->cookie_how[how_indx] = 10;
1928 return (NULL);
1929 }
1930 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
1931 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1932 *notification = SCTP_NOTIFY_ASSOC_UP;
1933
1934 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1935 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1936 (!SCTP_IS_LISTENING(inp))) {
1937 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1938 struct socket *so;
1939 #endif
1940 stcb->sctp_ep->sctp_flags |=
1941 SCTP_PCB_FLAGS_CONNECTED;
1942 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1943 so = SCTP_INP_SO(stcb->sctp_ep);
1944 atomic_add_int(&stcb->asoc.refcnt, 1);
1945 SCTP_TCB_UNLOCK(stcb);
1946 SCTP_SOCKET_LOCK(so, 1);
1947 SCTP_TCB_LOCK(stcb);
1948 atomic_add_int(&stcb->asoc.refcnt, -1);
1949 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1950 SCTP_SOCKET_UNLOCK(so, 1);
1951 return (NULL);
1952 }
1953 #endif
1954 soisconnected(stcb->sctp_socket);
1955 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1956 SCTP_SOCKET_UNLOCK(so, 1);
1957 #endif
1958 }
1959 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1960 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1961 else
1962 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1963 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1964 } else if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
1965 SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1966 } else {
1967 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1968 }
1969 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1970 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1971 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1972 stcb->sctp_ep, stcb, NULL);
1973 }
1974 sctp_stop_all_cookie_timers(stcb);
1975 sctp_toss_old_cookies(stcb, asoc);
1976 sctp_send_cookie_ack(stcb);
1977 if (spec_flag) {
1978 /* only if we have retrans set do we do this. What
1979 * this call does is get only the COOKIE-ACK out
1980 * and then when we return the normal call to
1981 * sctp_chunk_output will get the retrans out
1982 * behind this.
1983 */
1984 sctp_chunk_output(inp,stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1985 }
1986 if (how_indx < sizeof(asoc->cookie_how))
1987 asoc->cookie_how[how_indx] = 11;
1988
1989 return (stcb);
1990 }
1991 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1992 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1993 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1994 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1995 cookie->tie_tag_peer_vtag != 0) {
1996 struct sctpasochead *head;
1997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1998 struct socket *so;
1999 #endif
2000
2001 if (asoc->peer_supports_nat) {
2002 /* This is a gross gross hack.
2003 * Just call the cookie_new code since we
2004 * are allowing a duplicate association.
2005 * I hope this works...
2006 */
2007 return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
2008 sh, cookie, cookie_len,
2009 inp, netp, init_src,notification,
2010 auth_skipped, auth_offset, auth_len,
2011 #if defined(__FreeBSD__)
2012 mflowtype, mflowid,
2013 #endif
2014 vrf_id, port));
2015 }
2016 /*
2017 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
2018 */
2019 /* temp code */
2020 if (how_indx < sizeof(asoc->cookie_how))
2021 asoc->cookie_how[how_indx] = 12;
2022 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
2023 SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2024 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
2025 SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
2026
2027 /* notify upper layer */
2028 *notification = SCTP_NOTIFY_ASSOC_RESTART;
2029 atomic_add_int(&stcb->asoc.refcnt, 1);
2030 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) &&
2031 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
2032 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
2033 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2034 }
2035 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
2036 SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
2037 } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
2038 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
2039 }
2040 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2041 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2042 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2043 stcb->sctp_ep, stcb, NULL);
2044
2045 } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
2046 /* move to OPEN state, if not in SHUTDOWN_SENT */
2047 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2048 }
2049 asoc->pre_open_streams =
2050 ntohs(initack_cp->init.num_outbound_streams);
2051 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2052 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2053 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2054
2055 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2056
2057 asoc->str_reset_seq_in = asoc->init_seq_number;
2058
2059 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2060 if (asoc->mapping_array) {
2061 memset(asoc->mapping_array, 0,
2062 asoc->mapping_array_size);
2063 }
2064 if (asoc->nr_mapping_array) {
2065 memset(asoc->nr_mapping_array, 0,
2066 asoc->mapping_array_size);
2067 }
2068 SCTP_TCB_UNLOCK(stcb);
2069 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2070 so = SCTP_INP_SO(stcb->sctp_ep);
2071 SCTP_SOCKET_LOCK(so, 1);
2072 #endif
2073 SCTP_INP_INFO_WLOCK();
2074 SCTP_INP_WLOCK(stcb->sctp_ep);
2075 SCTP_TCB_LOCK(stcb);
2076 atomic_add_int(&stcb->asoc.refcnt, -1);
2077 /* send up all the data */
2078 SCTP_TCB_SEND_LOCK(stcb);
2079
2080 sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED);
2081 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2082 stcb->asoc.strmout[i].chunks_on_queues = 0;
2083 #if defined(SCTP_DETAILED_STR_STATS)
2084 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
2085 asoc->strmout[i].abandoned_sent[j] = 0;
2086 asoc->strmout[i].abandoned_unsent[j] = 0;
2087 }
2088 #else
2089 asoc->strmout[i].abandoned_sent[0] = 0;
2090 asoc->strmout[i].abandoned_unsent[0] = 0;
2091 #endif
2092 stcb->asoc.strmout[i].sid = i;
2093 stcb->asoc.strmout[i].next_mid_ordered = 0;
2094 stcb->asoc.strmout[i].next_mid_unordered = 0;
2095 stcb->asoc.strmout[i].last_msg_incomplete = 0;
2096 }
2097 /* process the INIT-ACK info (my info) */
2098 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2099 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2100
2101 /* pull from vtag hash */
2102 LIST_REMOVE(stcb, sctp_asocs);
2103 /* re-insert to new vtag position */
2104 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
2105 SCTP_BASE_INFO(hashasocmark))];
2106 /*
2107 * put it in the bucket in the vtag hash of assoc's for the
2108 * system
2109 */
2110 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
2111
2112 SCTP_TCB_SEND_UNLOCK(stcb);
2113 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2114 SCTP_INP_INFO_WUNLOCK();
2115 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2116 SCTP_SOCKET_UNLOCK(so, 1);
2117 #endif
2118 asoc->total_flight = 0;
2119 asoc->total_flight_count = 0;
2120 /* process the INIT info (peer's info) */
2121 retval = sctp_process_init(init_cp, stcb);
2122 if (retval < 0) {
2123 if (how_indx < sizeof(asoc->cookie_how))
2124 asoc->cookie_how[how_indx] = 13;
2125
2126 return (NULL);
2127 }
2128 /*
2129 * since we did not send a HB make sure we don't double
2130 * things
2131 */
2132 net->hb_responded = 1;
2133
2134 if (sctp_load_addresses_from_init(stcb, m,
2135 init_offset + sizeof(struct sctp_init_chunk),
2136 initack_offset, src, dst, init_src, stcb->asoc.port)) {
2137 if (how_indx < sizeof(asoc->cookie_how))
2138 asoc->cookie_how[how_indx] = 14;
2139
2140 return (NULL);
2141 }
2142 /* respond with a COOKIE-ACK */
2143 sctp_stop_all_cookie_timers(stcb);
2144 sctp_toss_old_cookies(stcb, asoc);
2145 sctp_send_cookie_ack(stcb);
2146 if (how_indx < sizeof(asoc->cookie_how))
2147 asoc->cookie_how[how_indx] = 15;
2148
2149 return (stcb);
2150 }
2151 if (how_indx < sizeof(asoc->cookie_how))
2152 asoc->cookie_how[how_indx] = 16;
2153 /* all other cases... */
2154 return (NULL);
2155 }
2156
2157
/*
 * handle a state cookie for a new association
 * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO
 *    chunk; note: this is a "split" mbuf and the cookie signature does not
 *    exist
 * offset: offset into mbuf to the cookie-echo chunk
 * cookie_len: length of the cookie chunk
 * init_src: address the INIT was received from
 * returns a new TCB on success, NULL on failure
 */
2165 static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf * m,int iphlen,int offset,struct sockaddr * src,struct sockaddr * dst,struct sctphdr * sh,struct sctp_state_cookie * cookie,int cookie_len,struct sctp_inpcb * inp,struct sctp_nets ** netp,struct sockaddr * init_src,int * notification,int auth_skipped,uint32_t auth_offset,uint32_t auth_len,uint8_t mflowtype,uint32_t mflowid,uint32_t vrf_id,uint16_t port)2166 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
2167 struct sockaddr *src, struct sockaddr *dst,
2168 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
2169 struct sctp_inpcb *inp, struct sctp_nets **netp,
2170 struct sockaddr *init_src, int *notification,
2171 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2172 #if defined(__FreeBSD__)
2173 uint8_t mflowtype, uint32_t mflowid,
2174 #endif
2175 uint32_t vrf_id, uint16_t port)
2176 {
2177 struct sctp_tcb *stcb;
2178 struct sctp_init_chunk *init_cp, init_buf;
2179 struct sctp_init_ack_chunk *initack_cp, initack_buf;
2180 union sctp_sockstore store;
2181 struct sctp_association *asoc;
2182 int init_offset, initack_offset, initack_limit;
2183 int retval;
2184 int error = 0;
2185 uint8_t auth_chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
2186 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2187 struct socket *so;
2188
2189 so = SCTP_INP_SO(inp);
2190 #endif
2191
2192 /*
2193 * find and validate the INIT chunk in the cookie (peer's info) the
2194 * INIT should start after the cookie-echo header struct (chunk
2195 * header, state cookie header struct)
2196 */
2197 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
2198 init_cp = (struct sctp_init_chunk *)
2199 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
2200 (uint8_t *) & init_buf);
2201 if (init_cp == NULL) {
2202 /* could not pull a INIT chunk in cookie */
2203 SCTPDBG(SCTP_DEBUG_INPUT1,
2204 "process_cookie_new: could not pull INIT chunk hdr\n");
2205 return (NULL);
2206 }
2207 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
2208 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
2209 return (NULL);
2210 }
2211 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
2212 /*
2213 * find and validate the INIT-ACK chunk in the cookie (my info) the
2214 * INIT-ACK follows the INIT chunk
2215 */
2216 initack_cp = (struct sctp_init_ack_chunk *)
2217 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
2218 (uint8_t *) & initack_buf);
2219 if (initack_cp == NULL) {
2220 /* could not pull INIT-ACK chunk in cookie */
2221 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
2222 return (NULL);
2223 }
2224 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
2225 return (NULL);
2226 }
2227 /*
2228 * NOTE: We can't use the INIT_ACK's chk_length to determine the
2229 * "initack_limit" value. This is because the chk_length field
2230 * includes the length of the cookie, but the cookie is omitted when
2231 * the INIT and INIT_ACK are tacked onto the cookie...
2232 */
2233 initack_limit = offset + cookie_len;
2234
2235 /*
2236 * now that we know the INIT/INIT-ACK are in place, create a new TCB
2237 * and popluate
2238 */
2239
2240 /*
2241 * Here we do a trick, we set in NULL for the proc/thread argument. We
2242 * do this since in effect we only use the p argument when
2243 * the socket is unbound and we must do an implicit bind.
2244 * Since we are getting a cookie, we cannot be unbound.
2245 */
2246 stcb = sctp_aloc_assoc(inp, init_src, &error,
2247 ntohl(initack_cp->init.initiate_tag), vrf_id,
2248 ntohs(initack_cp->init.num_outbound_streams),
2249 port,
2250 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
2251 (struct thread *)NULL,
2252 #elif defined(__Windows__)
2253 (PKTHREAD)NULL,
2254 #else
2255 (struct proc *)NULL,
2256 #endif
2257 SCTP_DONT_INITIALIZE_AUTH_PARAMS);
2258 if (stcb == NULL) {
2259 struct mbuf *op_err;
2260
2261 /* memory problem? */
2262 SCTPDBG(SCTP_DEBUG_INPUT1,
2263 "process_cookie_new: no room for another TCB!\n");
2264 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2265 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2266 src, dst, sh, op_err,
2267 #if defined(__FreeBSD__)
2268 mflowtype, mflowid,
2269 #endif
2270 vrf_id, port);
2271 return (NULL);
2272 }
2273 /* get the correct sctp_nets */
2274 if (netp)
2275 *netp = sctp_findnet(stcb, init_src);
2276
2277 asoc = &stcb->asoc;
2278 /* get scope variables out of cookie */
2279 asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
2280 asoc->scope.site_scope = cookie->site_scope;
2281 asoc->scope.local_scope = cookie->local_scope;
2282 asoc->scope.loopback_scope = cookie->loopback_scope;
2283
2284 #if defined(__Userspace__)
2285 if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2286 (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal) ||
2287 (asoc->scope.conn_addr_legal != cookie->conn_addr_legal)) {
2288 #else
2289 if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2290 (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
2291 #endif
2292 struct mbuf *op_err;
2293
2294 /*
2295 * Houston we have a problem. The EP changed while the
2296 * cookie was in flight. Only recourse is to abort the
2297 * association.
2298 */
2299 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2300 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2301 src, dst, sh, op_err,
2302 #if defined(__FreeBSD__)
2303 mflowtype, mflowid,
2304 #endif
2305 vrf_id, port);
2306 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2307 atomic_add_int(&stcb->asoc.refcnt, 1);
2308 SCTP_TCB_UNLOCK(stcb);
2309 SCTP_SOCKET_LOCK(so, 1);
2310 SCTP_TCB_LOCK(stcb);
2311 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2312 #endif
2313 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2314 SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
2315 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2316 SCTP_SOCKET_UNLOCK(so, 1);
2317 #endif
2318 return (NULL);
2319 }
2320 /* process the INIT-ACK info (my info) */
2321 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2322 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2323 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
2324 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2325 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2326 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2327 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2328 asoc->str_reset_seq_in = asoc->init_seq_number;
2329
2330 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2331
2332 /* process the INIT info (peer's info) */
2333 if (netp)
2334 retval = sctp_process_init(init_cp, stcb);
2335 else
2336 retval = 0;
2337 if (retval < 0) {
2338 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2339 atomic_add_int(&stcb->asoc.refcnt, 1);
2340 SCTP_TCB_UNLOCK(stcb);
2341 SCTP_SOCKET_LOCK(so, 1);
2342 SCTP_TCB_LOCK(stcb);
2343 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2344 #endif
2345 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2346 SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
2347 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2348 SCTP_SOCKET_UNLOCK(so, 1);
2349 #endif
2350 return (NULL);
2351 }
2352 /* load all addresses */
2353 if (sctp_load_addresses_from_init(stcb, m,
2354 init_offset + sizeof(struct sctp_init_chunk), initack_offset,
2355 src, dst, init_src, port)) {
2356 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2357 atomic_add_int(&stcb->asoc.refcnt, 1);
2358 SCTP_TCB_UNLOCK(stcb);
2359 SCTP_SOCKET_LOCK(so, 1);
2360 SCTP_TCB_LOCK(stcb);
2361 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2362 #endif
2363 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2364 SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2365 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2366 SCTP_SOCKET_UNLOCK(so, 1);
2367 #endif
2368 return (NULL);
2369 }
2370 /*
2371 * verify any preceding AUTH chunk that was skipped
2372 */
2373 /* pull the local authentication parameters from the cookie/init-ack */
2374 sctp_auth_get_cookie_params(stcb, m,
2375 initack_offset + sizeof(struct sctp_init_ack_chunk),
2376 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
2377 if (auth_skipped) {
2378 struct sctp_auth_chunk *auth;
2379
2380 if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
2381 auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
2382 } else {
2383 auth = NULL;
2384 }
2385 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
2386 /* auth HMAC failed, dump the assoc and packet */
2387 SCTPDBG(SCTP_DEBUG_AUTH1,
2388 "COOKIE-ECHO: AUTH failed\n");
2389 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2390 atomic_add_int(&stcb->asoc.refcnt, 1);
2391 SCTP_TCB_UNLOCK(stcb);
2392 SCTP_SOCKET_LOCK(so, 1);
2393 SCTP_TCB_LOCK(stcb);
2394 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2395 #endif
2396 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2397 SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
2398 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2399 SCTP_SOCKET_UNLOCK(so, 1);
2400 #endif
2401 return (NULL);
2402 } else {
2403 /* remaining chunks checked... good to go */
2404 stcb->asoc.authenticated = 1;
2405 }
2406 }
2407
2408 /*
2409 * if we're doing ASCONFs, check to see if we have any new local
2410 * addresses that need to get added to the peer (eg. addresses
2411 * changed while cookie echo in flight). This needs to be done
2412 * after we go to the OPEN state to do the correct asconf
2413 * processing. else, make sure we have the correct addresses in our
2414 * lists
2415 */
2416
2417 /* warning, we re-use sin, sin6, sa_store here! */
2418 /* pull in local_address (our "from" address) */
2419 switch (cookie->laddr_type) {
2420 #ifdef INET
2421 case SCTP_IPV4_ADDRESS:
2422 /* source addr is IPv4 */
2423 memset(&store.sin, 0, sizeof(struct sockaddr_in));
2424 store.sin.sin_family = AF_INET;
2425 #ifdef HAVE_SIN_LEN
2426 store.sin.sin_len = sizeof(struct sockaddr_in);
2427 #endif
2428 store.sin.sin_addr.s_addr = cookie->laddress[0];
2429 break;
2430 #endif
2431 #ifdef INET6
2432 case SCTP_IPV6_ADDRESS:
2433 /* source addr is IPv6 */
2434 memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
2435 store.sin6.sin6_family = AF_INET6;
2436 #ifdef HAVE_SIN6_LEN
2437 store.sin6.sin6_len = sizeof(struct sockaddr_in6);
2438 #endif
2439 store.sin6.sin6_scope_id = cookie->scope_id;
2440 memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
2441 break;
2442 #endif
2443 #if defined(__Userspace__)
2444 case SCTP_CONN_ADDRESS:
2445 /* source addr is conn */
2446 memset(&store.sconn, 0, sizeof(struct sockaddr_conn));
2447 store.sconn.sconn_family = AF_CONN;
2448 #ifdef HAVE_SCONN_LEN
2449 store.sconn.sconn_len = sizeof(struct sockaddr_conn);
2450 #endif
2451 memcpy(&store.sconn.sconn_addr, cookie->laddress, sizeof(void *));
2452 break;
2453 #endif
2454 default:
2455 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2456 atomic_add_int(&stcb->asoc.refcnt, 1);
2457 SCTP_TCB_UNLOCK(stcb);
2458 SCTP_SOCKET_LOCK(so, 1);
2459 SCTP_TCB_LOCK(stcb);
2460 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2461 #endif
2462 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2463 SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
2464 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2465 SCTP_SOCKET_UNLOCK(so, 1);
2466 #endif
2467 return (NULL);
2468 }
2469
2470 /* update current state */
2471 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2472 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2473 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2474 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2475 stcb->sctp_ep, stcb, NULL);
2476 }
2477 sctp_stop_all_cookie_timers(stcb);
2478 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
2479 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2480
2481 /* set up to notify upper layer */
2482 *notification = SCTP_NOTIFY_ASSOC_UP;
2483 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2484 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2485 (!SCTP_IS_LISTENING(inp))) {
2486 /*
2487 * This is an endpoint that called connect() how it got a
2488 * cookie that is NEW is a bit of a mystery. It must be that
2489 * the INIT was sent, but before it got there.. a complete
2490 * INIT/INIT-ACK/COOKIE arrived. But of course then it
2491 * should have went to the other code.. not here.. oh well..
2492 * a bit of protection is worth having..
2493 */
2494 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2495 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2496 atomic_add_int(&stcb->asoc.refcnt, 1);
2497 SCTP_TCB_UNLOCK(stcb);
2498 SCTP_SOCKET_LOCK(so, 1);
2499 SCTP_TCB_LOCK(stcb);
2500 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2501 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2502 SCTP_SOCKET_UNLOCK(so, 1);
2503 return (NULL);
2504 }
2505 #endif
2506 soisconnected(stcb->sctp_socket);
2507 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2508 SCTP_SOCKET_UNLOCK(so, 1);
2509 #endif
2510 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2511 (SCTP_IS_LISTENING(inp))) {
2512 /*
2513 * We don't want to do anything with this one. Since it is
2514 * the listening guy. The timer will get started for
2515 * accepted connections in the caller.
2516 */
2517 ;
2518 }
2519 /* since we did not send a HB make sure we don't double things */
2520 if ((netp) && (*netp))
2521 (*netp)->hb_responded = 1;
2522
2523 if (stcb->asoc.sctp_autoclose_ticks &&
2524 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2525 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2526 }
2527 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2528 if ((netp != NULL) && (*netp != NULL)) {
2529 struct timeval old;
2530
2531 /* calculate the RTT and set the encaps port */
2532 old.tv_sec = cookie->time_entered.tv_sec;
2533 old.tv_usec = cookie->time_entered.tv_usec;
2534 sctp_calculate_rto(stcb, asoc, *netp, &old, SCTP_RTT_FROM_NON_DATA);
2535 }
2536 /* respond with a COOKIE-ACK */
2537 sctp_send_cookie_ack(stcb);
2538
2539 /*
2540 * check the address lists for any ASCONFs that need to be sent
2541 * AFTER the cookie-ack is sent
2542 */
2543 sctp_check_address_list(stcb, m,
2544 initack_offset + sizeof(struct sctp_init_ack_chunk),
2545 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2546 &store.sa, cookie->local_scope, cookie->site_scope,
2547 cookie->ipv4_scope, cookie->loopback_scope);
2548
2549
2550 return (stcb);
2551 }
2552
2553 /*
2554 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
2555 * we NEED to make sure we are not already using the vtag. If so we
2556 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2557 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2558 SCTP_BASE_INFO(hashasocmark))];
2559 LIST_FOREACH(stcb, head, sctp_asocs) {
2560 if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) {
2561 -- SEND ABORT - TRY AGAIN --
2562 }
2563 }
2564 */
2565
/*
 * handles a COOKIE-ECHO message
 * stcb: on return, either set to a newly created TCB or left as the
 *       existing (non-NULL) TCB
 */
2570 static struct mbuf *
2571 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2572 struct sockaddr *src, struct sockaddr *dst,
2573 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2574 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2575 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2576 struct sctp_tcb **locked_tcb,
2577 #if defined(__FreeBSD__)
2578 uint8_t mflowtype, uint32_t mflowid,
2579 #endif
2580 uint32_t vrf_id, uint16_t port)
2581 {
2582 struct sctp_state_cookie *cookie;
2583 struct sctp_tcb *l_stcb = *stcb;
2584 struct sctp_inpcb *l_inp;
2585 struct sockaddr *to;
2586 struct sctp_pcb *ep;
2587 struct mbuf *m_sig;
2588 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2589 uint8_t *sig;
2590 uint8_t cookie_ok = 0;
2591 unsigned int sig_offset, cookie_offset;
2592 unsigned int cookie_len;
2593 struct timeval now;
2594 struct timeval time_expires;
2595 int notification = 0;
2596 struct sctp_nets *netl;
2597 int had_a_existing_tcb = 0;
2598 int send_int_conf = 0;
2599 #ifdef INET
2600 struct sockaddr_in sin;
2601 #endif
2602 #ifdef INET6
2603 struct sockaddr_in6 sin6;
2604 #endif
2605 #if defined(__Userspace__)
2606 struct sockaddr_conn sconn;
2607 #endif
2608
2609 SCTPDBG(SCTP_DEBUG_INPUT2,
2610 "sctp_handle_cookie: handling COOKIE-ECHO\n");
2611
2612 if (inp_p == NULL) {
2613 return (NULL);
2614 }
2615 cookie = &cp->cookie;
2616 cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2617 cookie_len = ntohs(cp->ch.chunk_length);
2618
2619 if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2620 sizeof(struct sctp_init_chunk) +
2621 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2622 /* cookie too small */
2623 return (NULL);
2624 }
2625 if ((cookie->peerport != sh->src_port) ||
2626 (cookie->myport != sh->dest_port) ||
2627 (cookie->my_vtag != sh->v_tag)) {
2628 /*
2629 * invalid ports or bad tag. Note that we always leave the
2630 * v_tag in the header in network order and when we stored
2631 * it in the my_vtag slot we also left it in network order.
2632 * This maintains the match even though it may be in the
2633 * opposite byte order of the machine :->
2634 */
2635 return (NULL);
2636 }
2637 /*
2638 * split off the signature into its own mbuf (since it should not be
2639 * calculated in the sctp_hmac_m() call).
2640 */
2641 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2642 m_sig = m_split(m, sig_offset, M_NOWAIT);
2643 if (m_sig == NULL) {
2644 /* out of memory or ?? */
2645 return (NULL);
2646 }
2647 #ifdef SCTP_MBUF_LOGGING
2648 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2649 sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT);
2650 }
2651 #endif
2652
2653 /*
2654 * compute the signature/digest for the cookie
2655 */
2656 ep = &(*inp_p)->sctp_ep;
2657 l_inp = *inp_p;
2658 if (l_stcb) {
2659 SCTP_TCB_UNLOCK(l_stcb);
2660 }
2661 SCTP_INP_RLOCK(l_inp);
2662 if (l_stcb) {
2663 SCTP_TCB_LOCK(l_stcb);
2664 }
2665 /* which cookie is it? */
2666 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2667 (ep->current_secret_number != ep->last_secret_number)) {
2668 /* it's the old cookie */
2669 (void)sctp_hmac_m(SCTP_HMAC,
2670 (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2671 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2672 } else {
2673 /* it's the current cookie */
2674 (void)sctp_hmac_m(SCTP_HMAC,
2675 (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
2676 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2677 }
2678 /* get the signature */
2679 SCTP_INP_RUNLOCK(l_inp);
2680 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2681 if (sig == NULL) {
2682 /* couldn't find signature */
2683 sctp_m_freem(m_sig);
2684 return (NULL);
2685 }
2686 /* compare the received digest with the computed digest */
2687 if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2688 /* try the old cookie? */
2689 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2690 (ep->current_secret_number != ep->last_secret_number)) {
2691 /* compute digest with old */
2692 (void)sctp_hmac_m(SCTP_HMAC,
2693 (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2694 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2695 /* compare */
2696 if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2697 cookie_ok = 1;
2698 }
2699 } else {
2700 cookie_ok = 1;
2701 }
2702
2703 /*
2704 * Now before we continue we must reconstruct our mbuf so that
2705 * normal processing of any other chunks will work.
2706 */
2707 {
2708 struct mbuf *m_at;
2709
2710 m_at = m;
2711 while (SCTP_BUF_NEXT(m_at) != NULL) {
2712 m_at = SCTP_BUF_NEXT(m_at);
2713 }
2714 SCTP_BUF_NEXT(m_at) = m_sig;
2715 }
2716
2717 if (cookie_ok == 0) {
2718 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2719 SCTPDBG(SCTP_DEBUG_INPUT2,
2720 "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2721 (uint32_t) offset, cookie_offset, sig_offset);
2722 return (NULL);
2723 }
2724
2725 /*
2726 * check the cookie timestamps to be sure it's not stale
2727 */
2728 (void)SCTP_GETTIME_TIMEVAL(&now);
2729 /* Expire time is in Ticks, so we convert to seconds */
2730 time_expires.tv_sec = cookie->time_entered.tv_sec + sctp_ticks_to_secs(cookie->cookie_life);
2731 time_expires.tv_usec = cookie->time_entered.tv_usec;
2732 #ifndef __FreeBSD__
2733 if (timercmp(&now, &time_expires, >))
2734 #else
2735 if (timevalcmp(&now, &time_expires, >))
2736 #endif
2737 {
2738 /* cookie is stale! */
2739 struct mbuf *op_err;
2740 struct sctp_error_stale_cookie *cause;
2741 struct timeval diff;
2742 uint32_t staleness;
2743
2744 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie),
2745 0, M_NOWAIT, 1, MT_DATA);
2746 if (op_err == NULL) {
2747 /* FOOBAR */
2748 return (NULL);
2749 }
2750 /* Set the len */
2751 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie);
2752 cause = mtod(op_err, struct sctp_error_stale_cookie *);
2753 cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE);
2754 cause->cause.length = htons((sizeof(struct sctp_paramhdr) +
2755 (sizeof(uint32_t))));
2756 #ifndef __FreeBSD__
2757 timersub(&now, &time_expires, &diff);
2758 #else
2759 diff = now;
2760 timevalsub(&diff, &time_expires);
2761 #endif
2762 if ((uint32_t)diff.tv_sec > UINT32_MAX / 1000000) {
2763 staleness = UINT32_MAX;
2764 } else {
2765 staleness = diff.tv_sec * 1000000;
2766 }
2767 if (UINT32_MAX - staleness >= (uint32_t)diff.tv_usec) {
2768 staleness += diff.tv_usec;
2769 } else {
2770 staleness = UINT32_MAX;
2771 }
2772 cause->stale_time = htonl(staleness);
2773 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
2774 #if defined(__FreeBSD__)
2775 mflowtype, mflowid, l_inp->fibnum,
2776 #endif
2777 vrf_id, port);
2778 return (NULL);
2779 }
2780 /*
2781 * Now we must see with the lookup address if we have an existing
2782 * asoc. This will only happen if we were in the COOKIE-WAIT state
2783 * and a INIT collided with us and somewhere the peer sent the
2784 * cookie on another address besides the single address our assoc
2785 * had for him. In this case we will have one of the tie-tags set at
2786 * least AND the address field in the cookie can be used to look it
2787 * up.
2788 */
2789 to = NULL;
2790 switch (cookie->addr_type) {
2791 #ifdef INET6
2792 case SCTP_IPV6_ADDRESS:
2793 memset(&sin6, 0, sizeof(sin6));
2794 sin6.sin6_family = AF_INET6;
2795 #ifdef HAVE_SIN6_LEN
2796 sin6.sin6_len = sizeof(sin6);
2797 #endif
2798 sin6.sin6_port = sh->src_port;
2799 sin6.sin6_scope_id = cookie->scope_id;
2800 memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2801 sizeof(sin6.sin6_addr.s6_addr));
2802 to = (struct sockaddr *)&sin6;
2803 break;
2804 #endif
2805 #ifdef INET
2806 case SCTP_IPV4_ADDRESS:
2807 memset(&sin, 0, sizeof(sin));
2808 sin.sin_family = AF_INET;
2809 #ifdef HAVE_SIN_LEN
2810 sin.sin_len = sizeof(sin);
2811 #endif
2812 sin.sin_port = sh->src_port;
2813 sin.sin_addr.s_addr = cookie->address[0];
2814 to = (struct sockaddr *)&sin;
2815 break;
2816 #endif
2817 #if defined(__Userspace__)
2818 case SCTP_CONN_ADDRESS:
2819 memset(&sconn, 0, sizeof(struct sockaddr_conn));
2820 sconn.sconn_family = AF_CONN;
2821 #ifdef HAVE_SCONN_LEN
2822 sconn.sconn_len = sizeof(struct sockaddr_conn);
2823 #endif
2824 sconn.sconn_port = sh->src_port;
2825 memcpy(&sconn.sconn_addr, cookie->address, sizeof(void *));
2826 to = (struct sockaddr *)&sconn;
2827 break;
2828 #endif
2829 default:
2830 /* This should not happen */
2831 return (NULL);
2832 }
2833 if (*stcb == NULL) {
2834 /* Yep, lets check */
2835 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
2836 if (*stcb == NULL) {
2837 /*
2838 * We should have only got back the same inp. If we
2839 * got back a different ep we have a problem. The
2840 * original findep got back l_inp and now
2841 */
2842 if (l_inp != *inp_p) {
2843 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2844 }
2845 } else {
2846 if (*locked_tcb == NULL) {
2847 /* In this case we found the assoc only
2848 * after we locked the create lock. This means
2849 * we are in a colliding case and we must make
2850 * sure that we unlock the tcb if its one of the
2851 * cases where we throw away the incoming packets.
2852 */
2853 *locked_tcb = *stcb;
2854
2855 /* We must also increment the inp ref count
2856 * since the ref_count flags was set when we
2857 * did not find the TCB, now we found it which
2858 * reduces the refcount.. we must raise it back
2859 * out to balance it all :-)
2860 */
2861 SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2862 if ((*stcb)->sctp_ep != l_inp) {
2863 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2864 (void *)(*stcb)->sctp_ep, (void *)l_inp);
2865 }
2866 }
2867 }
2868 }
2869
2870 cookie_len -= SCTP_SIGNATURE_SIZE;
2871 if (*stcb == NULL) {
2872 /* this is the "normal" case... get a new TCB */
2873 *stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
2874 cookie, cookie_len, *inp_p,
2875 netp, to, ¬ification,
2876 auth_skipped, auth_offset, auth_len,
2877 #if defined(__FreeBSD__)
2878 mflowtype, mflowid,
2879 #endif
2880 vrf_id, port);
2881 } else {
2882 /* this is abnormal... cookie-echo on existing TCB */
2883 had_a_existing_tcb = 1;
2884 *stcb = sctp_process_cookie_existing(m, iphlen, offset,
2885 src, dst, sh,
2886 cookie, cookie_len, *inp_p, *stcb, netp, to,
2887 ¬ification, auth_skipped, auth_offset, auth_len,
2888 #if defined(__FreeBSD__)
2889 mflowtype, mflowid,
2890 #endif
2891 vrf_id, port);
2892 }
2893
2894 if (*stcb == NULL) {
2895 /* still no TCB... must be bad cookie-echo */
2896 return (NULL);
2897 }
2898 #if defined(__FreeBSD__)
2899 if (*netp != NULL) {
2900 (*netp)->flowtype = mflowtype;
2901 (*netp)->flowid = mflowid;
2902 }
2903 #endif
2904 /*
2905 * Ok, we built an association so confirm the address we sent the
2906 * INIT-ACK to.
2907 */
2908 netl = sctp_findnet(*stcb, to);
2909 /*
2910 * This code should in theory NOT run but
2911 */
2912 if (netl == NULL) {
2913 /* TSNH! Huh, why do I need to add this address here? */
2914 if (sctp_add_remote_addr(*stcb, to, NULL, port,
2915 SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
2916 return (NULL);
2917 }
2918 netl = sctp_findnet(*stcb, to);
2919 }
2920 if (netl) {
2921 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2922 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2923 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2924 netl);
2925 send_int_conf = 1;
2926 }
2927 }
2928 sctp_start_net_timers(*stcb);
2929 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2930 if (!had_a_existing_tcb ||
2931 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2932 /*
2933 * If we have a NEW cookie or the connect never
2934 * reached the connected state during collision we
2935 * must do the TCP accept thing.
2936 */
2937 struct socket *so, *oso;
2938 struct sctp_inpcb *inp;
2939
2940 if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2941 /*
2942 * For a restart we will keep the same
2943 * socket, no need to do anything. I THINK!!
2944 */
2945 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2946 if (send_int_conf) {
2947 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2948 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2949 }
2950 return (m);
2951 }
2952 oso = (*inp_p)->sctp_socket;
2953 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000)
2954 /*
2955 * We do this to keep the sockets side happy during
2956 * the sonewcon ONLY.
2957 */
2958 NET_LOCK_GIANT();
2959 #endif
2960 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2961 SCTP_TCB_UNLOCK((*stcb));
2962 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
2963 CURVNET_SET(oso->so_vnet);
2964 #endif
2965 #if defined(__APPLE__)
2966 SCTP_SOCKET_LOCK(oso, 1);
2967 #endif
2968 so = sonewconn(oso, 0
2969 #if defined(__APPLE__)
2970 ,NULL
2971 #endif
2972 #ifdef __Panda__
2973 ,NULL , (*inp_p)->def_vrf_id
2974 #endif
2975 );
2976 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000)
2977 NET_UNLOCK_GIANT();
2978 #endif
2979 #if defined(__APPLE__)
2980 SCTP_SOCKET_UNLOCK(oso, 1);
2981 #endif
2982 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
2983 CURVNET_RESTORE();
2984 #endif
2985 SCTP_TCB_LOCK((*stcb));
2986 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2987
2988 if (so == NULL) {
2989 struct mbuf *op_err;
2990 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2991 struct socket *pcb_so;
2992 #endif
2993 /* Too many sockets */
2994 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2995 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2996 sctp_abort_association(*inp_p, NULL, m, iphlen,
2997 src, dst, sh, op_err,
2998 #if defined(__FreeBSD__)
2999 mflowtype, mflowid,
3000 #endif
3001 vrf_id, port);
3002 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3003 pcb_so = SCTP_INP_SO(*inp_p);
3004 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
3005 SCTP_TCB_UNLOCK((*stcb));
3006 SCTP_SOCKET_LOCK(pcb_so, 1);
3007 SCTP_TCB_LOCK((*stcb));
3008 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
3009 #endif
3010 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC,
3011 SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
3012 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3013 SCTP_SOCKET_UNLOCK(pcb_so, 1);
3014 #endif
3015 return (NULL);
3016 }
3017 inp = (struct sctp_inpcb *)so->so_pcb;
3018 SCTP_INP_INCR_REF(inp);
3019 /*
3020 * We add the unbound flag here so that
3021 * if we get an soabort() before we get the
3022 * move_pcb done, we will properly cleanup.
3023 */
3024 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
3025 SCTP_PCB_FLAGS_CONNECTED |
3026 SCTP_PCB_FLAGS_IN_TCPPOOL |
3027 SCTP_PCB_FLAGS_UNBOUND |
3028 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
3029 SCTP_PCB_FLAGS_DONT_WAKE);
3030 inp->sctp_features = (*inp_p)->sctp_features;
3031 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
3032 inp->sctp_socket = so;
3033 inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
3034 inp->max_cwnd = (*inp_p)->max_cwnd;
3035 inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
3036 inp->ecn_supported = (*inp_p)->ecn_supported;
3037 inp->prsctp_supported = (*inp_p)->prsctp_supported;
3038 inp->auth_supported = (*inp_p)->auth_supported;
3039 inp->asconf_supported = (*inp_p)->asconf_supported;
3040 inp->reconfig_supported = (*inp_p)->reconfig_supported;
3041 inp->nrsack_supported = (*inp_p)->nrsack_supported;
3042 inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
3043 inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
3044 inp->sctp_context = (*inp_p)->sctp_context;
3045 inp->local_strreset_support = (*inp_p)->local_strreset_support;
3046 inp->fibnum = (*inp_p)->fibnum;
3047 inp->inp_starting_point_for_iterator = NULL;
3048 #if defined(__Userspace__)
3049 inp->ulp_info = (*inp_p)->ulp_info;
3050 inp->recv_callback = (*inp_p)->recv_callback;
3051 inp->send_callback = (*inp_p)->send_callback;
3052 inp->send_sb_threshold = (*inp_p)->send_sb_threshold;
3053 #endif
3054 /*
3055 * copy in the authentication parameters from the
3056 * original endpoint
3057 */
3058 if (inp->sctp_ep.local_hmacs)
3059 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
3060 inp->sctp_ep.local_hmacs =
3061 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
3062 if (inp->sctp_ep.local_auth_chunks)
3063 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
3064 inp->sctp_ep.local_auth_chunks =
3065 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
3066
3067 /*
3068 * Now we must move it from one hash table to
3069 * another and get the tcb in the right place.
3070 */
3071
3072 /* This is where the one-2-one socket is put into
3073 * the accept state waiting for the accept!
3074 */
3075 if (*stcb) {
3076 SCTP_ADD_SUBSTATE(*stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
3077 }
3078 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
3079
3080 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
3081 SCTP_TCB_UNLOCK((*stcb));
3082
3083 #if defined(__FreeBSD__)
3084 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
3085 0);
3086 #else
3087 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
3088 #endif
3089 SCTP_TCB_LOCK((*stcb));
3090 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
3091
3092
3093 /* now we must check to see if we were aborted while
3094 * the move was going on and the lock/unlock happened.
3095 */
3096 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3097 /* yep it was, we leave the
3098 * assoc attached to the socket since
3099 * the sctp_inpcb_free() call will send
3100 * an abort for us.
3101 */
3102 SCTP_INP_DECR_REF(inp);
3103 return (NULL);
3104 }
3105 SCTP_INP_DECR_REF(inp);
3106 /* Switch over to the new guy */
3107 *inp_p = inp;
3108 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3109 if (send_int_conf) {
3110 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
3111 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
3112 }
3113
3114 /* Pull it from the incomplete queue and wake the guy */
3115 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3116 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
3117 SCTP_TCB_UNLOCK((*stcb));
3118 SCTP_SOCKET_LOCK(so, 1);
3119 #endif
3120 soisconnected(so);
3121 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3122 SCTP_TCB_LOCK((*stcb));
3123 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
3124 SCTP_SOCKET_UNLOCK(so, 1);
3125 #endif
3126 return (m);
3127 }
3128 }
3129 if (notification) {
3130 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3131 }
3132 if (send_int_conf) {
3133 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
3134 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
3135 }
3136 return (m);
3137 }
3138
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Handle an incoming COOKIE-ACK: clear the overall error counter,
	 * stop all cookie (T1) timers, and -- if we are in COOKIE-ECHOED --
	 * move the association to OPEN: take an RTT sample, notify the ULP,
	 * start the per-path heartbeat/autoclose timers and flush any
	 * pending ASCONFs.  Finally, toss stored cookies and restart the
	 * send timer if data is still outstanding.
	 */
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if ((stcb == NULL) || (net == NULL)) {
		return;
	}

	asoc = &stcb->asoc;
	/* Record the threshold reset if threshold logging is enabled. */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    asoc->overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	asoc->overall_error_count = 0;
	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
		sctp_start_net_timers(stcb);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			/*
			 * A shutdown was requested while the handshake was
			 * still in flight; arm the guard timer now that we
			 * are established.
			 */
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					 stcb->sctp_ep, stcb, NULL);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		/*
		 * NOTE(review): overall_error_count was zeroed just above,
		 * so this condition is always true here; the RTT sample
		 * from time_entered is always taken (matches upstream).
		 */
		if (asoc->overall_error_count == 0) {
			sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
			    SCTP_RTT_FROM_NON_DATA);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Platforms with a socket lock: drop the TCB lock
			 * while taking the socket lock to keep lock
			 * ordering; the refcount keeps the TCB alive in
			 * between.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			/* Only wake the socket layer if it is still open. */
			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
				soisconnected(stcb->sctp_socket);
			}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* We don't need to do the asconf thing,
			 * nor hb or autoclose if the socket is closed.
			 */
			goto closed_socket;
		}

		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
				 stcb, net);


		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
					 stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.asconf_supported == 1) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
					 stcb->sctp_ep, stcb,
					 stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
					 SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
closed_socket:
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	/* Restart the timer if we have pending data */
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->whoTo != NULL) {
			break;
		}
	}
	if (chk != NULL) {
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}
3258
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	/*
	 * Handle an ECN-ECHO (ECNE) chunk from the peer.  Locate the
	 * destination net the reported TSN was sent to, reduce cwnd via the
	 * pluggable congestion-control module at most once per RTT window,
	 * and always answer with a CWR chunk.
	 */
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;
	uint8_t override_bit;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	/* Accept both the current and the old (shorter) ECNE format. */
	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/*
		 * Old format lacks num_pkts_since_cwr: copy into a local
		 * full-size chunk and synthesize a count of 1.
		 */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
	/*
	 * window_data_tsn marks the end of the current "reduction window":
	 * the highest TSN queued for sending, or sending_seq - 1 if the
	 * send queue is empty.
	 */
	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
	if (lchk == NULL) {
		window_data_tsn = stcb->asoc.sending_seq - 1;
	} else {
		window_data_tsn = lchk->rec.data.tsn;
	}

	/* Find where it was sent to if possible. */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.tsn == tsn) {
			net = lchk->whoTo;
			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
			break;
		}
		/* sent_queue is TSN-ordered, so we can stop early. */
		if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
			break;
		}
	}
	if (net == NULL) {
		/*
		 * What to do. A previous send of a
		 * CWR was possibly lost. See how old it is, we
		 * may have it marked on the actual net.
		 */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (tsn == net->last_cwr_tsn) {
				/* Found him, send it off */
				break;
			}
		}
		if (net == NULL) {
			/*
			 * If we reach here, we need to send a special
			 * CWR that says hey, we did this a long time
			 * ago and you lost the response.
			 */
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				/* TSNH */
				return;
			}
			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
		} else {
			override_bit = 0;
		}
	} else {
		override_bit = 0;
	}
	/* Reduce cwnd only once per window, and never for overrides. */
	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
	    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
		/* JRS - Use the congestion control given in the pluggable CC module */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the window_data_tsn
		 */
		net->cwr_window_tsn = window_data_tsn;
		net->ecn_ce_pkt_cnt += pkt_cnt;
		net->lost_cnt = pkt_cnt;
		net->last_cwr_tsn = tsn;
	} else {
		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
		    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window update how
			 * many marks/packets lost we have had.
			 */
			int cnt = 1;
			if (pkt_cnt > net->lost_cnt) {
				/* Should be the case */
				cnt = (pkt_cnt - net->lost_cnt);
				net->ecn_ce_pkt_cnt += cnt;
			}
			net->lost_cnt = pkt_cnt;
			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we are in-window
			 * yet of the initial CE the peer saw.
			 */
			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
		}
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer. Note we set the override when we
	 * could not find the TSN on the chunk or the destination network.
	 */
	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}
3377
3378 static void
3379 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3380 {
3381 /*
3382 * Here we get a CWR from the peer. We must look in the outqueue and
3383 * make sure that we have a covered ECNE in the control chunk part.
3384 * If so remove it.
3385 */
3386 struct sctp_tmit_chunk *chk, *nchk;
3387 struct sctp_ecne_chunk *ecne;
3388 int override;
3389 uint32_t cwr_tsn;
3390
3391 cwr_tsn = ntohl(cp->tsn);
3392 override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3393 TAILQ_FOREACH_SAFE(chk, &stcb->asoc.control_send_queue, sctp_next, nchk) {
3394 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3395 continue;
3396 }
3397 if ((override == 0) && (chk->whoTo != net)) {
3398 /* Must be from the right src unless override is set */
3399 continue;
3400 }
3401 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3402 if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3403 /* this covers this ECNE, we can remove it */
3404 stcb->asoc.ecn_echo_cnt_onq--;
3405 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3406 sctp_next);
3407 stcb->asoc.ctrl_queue_cnt--;
3408 sctp_m_freem(chk->data);
3409 chk->data = NULL;
3410 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3411 if (override == 0) {
3412 break;
3413 }
3414 }
3415 }
3416 }
3417
3418 static void
3419 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
3420 struct sctp_tcb *stcb, struct sctp_nets *net)
3421 {
3422 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3423 struct socket *so;
3424 #endif
3425
3426 SCTPDBG(SCTP_DEBUG_INPUT2,
3427 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
3428 if (stcb == NULL)
3429 return;
3430
3431 /* process according to association state */
3432 if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
3433 /* unexpected SHUTDOWN-COMPLETE... so ignore... */
3434 SCTPDBG(SCTP_DEBUG_INPUT2,
3435 "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
3436 SCTP_TCB_UNLOCK(stcb);
3437 return;
3438 }
3439 /* notify upper layer protocol */
3440 if (stcb->sctp_socket) {
3441 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3442 }
3443 #ifdef INVARIANTS
3444 if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
3445 !TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
3446 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
3447 panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
3448 }
3449 #endif
3450 /* stop the timer */
3451 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
3452 SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
3453 SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
3454 /* free the TCB */
3455 SCTPDBG(SCTP_DEBUG_INPUT2,
3456 "sctp_handle_shutdown_complete: calls free-asoc\n");
3457 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3458 so = SCTP_INP_SO(stcb->sctp_ep);
3459 atomic_add_int(&stcb->asoc.refcnt, 1);
3460 SCTP_TCB_UNLOCK(stcb);
3461 SCTP_SOCKET_LOCK(so, 1);
3462 SCTP_TCB_LOCK(stcb);
3463 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3464 #endif
3465 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
3466 SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
3467 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3468 SCTP_SOCKET_UNLOCK(so, 1);
3469 #endif
3470 return;
3471 }
3472
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	/*
	 * Act on one chunk descriptor from a peer's PACKET-DROPPED report:
	 * depending on the dropped chunk's type, mark the corresponding
	 * outstanding chunk for retransmission or resend the control chunk
	 * directly.  Returns 0 normally, -1 when the reported DATA payload
	 * bytes do not match what we actually sent (corrupt report).
	 */
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly) */
	{
		uint32_t tsn;
		struct sctp_tmit_chunk *tp1;

		tsn = ntohl(desc->tsn_ifany);
		/* First pass: sent_queue is TSN-ordered, so stop early. */
		TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
			if (tp1->rec.data.tsn == tsn) {
				/* found it */
				break;
			}
			if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
				/* not found */
				tp1 = NULL;
				break;
			}
		}
		if (tp1 == NULL) {
			/*
			 * Do it the other way, aka without paying
			 * attention to queue seq order.
			 */
			SCTP_STAT_INCR(sctps_pdrpdnfnd);
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.tsn == tsn) {
					/* found it */
					break;
				}
			}
		}
		if (tp1 == NULL) {
			SCTP_STAT_INCR(sctps_pdrptsnnf);
		}
		/* Only act on chunks that are still unacknowledged. */
		if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
			uint8_t *ddp;

			/*
			 * An end-host report with neither the bad-CRC nor
			 * the middle-box flag carries no retransmit hint.
			 */
			if (((flg & SCTP_BADCRC) == 0) &&
			    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
				return (0);
			}
			/* Peer's rwnd is closed: don't retransmit into it. */
			if ((stcb->asoc.peers_rwnd == 0) &&
			    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
				SCTP_STAT_INCR(sctps_pdrpdiwnp);
				return (0);
			}
			if (stcb->asoc.peers_rwnd == 0 &&
			    (flg & SCTP_FROM_MIDDLE_BOX)) {
				SCTP_STAT_INCR(sctps_pdrpdizrw);
				return (0);
			}
			/*
			 * Cross-check the first payload bytes echoed in the
			 * report against our copy; a mismatch means the
			 * report is bogus.
			 */
			ddp = (uint8_t *) (mtod(tp1->data, caddr_t) +
			    sizeof(struct sctp_data_chunk));
			{
				unsigned int iii;

				for (iii = 0; iii < sizeof(desc->data_bytes);
				    iii++) {
					if (ddp[iii] != desc->data_bytes[iii]) {
						SCTP_STAT_INCR(sctps_pdrpbadd);
						return (-1);
					}
				}
			}

			if (tp1->do_rtt) {
				/*
				 * this guy had a RTO calculation
				 * pending on it, cancel it
				 */
				if (tp1->whoTo->rto_needed == 0) {
					tp1->whoTo->rto_needed = 1;
				}
				tp1->do_rtt = 0;
			}
			SCTP_STAT_INCR(sctps_pdrpmark);
			if (tp1->sent != SCTP_DATAGRAM_RESEND)
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			/*
			 * mark it as if we were doing a FR, since
			 * we will be getting gap ack reports behind
			 * the info from the router.
			 */
			tp1->rec.data.doing_fast_retransmit = 1;
			/*
			 * mark the tsn with what sequences can
			 * cause a new FR.
			 */
			if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
				tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
			} else {
				tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
			}

			/* restart the timer */
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, tp1->whoTo,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, tp1->whoTo);

			/* fix counts and things */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
				    tp1->whoTo->flight_size,
				    tp1->book_size,
				    (uint32_t)(uintptr_t)stcb,
				    tp1->rec.data.tsn);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			tp1->sent = SCTP_DATAGRAM_RESEND;
		} {
			/*
			 * audit code -- note this bare block runs whether or
			 * not tp1 was found/marked above: recount the
			 * RESEND-marked chunks and resync the cached
			 * retransmit counter if it drifted.
			 */
			unsigned int audit;

			audit = 0;
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->sent == SCTP_DATAGRAM_RESEND)
					audit++;
			}
			TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (tp1->sent == SCTP_DATAGRAM_RESEND)
					audit++;
			}
			if (audit != stcb->asoc.sent_queue_retran_cnt) {
				SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
				    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
				stcb->asoc.sent_queue_retran_cnt = audit;
#endif
			}
		}
	}
		break;
	case SCTP_ASCONF:
	{
		struct sctp_tmit_chunk *asconf;

		/* Find the queued ASCONF and mark it for retransmission. */
		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
		    sctp_next) {
			if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
				break;
			}
		}
		if (asconf) {
			if (asconf->sent != SCTP_DATAGRAM_RESEND)
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			asconf->sent = SCTP_DATAGRAM_RESEND;
			/* Don't count this resend against the send limit. */
			asconf->snd_count--;
		}
	}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/* Only retransmit if we KNOW we wont destroy the tcb */
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
	{
		struct sctp_tmit_chunk *cookie;

		/* Find the queued COOKIE-ECHO and mark it for resend. */
		cookie = NULL;
		TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
		    sctp_next) {
			if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
				break;
			}
		}
		if (cookie) {
			if (cookie->sent != SCTP_DATAGRAM_RESEND)
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			cookie->sent = SCTP_DATAGRAM_RESEND;
			sctp_stop_all_cookie_timers(stcb);
		}
	}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_IFORWARD_CUM_TSN:
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3710
3711 void
3712 sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3713 {
3714 uint32_t i;
3715 uint16_t temp;
3716
3717 /*
3718 * We set things to 0xffffffff since this is the last delivered sequence
3719 * and we will be sending in 0 after the reset.
3720 */
3721
3722 if (number_entries) {
3723 for (i = 0; i < number_entries; i++) {
3724 temp = ntohs(list[i]);
3725 if (temp >= stcb->asoc.streamincnt) {
3726 continue;
3727 }
3728 stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
3729 }
3730 } else {
3731 list = NULL;
3732 for (i = 0; i < stcb->asoc.streamincnt; i++) {
3733 stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
3734 }
3735 }
3736 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3737 }
3738
3739 static void
3740 sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3741 {
3742 uint32_t i;
3743 uint16_t temp;
3744
3745 if (number_entries > 0) {
3746 for (i = 0; i < number_entries; i++) {
3747 temp = ntohs(list[i]);
3748 if (temp >= stcb->asoc.streamoutcnt) {
3749 /* no such stream */
3750 continue;
3751 }
3752 stcb->asoc.strmout[temp].next_mid_ordered = 0;
3753 stcb->asoc.strmout[temp].next_mid_unordered = 0;
3754 }
3755 } else {
3756 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3757 stcb->asoc.strmout[i].next_mid_ordered = 0;
3758 stcb->asoc.strmout[i].next_mid_unordered = 0;
3759 }
3760 }
3761 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3762 }
3763
3764 static void
3765 sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3766 {
3767 uint32_t i;
3768 uint16_t temp;
3769
3770 if (number_entries > 0) {
3771 for (i = 0; i < number_entries; i++) {
3772 temp = ntohs(list[i]);
3773 if (temp >= stcb->asoc.streamoutcnt) {
3774 /* no such stream */
3775 continue;
3776 }
3777 stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
3778 }
3779 } else {
3780 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3781 stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
3782 }
3783 }
3784 }
3785
3786
3787 struct sctp_stream_reset_request *
3788 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3789 {
3790 struct sctp_association *asoc;
3791 struct sctp_chunkhdr *ch;
3792 struct sctp_stream_reset_request *r;
3793 struct sctp_tmit_chunk *chk;
3794 int len, clen;
3795
3796 asoc = &stcb->asoc;
3797 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3798 asoc->stream_reset_outstanding = 0;
3799 return (NULL);
3800 }
3801 if (stcb->asoc.str_reset == NULL) {
3802 asoc->stream_reset_outstanding = 0;
3803 return (NULL);
3804 }
3805 chk = stcb->asoc.str_reset;
3806 if (chk->data == NULL) {
3807 return (NULL);
3808 }
3809 if (bchk) {
3810 /* he wants a copy of the chk pointer */
3811 *bchk = chk;
3812 }
3813 clen = chk->send_size;
3814 ch = mtod(chk->data, struct sctp_chunkhdr *);
3815 r = (struct sctp_stream_reset_request *)(ch + 1);
3816 if (ntohl(r->request_seq) == seq) {
3817 /* found it */
3818 return (r);
3819 }
3820 len = SCTP_SIZE32(ntohs(r->ph.param_length));
3821 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3822 /* move to the next one, there can only be a max of two */
3823 r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
3824 if (ntohl(r->request_seq) == seq) {
3825 return (r);
3826 }
3827 }
3828 /* that seq is not here */
3829 return (NULL);
3830 }
3831
3832 static void
3833 sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3834 {
3835 struct sctp_association *asoc;
3836 struct sctp_tmit_chunk *chk;
3837
3838 asoc = &stcb->asoc;
3839 chk = asoc->str_reset;
3840 if (chk == NULL) {
3841 return;
3842 }
3843 asoc->str_reset = NULL;
3844 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
3845 NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
3846 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
3847 asoc->ctrl_queue_cnt--;
3848 if (chk->data) {
3849 sctp_m_freem(chk->data);
3850 chk->data = NULL;
3851 }
3852 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3853 }
3854
3855
/*
 * Process the peer's response to a stream-reset request that we sent
 * earlier. 'seq' is the response sequence number, 'action' the result
 * code, and 'respin' the raw response parameter (only needed for the
 * TSN-reset case; may be NULL otherwise). Returns 1 if the association
 * was aborted while processing an embedded FORWARD-TSN, 0 otherwise.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparam_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_request *req_param;
	struct sctp_stream_reset_out_request *req_out_param;
	struct sctp_stream_reset_in_request *req_in_param;
	uint32_t number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		/* This answers our current outstanding request. */
		req_param = sctp_find_stream_reset(stcb, seq, &chk);
		if (req_param != NULL) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(req_param->ph.param_type);
			lparam_len = ntohs(req_param->ph.param_length);
			/* Dispatch on the type of the request we had sent. */
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				int no_clear = 0;

				req_out_param = (struct sctp_stream_reset_out_request *)req_param;
				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
					/* Set it up so we don't stop retransmitting */
					asoc->stream_reset_outstanding++;
					stcb->asoc.str_reset_seq_out--;
					asoc->stream_reset_out_is_outstanding = 1;
					no_clear = 1;
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
				if (no_clear == 0) {
					/* Request is settled; re-open the streams. */
					sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* We asked the peer to reset its outgoing streams. */
				req_in_param = (struct sctp_stream_reset_in_request *)req_param;
				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
				/* Ok we now may have more streams */
				int num_stream;

				num_stream = stcb->asoc.strm_pending_add_size;
				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
					/* TSNH */
					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
				}
				stcb->asoc.strm_pending_add_size = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* Put the new streams into effect */
					int i;
					/* The array space was pre-allocated when the request was sent. */
					for ( i = asoc->streamoutcnt; i< (asoc->streamoutcnt + num_stream); i++) {
						asoc->strmout[i].state = SCTP_STREAM_OPEN;
					}
					asoc->streamoutcnt += num_stream;
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
				/* The peer adds the incoming streams; we only report. */
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;
				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
					/* Too short to carry the two TSN fields. */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/* Treat everything below the peer's next TSN as delivered. */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					/* Rebase the receive mapping arrays on the new TSN. */
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}

					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);

					/* Adopt the TSN the peer selected for us to send next. */
					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_DENIED);
				} else {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_FAILED);
				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	if (asoc->stream_reset_outstanding == 0) {
		/* Nothing pending any more; kick out any queued reset of ours. */
		sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
	}
	return (0);
}
4017
/*
 * Handle an incoming SSN/TSN Reset Request (IN) parameter: the peer asks
 * us to reset (some of) our outgoing streams. We either deny (feature
 * disabled, truncated parameter, or bad stream id), defer (our own
 * outgoing reset is still in flight), or mark the streams
 * RESET_PENDING and report PERFORMED. Retransmitted requests (seq one
 * or two behind) get the cached result echoed back. 'trunc' is set when
 * the parameter did not fit our parse buffer.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* In-sequence request: compute a fresh result. */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Can't do it, since they exceeded our buffer size */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			if (number_entries) {
				/*
				 * First pass validates every id and converts the
				 * list to host byte order in place; second pass
				 * marks the streams pending.
				 */
				for (i = 0; i < number_entries; i++) {
					temp = ntohs(req->list_of_streams[i]);
					if (temp >= stcb->asoc.streamoutcnt) {
						asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
						goto bad_boy;
					}
					req->list_of_streams[i] = temp;
				}
				for (i = 0; i < number_entries; i++) {
					if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
						stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
					}
				}
			} else {
				/* Its all */
				for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
				}
			}
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		}
bad_boy:
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Retransmission: echo the last result. */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* Two back: echo the result before that. */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	/* A pending reset of ours may be able to go out now. */
	sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
}
4083
/*
 * Handle an incoming SSN/TSN Reset Request (association reset): reset
 * all incoming and outgoing streams and jump the TSNs on both sides.
 * Returns 1 if the association was aborted while processing the
 * internally generated FORWARD-TSN, 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* In-sequence request: compute a fresh result. */
		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Flush everything received so far via a synthetic FWD-TSN. */
			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
			fwdtsn.ch.chunk_flags = 0;
			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
			if (abort_flag) {
				return (1);
			}
			/* Rebase the receive mapping arrays past the jump. */
			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
			atomic_add_int(&asoc->sending_seq, 1);
			/* save off historical data for retrans */
			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
			asoc->last_sending_seq[0] = asoc->sending_seq;
			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
		}
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Retransmission: echo the saved result and TSNs. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* Two back: echo the result before that. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	return (0);
}
4148
/*
 * Handle an incoming Outgoing SSN Reset Request parameter: the peer is
 * resetting (some of) its outgoing streams, i.e. our incoming ones. If
 * all data up to the announced TSN has arrived we reset immediately;
 * otherwise the request is queued on resetHead and reported
 * IN_PROGRESS. 'trunc' is set when the parameter did not fit our parse
 * buffer, in which case we deny.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Request was larger than our parse buffer; deny. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
				return;
			}
			liste->seq = seq;
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}
4225
/*
 * Handle an incoming Add Incoming Streams Request parameter: the peer
 * wants us to accept more incoming streams. If the new total is within
 * our limits, reallocate the strmin array, migrating any queued data to
 * the new array, and initialize the added streams.
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams.
	 * If its within our max-streams we will
	 * allow it.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		num_stream += stcb->asoc.streamincnt;
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		           (num_stream > 0xffff)) {
			/* We must reject it they ask for to many */
	denied:
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* Allocation failed: keep the old array and deny. */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
				/* now anything on those queues? */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
				}
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				/* 0xffffffff: next MID 0 is accepted in-sequence. */
				stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
				stcb->asoc.strmin[i].pd_api_started = 0;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}
4319
4320 static void
4321 sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
4322 struct sctp_stream_reset_add_strm *str_add)
4323 {
4324 /*
4325 * Peer is requesting to add more streams.
4326 * If its within our max-streams we will
4327 * allow it.
4328 */
4329 uint16_t num_stream;
4330 uint32_t seq;
4331 struct sctp_association *asoc = &stcb->asoc;
4332
4333 /* Get the number. */
4334 seq = ntohl(str_add->request_seq);
4335 num_stream = ntohs(str_add->number_of_streams);
4336 /* Now what would be the new total? */
4337 if (asoc->str_reset_seq_in == seq) {
4338 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
4339 if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
4340 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4341 } else if (stcb->asoc.stream_reset_outstanding) {
4342 /* We must reject it we have something pending */
4343 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
4344 } else {
4345 /* Ok, we can do that :-) */
4346 int mychk;
4347 mychk = stcb->asoc.streamoutcnt;
4348 mychk += num_stream;
4349 if (mychk < 0x10000) {
4350 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4351 if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
4352 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4353 }
4354 } else {
4355 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4356 }
4357 }
4358 sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
4359 asoc->str_reset_seq_in++;
4360 } else if ((asoc->str_reset_seq_in - 1) == seq) {
4361 /*
4362 * one seq back, just echo back last action since my
4363 * response was lost.
4364 */
4365 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4366 } else if ((asoc->str_reset_seq_in - 2) == seq) {
4367 /*
4368 * two seq back, just echo back last action since my
4369 * response was lost.
4370 */
4371 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4372 } else {
4373 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4374 }
4375 }
4376
#if !defined(__Panda__)
#ifdef __GNUC__
__attribute__ ((noinline))
#endif
#endif
/*
 * Parse an incoming RE-CONFIG (STREAM_RESET) chunk at 'offset' in mbuf
 * chain 'm' and dispatch each embedded request/response parameter to
 * its handler. A response chunk is built up in a freshly allocated
 * control chunk and queued if at least one request was seen. Returns
 * non-zero when a handler aborted the association.
 */
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_chunkhdr *ch_req)
{
	uint16_t remaining_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
	uint32_t seq = 0;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	/*
	 * NOTE(review): assumes the caller validated chunk_length >=
	 * sizeof(struct sctp_chunkhdr); otherwise this subtraction would
	 * wrap — confirm against the caller's length checks.
	 */
	remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	/* Starts as a bare chunk header; handlers append result params. */
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
strres_nochunk:
		/* Common error exit: free any partial response chunk. */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = NULL;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* Walk the chunk's parameters one at a time. */
	while (remaining_length >= sizeof(struct sctp_paramhdr)) {
		/* First peek at just the parameter header. */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		param_len = ntohs(ph->param_length);
		if ((param_len > remaining_length) ||
		    (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
			/* bad parameter length */
			break;
		}
		/* Then pull as much of the parameter as fits into cstore. */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
		    (uint8_t *)&cstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > sizeof(cstore)) {
			/* Parameter larger than our buffer: handlers must deny. */
			trunc = 1;
		} else {
			trunc = 0;
		}
		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
				break;
			}
			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* Association was aborted during handling. */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			if (param_len < sizeof(struct sctp_stream_reset_response)) {
				break;
			}
			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				/* Association was aborted during handling. */
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* Unknown parameter type: stop parsing. */
			break;
		}
		/* Advance to the next 32-bit-aligned parameter. */
		offset += SCTP_SIZE32(param_len);
		if (remaining_length >= SCTP_SIZE32(param_len)) {
			remaining_length -= SCTP_SIZE32(param_len);
		} else {
			remaining_length = 0;
		}
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
4552
4553 /*
4554 * Handle a router or endpoints report of a packet loss, there are two ways
4555 * to handle this, either we get the whole packet and must disect it
4556 * ourselves (possibly with truncation and or corruption) or it is a summary
4557 * from a middle box that did the disectting for us.
4558 */
4559 static void
4560 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4561 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4562 {
4563 uint32_t bottle_bw, on_queue;
4564 uint16_t trunc_len;
4565 unsigned int chlen;
4566 unsigned int at;
4567 struct sctp_chunk_desc desc;
4568 struct sctp_chunkhdr *ch;
4569
4570 chlen = ntohs(cp->ch.chunk_length);
4571 chlen -= sizeof(struct sctp_pktdrop_chunk);
4572 /* XXX possible chlen underflow */
4573 if (chlen == 0) {
4574 ch = NULL;
4575 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4576 SCTP_STAT_INCR(sctps_pdrpbwrpt);
4577 } else {
4578 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4579 chlen -= sizeof(struct sctphdr);
4580 /* XXX possible chlen underflow */
4581 memset(&desc, 0, sizeof(desc));
4582 }
4583 trunc_len = (uint16_t) ntohs(cp->trunc_len);
4584 if (trunc_len > limit) {
4585 trunc_len = limit;
4586 }
4587
4588 /* now the chunks themselves */
4589 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4590 desc.chunk_type = ch->chunk_type;
4591 /* get amount we need to move */
4592 at = ntohs(ch->chunk_length);
4593 if (at < sizeof(struct sctp_chunkhdr)) {
4594 /* corrupt chunk, maybe at the end? */
4595 SCTP_STAT_INCR(sctps_pdrpcrupt);
4596 break;
4597 }
4598 if (trunc_len == 0) {
4599 /* we are supposed to have all of it */
4600 if (at > chlen) {
4601 /* corrupt skip it */
4602 SCTP_STAT_INCR(sctps_pdrpcrupt);
4603 break;
4604 }
4605 } else {
4606 /* is there enough of it left ? */
4607 if (desc.chunk_type == SCTP_DATA) {
4608 if (chlen < (sizeof(struct sctp_data_chunk) +
4609 sizeof(desc.data_bytes))) {
4610 break;
4611 }
4612 } else {
4613 if (chlen < sizeof(struct sctp_chunkhdr)) {
4614 break;
4615 }
4616 }
4617 }
4618 if (desc.chunk_type == SCTP_DATA) {
4619 /* can we get out the tsn? */
4620 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4621 SCTP_STAT_INCR(sctps_pdrpmbda);
4622
4623 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4624 /* yep */
4625 struct sctp_data_chunk *dcp;
4626 uint8_t *ddp;
4627 unsigned int iii;
4628
4629 dcp = (struct sctp_data_chunk *)ch;
4630 ddp = (uint8_t *) (dcp + 1);
4631 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4632 desc.data_bytes[iii] = ddp[iii];
4633 }
4634 desc.tsn_ifany = dcp->dp.tsn;
4635 } else {
4636 /* nope we are done. */
4637 SCTP_STAT_INCR(sctps_pdrpnedat);
4638 break;
4639 }
4640 } else {
4641 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4642 SCTP_STAT_INCR(sctps_pdrpmbct);
4643 }
4644
4645 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4646 SCTP_STAT_INCR(sctps_pdrppdbrk);
4647 break;
4648 }
4649 if (SCTP_SIZE32(at) > chlen) {
4650 break;
4651 }
4652 chlen -= SCTP_SIZE32(at);
4653 if (chlen < sizeof(struct sctp_chunkhdr)) {
4654 /* done, none left */
4655 break;
4656 }
4657 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4658 }
4659 /* Now update any rwnd --- possibly */
4660 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4661 /* From a peer, we get a rwnd report */
4662 uint32_t a_rwnd;
4663
4664 SCTP_STAT_INCR(sctps_pdrpfehos);
4665
4666 bottle_bw = ntohl(cp->bottle_bw);
4667 on_queue = ntohl(cp->current_onq);
4668 if (bottle_bw && on_queue) {
4669 /* a rwnd report is in here */
4670 if (bottle_bw > on_queue)
4671 a_rwnd = bottle_bw - on_queue;
4672 else
4673 a_rwnd = 0;
4674
4675 if (a_rwnd == 0)
4676 stcb->asoc.peers_rwnd = 0;
4677 else {
4678 if (a_rwnd > stcb->asoc.total_flight) {
4679 stcb->asoc.peers_rwnd =
4680 a_rwnd - stcb->asoc.total_flight;
4681 } else {
4682 stcb->asoc.peers_rwnd = 0;
4683 }
4684 if (stcb->asoc.peers_rwnd <
4685 stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4686 /* SWS sender side engages */
4687 stcb->asoc.peers_rwnd = 0;
4688 }
4689 }
4690 }
4691 } else {
4692 SCTP_STAT_INCR(sctps_pdrpfmbox);
4693 }
4694
4695 /* now middle boxes in sat networks get a cwnd bump */
4696 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4697 (stcb->asoc.sat_t3_loss_recovery == 0) &&
4698 (stcb->asoc.sat_network)) {
4699 /*
4700 * This is debatable but for sat networks it makes sense
4701 * Note if a T3 timer has went off, we will prohibit any
4702 * changes to cwnd until we exit the t3 loss recovery.
4703 */
4704 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4705 net, cp, &bottle_bw, &on_queue);
4706 }
4707 }
4708
4709 /*
4710 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4711 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4712 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4713 * length of the complete packet outputs: - length: modified to remaining
4714 * length after control processing - netp: modified to new sctp_nets after
4715 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4716 * bad packet,...) otherwise return the tcb for this packet
4717 */
4718 #if !defined(__Panda__)
4719 #ifdef __GNUC__
4720 __attribute__ ((noinline))
4721 #endif
4722 #endif
4723 static struct sctp_tcb *
4724 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4725 struct sockaddr *src, struct sockaddr *dst,
4726 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4727 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4728 #if defined(__FreeBSD__)
4729 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4730 #endif
4731 uint32_t vrf_id, uint16_t port)
4732 {
4733 struct sctp_association *asoc;
4734 struct mbuf *op_err;
4735 char msg[SCTP_DIAG_INFO_LEN];
4736 uint32_t vtag_in;
4737 int num_chunks = 0; /* number of control chunks processed */
4738 uint32_t chk_length, contiguous;
4739 int ret;
4740 int abort_no_unlock = 0;
4741 int ecne_seen = 0;
4742 /*
4743 * How big should this be, and should it be alloc'd? Lets try the
4744 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4745 * until we get into jumbo grams and such..
4746 */
4747 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4748 int got_auth = 0;
4749 uint32_t auth_offset = 0, auth_len = 0;
4750 int auth_skipped = 0;
4751 int asconf_cnt = 0;
4752 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4753 struct socket *so;
4754 #endif
4755
4756 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4757 iphlen, *offset, length, (void *)stcb);
4758
4759 if (stcb) {
4760 SCTP_TCB_LOCK_ASSERT(stcb);
4761 }
4762 /* validate chunk header length... */
4763 if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4764 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4765 ntohs(ch->chunk_length));
4766 *offset = length;
4767 return (stcb);
4768 }
4769 /*
4770 * validate the verification tag
4771 */
4772 vtag_in = ntohl(sh->v_tag);
4773
4774 if (ch->chunk_type == SCTP_INITIATION) {
4775 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4776 ntohs(ch->chunk_length), vtag_in);
4777 if (vtag_in != 0) {
4778 /* protocol error- silently discard... */
4779 SCTP_STAT_INCR(sctps_badvtag);
4780 if (stcb != NULL) {
4781 SCTP_TCB_UNLOCK(stcb);
4782 }
4783 return (NULL);
4784 }
4785 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4786 /*
4787 * If there is no stcb, skip the AUTH chunk and process
4788 * later after a stcb is found (to validate the lookup was
4789 * valid.
4790 */
4791 if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4792 (stcb == NULL) &&
4793 (inp->auth_supported == 1)) {
4794 /* save this chunk for later processing */
4795 auth_skipped = 1;
4796 auth_offset = *offset;
4797 auth_len = ntohs(ch->chunk_length);
4798
4799 /* (temporarily) move past this chunk */
4800 *offset += SCTP_SIZE32(auth_len);
4801 if (*offset >= length) {
4802 /* no more data left in the mbuf chain */
4803 *offset = length;
4804 return (NULL);
4805 }
4806 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4807 sizeof(struct sctp_chunkhdr), chunk_buf);
4808 }
4809 if (ch == NULL) {
4810 /* Help */
4811 *offset = length;
4812 return (stcb);
4813 }
4814 if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4815 goto process_control_chunks;
4816 }
4817 /*
4818 * first check if it's an ASCONF with an unknown src addr we
4819 * need to look inside to find the association
4820 */
4821 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4822 struct sctp_chunkhdr *asconf_ch = ch;
4823 uint32_t asconf_offset = 0, asconf_len = 0;
4824
4825 /* inp's refcount may be reduced */
4826 SCTP_INP_INCR_REF(inp);
4827
4828 asconf_offset = *offset;
4829 do {
4830 asconf_len = ntohs(asconf_ch->chunk_length);
4831 if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4832 break;
4833 stcb = sctp_findassociation_ep_asconf(m,
4834 *offset,
4835 dst,
4836 sh, &inp, netp, vrf_id);
4837 if (stcb != NULL)
4838 break;
4839 asconf_offset += SCTP_SIZE32(asconf_len);
4840 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4841 sizeof(struct sctp_chunkhdr), chunk_buf);
4842 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4843 if (stcb == NULL) {
4844 /*
4845 * reduce inp's refcount if not reduced in
4846 * sctp_findassociation_ep_asconf().
4847 */
4848 SCTP_INP_DECR_REF(inp);
4849 }
4850
4851 /* now go back and verify any auth chunk to be sure */
4852 if (auth_skipped && (stcb != NULL)) {
4853 struct sctp_auth_chunk *auth;
4854
4855 if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
4856 auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, chunk_buf);
4857 got_auth = 1;
4858 auth_skipped = 0;
4859 } else {
4860 auth = NULL;
4861 }
4862 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4863 auth_offset)) {
4864 /* auth HMAC failed so dump it */
4865 *offset = length;
4866 return (stcb);
4867 } else {
4868 /* remaining chunks are HMAC checked */
4869 stcb->asoc.authenticated = 1;
4870 }
4871 }
4872 }
4873 if (stcb == NULL) {
4874 SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4875 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4876 msg);
4877 /* no association, so it's out of the blue... */
4878 sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
4879 #if defined(__FreeBSD__)
4880 mflowtype, mflowid, inp->fibnum,
4881 #endif
4882 vrf_id, port);
4883 *offset = length;
4884 return (NULL);
4885 }
4886 asoc = &stcb->asoc;
4887 /* ABORT and SHUTDOWN can use either v_tag... */
4888 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4889 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4890 (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4891 /* Take the T-bit always into account. */
4892 if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
4893 (vtag_in == asoc->my_vtag)) ||
4894 (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
4895 (asoc->peer_vtag != htonl(0)) &&
4896 (vtag_in == asoc->peer_vtag))) {
4897 /* this is valid */
4898 } else {
4899 /* drop this packet... */
4900 SCTP_STAT_INCR(sctps_badvtag);
4901 if (stcb != NULL) {
4902 SCTP_TCB_UNLOCK(stcb);
4903 }
4904 return (NULL);
4905 }
4906 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4907 if (vtag_in != asoc->my_vtag) {
4908 /*
4909 * this could be a stale SHUTDOWN-ACK or the
4910 * peer never got the SHUTDOWN-COMPLETE and
4911 * is still hung; we have started a new asoc
4912 * but it won't complete until the shutdown
4913 * is completed
4914 */
4915 if (stcb != NULL) {
4916 SCTP_TCB_UNLOCK(stcb);
4917 }
4918 SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4919 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4920 msg);
4921 sctp_handle_ootb(m, iphlen, *offset, src, dst,
4922 sh, inp, op_err,
4923 #if defined(__FreeBSD__)
4924 mflowtype, mflowid, fibnum,
4925 #endif
4926 vrf_id, port);
4927 return (NULL);
4928 }
4929 } else {
4930 /* for all other chunks, vtag must match */
4931 if (vtag_in != asoc->my_vtag) {
4932 /* invalid vtag... */
4933 SCTPDBG(SCTP_DEBUG_INPUT3,
4934 "invalid vtag: %xh, expect %xh\n",
4935 vtag_in, asoc->my_vtag);
4936 SCTP_STAT_INCR(sctps_badvtag);
4937 if (stcb != NULL) {
4938 SCTP_TCB_UNLOCK(stcb);
4939 }
4940 *offset = length;
4941 return (NULL);
4942 }
4943 }
4944 } /* end if !SCTP_COOKIE_ECHO */
4945 /*
4946 * process all control chunks...
4947 */
4948 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4949 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4950 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4951 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4952 /* implied cookie-ack.. we must have lost the ack */
4953 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4954 *netp);
4955 }
4956
4957 process_control_chunks:
4958 while (IS_SCTP_CONTROL(ch)) {
4959 /* validate chunk length */
4960 chk_length = ntohs(ch->chunk_length);
4961 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4962 ch->chunk_type, chk_length);
4963 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4964 if (chk_length < sizeof(*ch) ||
4965 (*offset + (int)chk_length) > length) {
4966 *offset = length;
4967 return (stcb);
4968 }
4969 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4970 /*
4971 * INIT and INIT-ACK only gets the init ack "header" portion
4972 * only because we don't have to process the peer's COOKIE.
4973 * All others get a complete chunk.
4974 */
4975 switch (ch->chunk_type) {
4976 case SCTP_INITIATION:
4977 contiguous = sizeof(struct sctp_init_chunk);
4978 break;
4979 case SCTP_INITIATION_ACK:
4980 contiguous = sizeof(struct sctp_init_ack_chunk);
4981 break;
4982 default:
4983 contiguous = min(chk_length, sizeof(chunk_buf));
4984 break;
4985 }
4986 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4987 contiguous,
4988 chunk_buf);
4989 if (ch == NULL) {
4990 *offset = length;
4991 if (stcb != NULL) {
4992 SCTP_TCB_UNLOCK(stcb);
4993 }
4994 return (NULL);
4995 }
4996
4997 num_chunks++;
4998 /* Save off the last place we got a control from */
4999 if (stcb != NULL) {
5000 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
5001 /*
5002 * allow last_control to be NULL if
5003 * ASCONF... ASCONF processing will find the
5004 * right net later
5005 */
5006 if ((netp != NULL) && (*netp != NULL))
5007 stcb->asoc.last_control_chunk_from = *netp;
5008 }
5009 }
5010 #ifdef SCTP_AUDITING_ENABLED
5011 sctp_audit_log(0xB0, ch->chunk_type);
5012 #endif
5013
5014 /* check to see if this chunk required auth, but isn't */
5015 if ((stcb != NULL) &&
5016 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
5017 !stcb->asoc.authenticated) {
5018 /* "silently" ignore */
5019 SCTP_STAT_INCR(sctps_recvauthmissing);
5020 goto next_chunk;
5021 }
5022 switch (ch->chunk_type) {
5023 case SCTP_INITIATION:
5024 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
5025 /* The INIT chunk must be the only chunk. */
5026 if ((num_chunks > 1) ||
5027 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5028 /* RFC 4960 requires that no ABORT is sent */
5029 *offset = length;
5030 if (stcb != NULL) {
5031 SCTP_TCB_UNLOCK(stcb);
5032 }
5033 return (NULL);
5034 }
5035 /* Honor our resource limit. */
5036 if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
5037 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5038 sctp_abort_association(inp, stcb, m, iphlen,
5039 src, dst, sh, op_err,
5040 #if defined(__FreeBSD__)
5041 mflowtype, mflowid,
5042 #endif
5043 vrf_id, port);
5044 *offset = length;
5045 return (NULL);
5046 }
5047 sctp_handle_init(m, iphlen, *offset, src, dst, sh,
5048 (struct sctp_init_chunk *)ch, inp,
5049 stcb, *netp, &abort_no_unlock,
5050 #if defined(__FreeBSD__)
5051 mflowtype, mflowid,
5052 #endif
5053 vrf_id, port);
5054 *offset = length;
5055 if ((!abort_no_unlock) && (stcb != NULL)) {
5056 SCTP_TCB_UNLOCK(stcb);
5057 }
5058 return (NULL);
5059 break;
5060 case SCTP_PAD_CHUNK:
5061 break;
5062 case SCTP_INITIATION_ACK:
5063 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT_ACK\n");
5064 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5065 /* We are not interested anymore */
5066 if ((stcb != NULL) && (stcb->asoc.total_output_queue_size)) {
5067 ;
5068 } else {
5069 *offset = length;
5070 if (stcb != NULL) {
5071 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5072 so = SCTP_INP_SO(inp);
5073 atomic_add_int(&stcb->asoc.refcnt, 1);
5074 SCTP_TCB_UNLOCK(stcb);
5075 SCTP_SOCKET_LOCK(so, 1);
5076 SCTP_TCB_LOCK(stcb);
5077 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5078 #endif
5079 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5080 SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5081 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5082 SCTP_SOCKET_UNLOCK(so, 1);
5083 #endif
5084 }
5085 return (NULL);
5086 }
5087 }
5088 /* The INIT-ACK chunk must be the only chunk. */
5089 if ((num_chunks > 1) ||
5090 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5091 *offset = length;
5092 return (stcb);
5093 }
5094 if ((netp != NULL) && (*netp != NULL)) {
5095 ret = sctp_handle_init_ack(m, iphlen, *offset,
5096 src, dst, sh,
5097 (struct sctp_init_ack_chunk *)ch,
5098 stcb, *netp,
5099 &abort_no_unlock,
5100 #if defined(__FreeBSD__)
5101 mflowtype, mflowid,
5102 #endif
5103 vrf_id);
5104 } else {
5105 ret = -1;
5106 }
5107 *offset = length;
5108 if (abort_no_unlock) {
5109 return (NULL);
5110 }
5111 /*
5112 * Special case, I must call the output routine to
5113 * get the cookie echoed
5114 */
5115 if ((stcb != NULL) && (ret == 0)) {
5116 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5117 }
5118 return (stcb);
5119 break;
5120 case SCTP_SELECTIVE_ACK:
5121 case SCTP_NR_SELECTIVE_ACK:
5122 {
5123 int abort_now = 0;
5124 uint32_t a_rwnd, cum_ack;
5125 uint16_t num_seg, num_nr_seg, num_dup;
5126 uint8_t flags;
5127 int offset_seg, offset_dup;
5128
5129 SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
5130 ch->chunk_type == SCTP_SELECTIVE_ACK ? "SCTP_SACK" : "SCTP_NR_SACK");
5131 SCTP_STAT_INCR(sctps_recvsacks);
5132 if (stcb == NULL) {
5133 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing %s chunk\n",
5134 (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK");
5135 break;
5136 }
5137 if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
5138 if (chk_length < sizeof(struct sctp_sack_chunk)) {
5139 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
5140 break;
5141 }
5142 } else {
5143 if (stcb->asoc.nrsack_supported == 0) {
5144 goto unknown_chunk;
5145 }
5146 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
5147 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR_SACK chunk, too small\n");
5148 break;
5149 }
5150 }
5151 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
5152 /*-
5153 * If we have sent a shutdown-ack, we will pay no
5154 * attention to a sack sent in to us since
5155 * we don't care anymore.
5156 */
5157 break;
5158 }
5159 flags = ch->chunk_flags;
5160 if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
5161 struct sctp_sack_chunk *sack;
5162
5163 sack = (struct sctp_sack_chunk *)ch;
5164 cum_ack = ntohl(sack->sack.cum_tsn_ack);
5165 num_seg = ntohs(sack->sack.num_gap_ack_blks);
5166 num_nr_seg = 0;
5167 num_dup = ntohs(sack->sack.num_dup_tsns);
5168 a_rwnd = ntohl(sack->sack.a_rwnd);
5169 if (sizeof(struct sctp_sack_chunk) +
5170 num_seg * sizeof(struct sctp_gap_ack_block) +
5171 num_dup * sizeof(uint32_t) != chk_length) {
5172 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
5173 break;
5174 }
5175 offset_seg = *offset + sizeof(struct sctp_sack_chunk);
5176 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
5177 } else {
5178 struct sctp_nr_sack_chunk *nr_sack;
5179
5180 nr_sack = (struct sctp_nr_sack_chunk *)ch;
5181 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
5182 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
5183 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
5184 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
5185 a_rwnd = ntohl(nr_sack->nr_sack.a_rwnd);
5186 if (sizeof(struct sctp_nr_sack_chunk) +
5187 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
5188 num_dup * sizeof(uint32_t) != chk_length) {
5189 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
5190 break;
5191 }
5192 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
5193 offset_dup = offset_seg + (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block);
5194 }
5195 SCTPDBG(SCTP_DEBUG_INPUT3, "%s process cum_ack:%x num_seg:%d a_rwnd:%d\n",
5196 (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK",
5197 cum_ack, num_seg, a_rwnd);
5198 stcb->asoc.seen_a_sack_this_pkt = 1;
5199 if ((stcb->asoc.pr_sctp_cnt == 0) &&
5200 (num_seg == 0) && (num_nr_seg == 0) &&
5201 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
5202 (stcb->asoc.saw_sack_with_frags == 0) &&
5203 (stcb->asoc.saw_sack_with_nr_frags == 0) &&
5204 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
5205 /*
5206 * We have a SIMPLE sack having no
5207 * prior segments and data on sent
5208 * queue to be acked. Use the
5209 * faster path sack processing. We
5210 * also allow window update sacks
5211 * with no missing segments to go
5212 * this way too.
5213 */
5214 sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
5215 &abort_now, ecne_seen);
5216 } else {
5217 if ((netp != NULL) && (*netp != NULL)) {
5218 sctp_handle_sack(m, offset_seg, offset_dup, stcb,
5219 num_seg, num_nr_seg, num_dup, &abort_now, flags,
5220 cum_ack, a_rwnd, ecne_seen);
5221 }
5222 }
5223 if (abort_now) {
5224 /* ABORT signal from sack processing */
5225 *offset = length;
5226 return (NULL);
5227 }
5228 if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
5229 TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
5230 (stcb->asoc.stream_queue_cnt == 0)) {
5231 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
5232 }
5233 break;
5234 }
5235 case SCTP_HEARTBEAT_REQUEST:
5236 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
5237 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5238 SCTP_STAT_INCR(sctps_recvheartbeat);
5239 sctp_send_heartbeat_ack(stcb, m, *offset,
5240 chk_length, *netp);
5241 }
5242 break;
5243 case SCTP_HEARTBEAT_ACK:
5244 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT_ACK\n");
5245 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
5246 /* Its not ours */
5247 *offset = length;
5248 return (stcb);
5249 }
5250 SCTP_STAT_INCR(sctps_recvheartbeatack);
5251 if ((netp != NULL) && (*netp != NULL)) {
5252 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
5253 stcb, *netp);
5254 }
5255 break;
5256 case SCTP_ABORT_ASSOCIATION:
5257 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
5258 (void *)stcb);
5259 *offset = length;
5260 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5261 if (sctp_handle_abort((struct sctp_abort_chunk *)ch, stcb, *netp)) {
5262 return (NULL);
5263 } else {
5264 return (stcb);
5265 }
5266 } else {
5267 return (NULL);
5268 }
5269 break;
5270 case SCTP_SHUTDOWN:
5271 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
5272 (void *)stcb);
5273 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
5274 *offset = length;
5275 return (stcb);
5276 }
5277 if ((netp != NULL) && (*netp != NULL)) {
5278 int abort_flag = 0;
5279
5280 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
5281 stcb, *netp, &abort_flag);
5282 if (abort_flag) {
5283 *offset = length;
5284 return (NULL);
5285 }
5286 }
5287 break;
5288 case SCTP_SHUTDOWN_ACK:
5289 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_ACK, stcb %p\n", (void *)stcb);
5290 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5291 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
5292 }
5293 *offset = length;
5294 return (NULL);
5295 break;
5296 case SCTP_OPERATION_ERROR:
5297 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP_ERR\n");
5298 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL) &&
5299 sctp_handle_error(ch, stcb, *netp, contiguous) < 0) {
5300 *offset = length;
5301 return (NULL);
5302 }
5303 break;
5304 case SCTP_COOKIE_ECHO:
5305 SCTPDBG(SCTP_DEBUG_INPUT3,
5306 "SCTP_COOKIE_ECHO, stcb %p\n", (void *)stcb);
5307 if ((stcb != NULL) && (stcb->asoc.total_output_queue_size > 0)) {
5308 ;
5309 } else {
5310 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5311 /* We are not interested anymore */
5312 abend:
5313 if (stcb != NULL) {
5314 SCTP_TCB_UNLOCK(stcb);
5315 }
5316 *offset = length;
5317 return (NULL);
5318 }
5319 }
5320 /*-
5321 * First are we accepting? We do this again here
5322 * since it is possible that a previous endpoint WAS
5323 * listening responded to a INIT-ACK and then
5324 * closed. We opened and bound.. and are now no
5325 * longer listening.
5326 *
5327 * XXXGL: notes on checking listen queue length.
5328 * 1) SCTP_IS_LISTENING() doesn't necessarily mean
5329 * SOLISTENING(), because a listening "UDP type"
5330 * socket isn't listening in terms of the socket
5331 * layer. It is a normal data flow socket, that
5332 * can fork off new connections. Thus, we should
5333 * look into sol_qlen only in case we are !UDP.
5334 * 2) Checking sol_qlen in general requires locking
5335 * the socket, and this code lacks that.
5336 */
5337 if ((stcb == NULL) &&
5338 (!SCTP_IS_LISTENING(inp) ||
5339 (!(inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
5340 #if defined(__FreeBSD__) && __FreeBSD_version >= 1200034
5341 inp->sctp_socket->sol_qlen >= inp->sctp_socket->sol_qlimit))) {
5342 #else
5343 inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit))) {
5344 #endif
5345 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
5346 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
5347 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5348 sctp_abort_association(inp, stcb, m, iphlen,
5349 src, dst, sh, op_err,
5350 #if defined(__FreeBSD__)
5351 mflowtype, mflowid,
5352 #endif
5353 vrf_id, port);
5354 }
5355 *offset = length;
5356 return (NULL);
5357 } else {
5358 struct mbuf *ret_buf;
5359 struct sctp_inpcb *linp;
5360 struct sctp_tmit_chunk *chk;
5361
5362 if (stcb) {
5363 linp = NULL;
5364 } else {
5365 linp = inp;
5366 }
5367
5368 if (linp != NULL) {
5369 SCTP_ASOC_CREATE_LOCK(linp);
5370 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5371 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5372 SCTP_ASOC_CREATE_UNLOCK(linp);
5373 goto abend;
5374 }
5375 }
5376
5377 if (netp != NULL) {
5378 struct sctp_tcb *locked_stcb;
5379
5380 locked_stcb = stcb;
5381 ret_buf =
5382 sctp_handle_cookie_echo(m, iphlen,
5383 *offset,
5384 src, dst,
5385 sh,
5386 (struct sctp_cookie_echo_chunk *)ch,
5387 &inp, &stcb, netp,
5388 auth_skipped,
5389 auth_offset,
5390 auth_len,
5391 &locked_stcb,
5392 #if defined(__FreeBSD__)
5393 mflowtype,
5394 mflowid,
5395 #endif
5396 vrf_id,
5397 port);
5398 if ((locked_stcb != NULL) && (locked_stcb != stcb)) {
5399 SCTP_TCB_UNLOCK(locked_stcb);
5400 }
5401 if (stcb != NULL) {
5402 SCTP_TCB_LOCK_ASSERT(stcb);
5403 }
5404 } else {
5405 ret_buf = NULL;
5406 }
5407 if (linp != NULL) {
5408 SCTP_ASOC_CREATE_UNLOCK(linp);
5409 }
5410 if (ret_buf == NULL) {
5411 if (stcb != NULL) {
5412 SCTP_TCB_UNLOCK(stcb);
5413 }
5414 SCTPDBG(SCTP_DEBUG_INPUT3,
5415 "GAK, null buffer\n");
5416 *offset = length;
5417 return (NULL);
5418 }
5419 /* if AUTH skipped, see if it verified... */
5420 if (auth_skipped) {
5421 got_auth = 1;
5422 auth_skipped = 0;
5423 }
5424 /* Restart the timer if we have pending data */
5425 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
5426 if (chk->whoTo != NULL) {
5427 break;
5428 }
5429 }
5430 if (chk != NULL) {
5431 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
5432 }
5433 }
5434 break;
5435 case SCTP_COOKIE_ACK:
5436 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE_ACK, stcb %p\n", (void *)stcb);
5437 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5438 return (stcb);
5439 }
5440 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5441 /* We are not interested anymore */
5442 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5443 ;
5444 } else if (stcb) {
5445 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5446 so = SCTP_INP_SO(inp);
5447 atomic_add_int(&stcb->asoc.refcnt, 1);
5448 SCTP_TCB_UNLOCK(stcb);
5449 SCTP_SOCKET_LOCK(so, 1);
5450 SCTP_TCB_LOCK(stcb);
5451 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5452 #endif
5453 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5454 SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5455 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5456 SCTP_SOCKET_UNLOCK(so, 1);
5457 #endif
5458 *offset = length;
5459 return (NULL);
5460 }
5461 }
5462 if ((netp != NULL) && (*netp != NULL)) {
5463 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
5464 }
5465 break;
5466 case SCTP_ECN_ECHO:
5467 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_ECHO\n");
5468 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5469 /* Its not ours */
5470 *offset = length;
5471 return (stcb);
5472 }
5473 if (stcb->asoc.ecn_supported == 0) {
5474 goto unknown_chunk;
5475 }
5476 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, stcb);
5477 ecne_seen = 1;
5478 break;
5479 case SCTP_ECN_CWR:
5480 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_CWR\n");
5481 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5482 *offset = length;
5483 return (stcb);
5484 }
5485 if (stcb->asoc.ecn_supported == 0) {
5486 goto unknown_chunk;
5487 }
5488 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5489 break;
5490 case SCTP_SHUTDOWN_COMPLETE:
5491 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_COMPLETE, stcb %p\n", (void *)stcb);
5492 /* must be first and only chunk */
5493 if ((num_chunks > 1) ||
5494 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5495 *offset = length;
5496 return (stcb);
5497 }
5498 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5499 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5500 stcb, *netp);
5501 }
5502 *offset = length;
5503 return (NULL);
5504 break;
5505 case SCTP_ASCONF:
5506 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5507 if (stcb != NULL) {
5508 if (stcb->asoc.asconf_supported == 0) {
5509 goto unknown_chunk;
5510 }
5511 sctp_handle_asconf(m, *offset, src,
5512 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5513 asconf_cnt++;
5514 }
5515 break;
5516 case SCTP_ASCONF_ACK:
5517 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF_ACK\n");
5518 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5519 /* Its not ours */
5520 *offset = length;
5521 return (stcb);
5522 }
5523 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5524 if (stcb->asoc.asconf_supported == 0) {
5525 goto unknown_chunk;
5526 }
5527 /* He's alive so give him credit */
5528 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5529 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5530 stcb->asoc.overall_error_count,
5531 0,
5532 SCTP_FROM_SCTP_INPUT,
5533 __LINE__);
5534 }
5535 stcb->asoc.overall_error_count = 0;
5536 sctp_handle_asconf_ack(m, *offset,
5537 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5538 if (abort_no_unlock)
5539 return (NULL);
5540 }
5541 break;
5542 case SCTP_FORWARD_CUM_TSN:
5543 case SCTP_IFORWARD_CUM_TSN:
5544 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD_TSN\n");
5545 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5546 /* Its not ours */
5547 *offset = length;
5548 return (stcb);
5549 }
5550
5551 if (stcb != NULL) {
5552 int abort_flag = 0;
5553
5554 if (stcb->asoc.prsctp_supported == 0) {
5555 goto unknown_chunk;
5556 }
5557 *fwd_tsn_seen = 1;
5558 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5559 /* We are not interested anymore */
5560 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5561 so = SCTP_INP_SO(inp);
5562 atomic_add_int(&stcb->asoc.refcnt, 1);
5563 SCTP_TCB_UNLOCK(stcb);
5564 SCTP_SOCKET_LOCK(so, 1);
5565 SCTP_TCB_LOCK(stcb);
5566 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5567 #endif
5568 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5569 SCTP_FROM_SCTP_INPUT + SCTP_LOC_31);
5570 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5571 SCTP_SOCKET_UNLOCK(so, 1);
5572 #endif
5573 *offset = length;
5574 return (NULL);
5575 }
5576 /*
5577 * For sending a SACK this looks like DATA
5578 * chunks.
5579 */
5580 stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
5581 sctp_handle_forward_tsn(stcb,
5582 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5583 if (abort_flag) {
5584 *offset = length;
5585 return (NULL);
5586 }
5587 }
5588 break;
5589 case SCTP_STREAM_RESET:
5590 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5591 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5592 /* Its not ours */
5593 *offset = length;
5594 return (stcb);
5595 }
5596 if (stcb->asoc.reconfig_supported == 0) {
5597 goto unknown_chunk;
5598 }
5599 if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
5600 /* stop processing */
5601 *offset = length;
5602 return (NULL);
5603 }
5604 break;
5605 case SCTP_PACKET_DROPPED:
5606 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5607 /* re-get it all please */
5608 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5609 /* Its not ours */
5610 *offset = length;
5611 return (stcb);
5612 }
5613
5614 if ((ch != NULL) && (stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5615 if (stcb->asoc.pktdrop_supported == 0) {
5616 goto unknown_chunk;
5617 }
5618 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5619 stcb, *netp,
5620 min(chk_length, contiguous));
5621 }
5622 break;
5623 case SCTP_AUTHENTICATION:
5624 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5625 if (stcb == NULL) {
5626 /* save the first AUTH for later processing */
5627 if (auth_skipped == 0) {
5628 auth_offset = *offset;
5629 auth_len = chk_length;
5630 auth_skipped = 1;
5631 }
5632 /* skip this chunk (temporarily) */
5633 goto next_chunk;
5634 }
5635 if (stcb->asoc.auth_supported == 0) {
5636 goto unknown_chunk;
5637 }
5638 if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5639 (chk_length > (sizeof(struct sctp_auth_chunk) +
5640 SCTP_AUTH_DIGEST_LEN_MAX))) {
5641 /* Its not ours */
5642 *offset = length;
5643 return (stcb);
5644 }
5645 if (got_auth == 1) {
5646 /* skip this chunk... it's already auth'd */
5647 goto next_chunk;
5648 }
5649 got_auth = 1;
5650 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5651 m, *offset)) {
5652 /* auth HMAC failed so dump the packet */
5653 *offset = length;
5654 return (stcb);
5655 } else {
5656 /* remaining chunks are HMAC checked */
5657 stcb->asoc.authenticated = 1;
5658 }
5659 break;
5660
5661 default:
5662 unknown_chunk:
5663 /* it's an unknown chunk! */
5664 if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5665 struct sctp_gen_error_cause *cause;
5666 int len;
5667
5668 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
5669 0, M_NOWAIT, 1, MT_DATA);
5670 if (op_err != NULL) {
5671 len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset));
5672 cause = mtod(op_err, struct sctp_gen_error_cause *);
5673 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5674 cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause)));
5675 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5676 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
5677 if (SCTP_BUF_NEXT(op_err) != NULL) {
5678 #ifdef SCTP_MBUF_LOGGING
5679 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5680 sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY);
5681 }
5682 #endif
5683 sctp_queue_op_err(stcb, op_err);
5684 } else {
5685 sctp_m_freem(op_err);
5686 }
5687 }
5688 }
5689 if ((ch->chunk_type & 0x80) == 0) {
5690 /* discard this packet */
5691 *offset = length;
5692 return (stcb);
5693 } /* else skip this bad chunk and continue... */
5694 break;
5695 } /* switch (ch->chunk_type) */
5696
5697
5698 next_chunk:
5699 /* get the next chunk */
5700 *offset += SCTP_SIZE32(chk_length);
5701 if (*offset >= length) {
5702 /* no more data left in the mbuf chain */
5703 break;
5704 }
5705 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5706 sizeof(struct sctp_chunkhdr), chunk_buf);
5707 if (ch == NULL) {
5708 *offset = length;
5709 return (stcb);
5710 }
5711 } /* while */
5712
5713 if ((asconf_cnt > 0) && (stcb != NULL)) {
5714 sctp_send_asconf_ack(stcb);
5715 }
5716 return (stcb);
5717 }
5718
5719
5720 /*
5721 * common input chunk processing (v4 and v6)
5722 */
/*
 * Common input chunk processing shared by the IPv4 and IPv6 input paths.
 *
 * Verifies the CRC32c checksum (unless offloaded), rejects packets with a
 * zero destination port, locates the endpoint (inp) and association (stcb),
 * dispatches control chunks to sctp_process_control() and DATA chunks to
 * sctp_process_data(), handles ECN-echo and FORWARD-TSN-triggered SACKs,
 * and finally kicks the output path if anything is queued to send.
 *
 * Locking/refs: on return, any stcb that was found (and therefore locked by
 * the lookup) has been unlocked, and any implicit inp reference taken by the
 * lookup on a stcb-less match has been dropped.  The mbuf chain is NOT freed
 * here; the caller owns and frees *mm.
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
                             struct sockaddr *src, struct sockaddr *dst,
                             struct sctphdr *sh, struct sctp_chunkhdr *ch,
                             uint8_t compute_crc,
                             uint8_t ecn_bits,
#if defined(__FreeBSD__)
                             uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
#endif
                             uint32_t vrf_id, uint16_t port)
{
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm, *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	int un_sent;
	int cnt_ctrl_ready = 0;
	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
	struct sctp_tcb *stcb = NULL;
	struct sctp_nets *net = NULL;
#if defined(__Userspace__)
	struct socket *upcall_socket = NULL;
#endif

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif
	if (compute_crc != 0) {
		uint32_t check, calc_check;

		/*
		 * Compute the CRC32c over the packet with the checksum
		 * field zeroed, then restore the wire value.
		 */
		check = sh->checksum;
		sh->checksum = 0;
		calc_check = sctp_calculate_cksum(m, iphlen);
		sh->checksum = check;
		if (calc_check != check) {
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
			        calc_check, check, (void *)m, length, iphlen);
			/*
			 * Even on a bad checksum, look up the association so
			 * we can report a PACKET-DROPPED to the peer.
			 */
			stcb = sctp_findassociation_addr(m, offset, src, dst,
			                                 sh, ch, &inp, &net, vrf_id);
#if defined(INET) || defined(INET6)
			/*
			 * Track a change of the peer's UDP encapsulation
			 * port and adjust the path MTU for the UDP header.
			 */
			if ((ch->chunk_type != SCTP_INITIATION) &&
			    (net != NULL) && (net->port != port)) {
				if (net->port == 0) {
					/* UDP encapsulation turned on. */
					net->mtu -= sizeof(struct udphdr);
					if (stcb->asoc.smallest_mtu > net->mtu) {
						sctp_pathmtu_adjustment(stcb, net->mtu);
					}
				} else if (port == 0) {
					/* UDP encapsulation turned off. */
					net->mtu += sizeof(struct udphdr);
					/* XXX Update smallest_mtu */
				}
				net->port = port;
			}
#endif
#if defined(__FreeBSD__)
			if (net != NULL) {
				net->flowtype = mflowtype;
				net->flowid = mflowid;
			}
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			if ((inp != NULL) && (stcb != NULL)) {
				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
			} else if ((inp != NULL) && (stcb == NULL)) {
				/* Lookup took an inp ref; drop it at "out". */
				inp_decr = inp;
			}
			SCTP_STAT_INCR(sctps_badsum);
			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
			goto out;
		}
	}
	/* Destination port of 0 is illegal, based on RFC4960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	stcb = sctp_findassociation_addr(m, offset, src, dst,
	                                 sh, ch, &inp, &net, vrf_id);
#if defined(INET) || defined(INET6)
	/* Same UDP-encapsulation port/MTU bookkeeping as above. */
	if ((ch->chunk_type != SCTP_INITIATION) &&
	    (net != NULL) && (net->port != port)) {
		if (net->port == 0) {
			/* UDP encapsulation turned on. */
			net->mtu -= sizeof(struct udphdr);
			if (stcb->asoc.smallest_mtu > net->mtu) {
				sctp_pathmtu_adjustment(stcb, net->mtu);
			}
		} else if (port == 0) {
			/* UDP encapsulation turned off. */
			net->mtu += sizeof(struct udphdr);
			/* XXX Update smallest_mtu */
		}
		net->port = port;
	}
#endif
#if defined(__FreeBSD__)
	if (net != NULL) {
		net->flowtype = mflowtype;
		net->flowid = mflowid;
	}
#endif
	if (inp == NULL) {
		/* No endpoint on this port: out-of-the-blue handling. */
#if defined(__FreeBSD__)
		SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
		SCTP_STAT_INCR(sctps_noport);
#if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
		/* Rate-limit responses to OOTB packets. */
		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
			goto out;
		}
#endif
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			sctp_send_shutdown_complete2(src, dst, sh,
#if defined(__FreeBSD__)
			                             mflowtype, mflowid, fibnum,
#endif
			                             vrf_id, port);
			goto out;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			goto out;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
			/*
			 * Send an ABORT unless suppressed by the blackhole
			 * sysctl (1 = stay silent for INIT, 2 = always).
			 */
			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
			     (ch->chunk_type != SCTP_INIT))) {
				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
				                             "Out of the blue");
				sctp_send_abort(m, iphlen, src, dst,
				                sh, 0, op_err,
#if defined(__FreeBSD__)
				                mflowtype, mflowid, fibnum,
#endif
				                vrf_id, port);
			}
		}
		goto out;
	} else if (stcb == NULL) {
		/* Endpoint but no association: remember to drop the ref. */
		inp_decr = inp;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
	        (void *)m, iphlen, offset, length, (void *)stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		        (void *)stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			stcb = NULL;
#if defined(__FreeBSD__)
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			                             msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
#if defined(__FreeBSD__)
			                 mflowtype, mflowid, inp->fibnum,
#endif
			                 vrf_id, port);
			goto out;
		}
	}
#if defined(__Userspace__)
	/*
	 * Take a reference on the socket that should receive the upcall
	 * after processing (the listening head socket, if one exists).
	 */
	if ((stcb != NULL) &&
	    !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
	    (stcb->sctp_socket != NULL)) {
		if (stcb->sctp_socket->so_head != NULL) {
			upcall_socket = stcb->sctp_socket->so_head;
		} else {
			upcall_socket = stcb->sctp_socket;
		}
		SOCK_LOCK(upcall_socket);
		soref(upcall_socket);
		SOCK_UNLOCK(upcall_socket);
	}
#endif
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length,
		                            src, dst, sh, ch,
		                            inp, stcb, &net, &fwd_tsn_seen,
#if defined(__FreeBSD__)
		                            mflowtype, mflowid, fibnum,
#endif
		                            vrf_id, port);
		if (stcb) {
			/* This covers us if the cookie-echo was there
			 * and it changes our INP.
			 */
			inp = stcb->sctp_ep;
#if defined(INET) || defined(INET6)
			/* Re-check the UDP encapsulation port; a COOKIE
			 * exchange may have created the net entries. */
			if ((ch->chunk_type != SCTP_INITIATION) &&
			    (net != NULL) && (net->port != port)) {
				if (net->port == 0) {
					/* UDP encapsulation turned on. */
					net->mtu -= sizeof(struct udphdr);
					if (stcb->asoc.smallest_mtu > net->mtu) {
						sctp_pathmtu_adjustment(stcb, net->mtu);
					}
				} else if (port == 0) {
					/* UDP encapsulation turned off. */
					net->mtu += sizeof(struct udphdr);
					/* XXX Update smallest_mtu */
				}
				net->port = port;
			}
#endif
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
#if defined(__FreeBSD__)
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			SCTP_STAT_INCR(sctps_recvauthmissing);
			goto out;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
#if defined(__FreeBSD__)
			SCTP_PROBE5(receive, NULL, NULL, m, NULL, sh);
#endif
			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			                             msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
#if defined(__FreeBSD__)
			                 mflowtype, mflowid, fibnum,
#endif
			                 vrf_id, port);
			goto out;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
#if defined(__FreeBSD__)
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			SCTP_STAT_INCR(sctps_badvtag);
			goto out;
		}
	}

#if defined(__FreeBSD__)
	SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out;
	}
#if defined(__Userspace__)
	/*
	 * Control processing may have created the association (COOKIE-ECHO)
	 * after the earlier check; grab the upcall reference now if we
	 * don't hold one yet.
	 */
	if ((upcall_socket == NULL) &&
	    !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
	    (stcb->sctp_socket != NULL)) {
		if (stcb->sctp_socket->so_head != NULL) {
			upcall_socket = stcb->sctp_socket->so_head;
		} else {
			upcall_socket = stcb->sctp_socket;
		}
		SOCK_LOCK(upcall_socket);
		soref(upcall_socket);
		SOCK_UNLOCK(upcall_socket);
	}
#endif

	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only. Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) &&
	    (stcb != NULL) &&
	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		        "Data chunk requires AUTH, skipped\n");
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(stcb)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			                             msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
#if defined(__FreeBSD__)
			                 mflowtype, mflowid, inp->fibnum,
#endif
			                 vrf_id, port);
			goto out;
			/*sa_ignore NOTREACHED*/
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			goto out;
			/*sa_ignore NOTREACHED*/
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length,
		                           inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			stcb = NULL;
			goto out;
		}
		data_processed = 1;
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}

	/* take care of ecn */
	if ((data_processed == 1) &&
	    (stcb->asoc.ecn_supported == 1) &&
	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
		/* Yep, we need to add a ECNE */
		sctp_send_ecn_echo(stcb, net, high_tsn);
	}

	if ((data_processed == 0) && (fwd_tsn_seen)) {
		int was_a_gap;
		uint32_t highest_tsn;

		/*
		 * A FORWARD-TSN moved the cumulative ack point without any
		 * DATA chunks in this packet; force a SACK so the peer
		 * learns the new cumulative TSN.
		 */
		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
		} else {
			highest_tsn = stcb->asoc.highest_tsn_inside_map;
		}
		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
		stcb->asoc.send_sack = 1;
		sctp_sack_check(stcb, was_a_gap);
	} else if (fwd_tsn_seen) {
		stcb->asoc.send_sack = 1;
	}
	/* trigger send of any chunks in queue... */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	        "Check for chunk output prw:%d tqe:%d tf=%d\n",
	        stcb->asoc.peers_rwnd,
	        TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	        stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* Pending ECN-echoes alone do not force an output pass. */
		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
	}
	if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
	    cnt_ctrl_ready ||
	    stcb->asoc.trigger_reset ||
	    ((un_sent) &&
	     (stcb->asoc.peers_rwnd > 0 ||
	      (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
out:
	if (stcb != NULL) {
		SCTP_TCB_UNLOCK(stcb);
	}
#if defined(__Userspace__)
	if (upcall_socket != NULL) {
		/*
		 * Fire the application's socket upcall if the socket became
		 * readable or writeable (or has a pending error), then drop
		 * the reference taken earlier.
		 */
		if (upcall_socket->so_upcall != NULL) {
			if (soreadable(upcall_socket) ||
			    sowriteable(upcall_socket) ||
			    upcall_socket->so_error) {
				(*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
			}
		}
		ACCEPT_LOCK();
		SOCK_LOCK(upcall_socket);
		sorele(upcall_socket);
	}
#endif
	if (inp_decr != NULL) {
		/* reduce ref-count */
		SCTP_INP_WLOCK(inp_decr);
		SCTP_INP_DECR_REF(inp_decr);
		SCTP_INP_WUNLOCK(inp_decr);
	}
	return;
}
6178
6179 #ifdef INET
6180 #if !defined(__Userspace__)
/*
 * IPv4 input entry point (named sctp_input_with_port on FreeBSD, macOS and
 * Windows; sctp_input elsewhere).  Parses the IP header, extracts source and
 * destination sockaddr_in, validates the packet length against the mbuf
 * chain, rejects broadcast/multicast destinations, decides whether the
 * CRC32c must be computed in software, and hands the packet to
 * sctp_common_input_processing().  The mbuf chain is freed here on return.
 */
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
#elif defined(__Panda__)
void
sctp_input(pakhandle_type i_pak)
#else
void
#if __STDC__
sctp_input(struct mbuf *i_pak,...)
#else
sctp_input(i_pak, va_alist)
	struct mbuf *i_pak;
#endif
#endif
{
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct sockaddr_in src, dst;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_chunkhdr *ch;
	int length, offset;
	uint8_t compute_crc;
#if defined(__FreeBSD__)
	uint32_t mflowid;
	uint8_t mflowtype;
	uint16_t fibnum;
#endif
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__))
	/* Platforms without a port argument never use UDP encapsulation. */
	uint16_t port = 0;
#endif

#if defined(__Panda__)
	/* This is Evil, but its the only way to make panda work right. */
	iphlen = sizeof(struct ip);
#else
	iphlen = off;
#endif
	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	m = SCTP_HEADER_TO_CHAIN(i_pak);
#ifdef __Panda__
	SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
	(void)SCTP_RELEASE_HEADER(i_pak);
#endif
#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(m, SCTP_MBUF_INPUT);
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
		sctp_packet_log(m);
	}
#endif
	/* Per-platform debug trace of length and checksum-offload flags. */
#if defined(__FreeBSD__)
#if __FreeBSD_version > 1000049
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	        m->m_pkthdr.len,
	        if_name(m->m_pkthdr.rcvif),
	        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
#elif __FreeBSD_version >= 800000
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        if_name(m->m_pkthdr.rcvif),
	        m->m_pkthdr.csum_flags);
#else
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        m->m_pkthdr.rcvif->if_xname,
	        m->m_pkthdr.csum_flags);
#endif
#endif
#if defined(__APPLE__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        m->m_pkthdr.rcvif->if_name,
	        m->m_pkthdr.rcvif->if_unit,
	        m->m_pkthdr.csum_flags);
#endif
#if defined(__Windows__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        m->m_pkthdr.rcvif->if_xname,
	        m->m_pkthdr.csum_flags);
#endif
#if defined(__FreeBSD__)
	mflowid = m->m_pkthdr.flowid;
	mflowtype = M_HASHTYPE_GET(m);
	fibnum = M_GETFIB(m);
#endif
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
	}
	ip = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
	/* Rewind offset to the first chunk header for common processing. */
	offset -= sizeof(struct sctp_chunkhdr);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	src.sin_len = sizeof(struct sockaddr_in);
#endif
	src.sin_port = sh->src_port;
	src.sin_addr = ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	dst.sin_len = sizeof(struct sockaddr_in);
#endif
	dst.sin_port = sh->dest_port;
	dst.sin_addr = ip->ip_dst;
#if defined(__Windows__)
	NTOHS(ip->ip_len);
#endif
#if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
	ip->ip_len = ntohs(ip->ip_len);
#endif
	/*
	 * Compute the total packet length; platforms differ in whether
	 * ip_len is in host or network byte order and whether it already
	 * includes the IP header.
	 */
#if defined(__FreeBSD__)
#if __FreeBSD_version >= 1000000
	length = ntohs(ip->ip_len);
#else
	length = ip->ip_len + iphlen;
#endif
#elif defined(__APPLE__)
	length = ip->ip_len + iphlen;
#elif defined(__Userspace__)
#if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
	length = ip->ip_len;
#else
	length = ip->ip_len + iphlen;
#endif
#else
	length = ip->ip_len;
#endif
	/* Validate mbuf chain length with IP payload length. */
	if (SCTP_HEADER_LEN(m) != length) {
		SCTPDBG(SCTP_DEBUG_INPUT1,
		        "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
		goto out;
	}
	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
		goto out;
	}
	ecn_bits = ip->ip_tos;
	/*
	 * Skip the software CRC32c when the NIC already verified it, or
	 * (non-FreeBSD) when configured to trust loopback traffic.
	 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
#else
	if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
	    ((src.sin_addr.s_addr == dst.sin_addr.s_addr) ||
	     (SCTP_IS_IT_LOOPBACK(m)))) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
#endif
		SCTP_STAT_INCR(sctps_recvswcrc);
		compute_crc = 1;
	}
	sctp_common_input_processing(&m, iphlen, offset, length,
	                             (struct sockaddr *)&src,
	                             (struct sockaddr *)&dst,
	                             sh, ch,
	                             compute_crc,
	                             ecn_bits,
#if defined(__FreeBSD__)
	                             mflowtype, mflowid, fibnum,
#endif
	                             vrf_id, port);
out:
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
6381
6382 #if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
6383 extern int *sctp_cpuarry;
6384 #endif
6385
/*
 * Protocol-switch entry point for IPv4 SCTP.  On modern FreeBSD this is a
 * pr_input-style handler (mbuf **, offset *, returns IPPROTO_DONE);
 * otherwise it is a void (mbuf *, off) function.  With SCTP_MCORE_INPUT on
 * SMP, packets are hashed by flow id (computed from vtag and ports when the
 * lower layers did not provide one) and queued to a per-CPU worker so that
 * a given flow is always processed on the same core; otherwise the packet
 * is processed inline via sctp_input_with_port().
 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
int
sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
{
	struct mbuf *m;
	int off;

	m = *mp;
	off = *offp;
#else
void
sctp_input(struct mbuf *m, int off)
{
#endif
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
	if (mp_ncpus > 1) {
		struct ip *ip;
		struct sctphdr *sh;
		int offset;
		int cpu_to_use;
		uint32_t flowid, tag;

		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			/* Reuse the flow id supplied by the NIC/driver. */
			flowid = m->m_pkthdr.flowid;
		} else {
			/* No flow id built by lower layers
			 * fix it so we create one.
			 */
			/* Pull up enough to read the SCTP common header. */
			offset = off + sizeof(struct sctphdr);
			if (SCTP_BUF_LEN(m) < offset) {
				if ((m = m_pullup(m, offset)) == NULL) {
					SCTP_STAT_INCR(sctps_hdrops);
#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
					return (IPPROTO_DONE);
#else
					return;
#endif
				}
			}
			ip = mtod(m, struct ip *);
			sh = (struct sctphdr *)((caddr_t)ip + off);
			tag = htonl(sh->v_tag);
			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
			m->m_pkthdr.flowid = flowid;
			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
		}
		/* Steer this flow to its dedicated core's input queue. */
		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
		sctp_queue_to_mcore(m, off, cpu_to_use);
#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
		return (IPPROTO_DONE);
#else
		return;
#endif
	}
#endif
	sctp_input_with_port(m, off, 0);
#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
	return (IPPROTO_DONE);
#endif
}
6446 #endif
6447 #endif
6448